From 01cd1f05b66c68df2330e17792105e032eafef78 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Luk=C3=A1=C5=A1=20Krup=C4=8D=C3=ADk?= <lukas.krupcik@vsb.cz> Date: Fri, 26 Oct 2018 12:06:43 +0200 Subject: [PATCH] fix links --- docs.it4i/anselm/capacity-computing.md | 18 +- docs.it4i/anselm/compute-nodes.md | 4 +- docs.it4i/anselm/hardware-overview.md | 16 +- docs.it4i/anselm/introduction.md | 6 +- docs.it4i/anselm/job-priority.md | 4 +- .../anselm/job-submission-and-execution.md | 10 +- .../anselm/remote-visualization.md.disable | 224 ------------------ .../resource-allocation-and-job-execution.md | 10 +- .../anselm/resources-allocation-policy.md | 10 +- docs.it4i/anselm/shell-and-data-access.md | 14 +- .../anselm/software/debuggers/allinea-ddt.md | 4 +- .../debuggers/allinea-performance-reports.md | 6 +- .../software/intel-suite/intel-compilers.md | 8 +- docs.it4i/anselm/software/mpi/mpi.md | 8 +- .../software/numerical-languages/matlab.md | 10 +- docs.it4i/anselm/storage.md | 2 +- docs.it4i/environment-and-modules.md | 2 +- .../graphical-user-interface.md | 4 +- .../graphical-user-interface/vnc.md | 22 +- .../x-window-system.md | 2 +- .../accessing-the-clusters/introduction.md | 12 +- .../shell-access-and-data-transfer/putty.md | 32 +-- .../vpn-connection-fail-in-win-8.1.md | 17 -- .../accessing-the-clusters/vpn-access.md | 32 +-- .../accessing-the-clusters/vpn1-access.md | 82 ------- docs.it4i/general/applying-for-resources.md | 2 +- .../certificates-faq.md | 2 +- .../obtaining-login-credentials.md | 12 +- .../resource_allocation_and_job_execution.md | 12 +- docs.it4i/index.md | 4 +- docs.it4i/salomon/7d-enhanced-hypercube.md | 4 +- docs.it4i/salomon/capacity-computing.md | 18 +- docs.it4i/salomon/compute-nodes.md | 12 +- docs.it4i/salomon/hardware-overview.md | 12 +- docs.it4i/salomon/ib-single-plane-topology.md | 12 +- docs.it4i/salomon/introduction.md | 10 +- docs.it4i/salomon/job-priority.md | 4 +- .../salomon/job-submission-and-execution.md | 8 +- docs.it4i/salomon/network.md | 6 +- .../resource-allocation-and-job-execution.md | 10 +- .../salomon/resources-allocation-policy.md | 8 +- docs.it4i/salomon/shell-and-data-access.md | 16 +- docs.it4i/salomon/software/ansys/ansys-cfx.md | 3 +- .../salomon/software/ansys/ansys-fluent.md | 10 +- .../salomon/software/ansys/ansys-ls-dyna.md | 2 +- .../software/ansys/ansys-mechanical-apdl.md | 4 +- docs.it4i/salomon/software/ansys/ansys.md | 4 +- .../salomon/software/chemistry/nwchem.md | 2 +- .../software/numerical-languages/octave.md | 4 +- .../software/numerical-libraries/.gitkeep | 0 docs.it4i/salomon/software/phys/LMGC90.md | 9 +- docs.it4i/salomon/software/phys/PragTic.md | 2 +- docs.it4i/salomon/storage.md | 8 +- docs.it4i/salomon/visualization.md | 34 +-- docs.it4i/software/bio/bioinformatics.md | 2 +- .../omics-master/diagnostic-component-team.md | 2 +- .../software/bio/omics-master/overview.md | 23 +- .../priorization-component-bierapp.md | 2 +- .../cae/comsol/comsol-multiphysics.md | 6 +- docs.it4i/software/chemistry/molpro.md | 2 +- docs.it4i/software/chemistry/nwchem.md | 2 +- docs.it4i/software/chemistry/phono3py.md | 6 +- docs.it4i/software/compilers.md | 6 +- docs.it4i/software/debuggers/Introduction.md | 10 +- docs.it4i/software/debuggers/aislinn.md | 4 +- docs.it4i/software/debuggers/allinea-ddt.md | 4 +- .../debuggers/allinea-performance-reports.md | 6 +- docs.it4i/software/debuggers/cube.md | 6 +- .../intel-performance-counter-monitor.md | 10 +- .../debuggers/intel-vtune-amplifier.md | 2 +- docs.it4i/software/debuggers/papi.md | 4 +- 
docs.it4i/software/debuggers/scalasca.md | 16 +- docs.it4i/software/debuggers/score-p.md | 8 +- docs.it4i/software/debuggers/total-view.md | 4 +- docs.it4i/software/debuggers/valgrind.md | 2 +- docs.it4i/software/debuggers/vampir.md | 6 +- .../intel/intel-suite/intel-compilers.md | 4 +- .../intel/intel-suite/intel-debugger.md | 6 +- .../software/intel/intel-suite/intel-mkl.md | 4 +- .../intel-parallel-studio-introduction.md | 10 +- .../software/intel/intel-suite/intel-tbb.md | 4 +- .../intel-trace-analyzer-and-collector.md | 4 +- docs.it4i/software/isv_licenses.md | 4 +- docs.it4i/software/lang/java.md | 2 +- .../software/machine-learning/introduction.md | 6 +- docs.it4i/software/mic/mic_environment.md | 6 +- docs.it4i/software/mpi/mpi.md | 4 +- .../software/mpi/mpi4py-mpi-for-python.md | 2 +- docs.it4i/software/mpi/ompi-examples.md | 36 +-- docs.it4i/software/mpi/running-mpich2.md | 2 +- .../numerical-languages/introduction.md | 6 +- .../software/numerical-languages/matlab.md | 14 +- .../numerical-languages/matlab_1314.md | 8 +- .../software/numerical-languages/octave.md | 8 +- .../numerical-languages/opencoarrays.md | 4 +- docs.it4i/software/numerical-languages/r.md | 18 +- .../software/numerical-libraries/fftw.md | 2 +- .../software/numerical-libraries/hdf5.md | 4 +- .../intel-numerical-libraries.md | 6 +- .../magma-for-intel-xeon-phi.md | 2 +- docs.it4i/software/tools/ansys/ansys-cfx.md | 3 +- .../software/tools/ansys/ansys-fluent.md | 10 +- .../software/tools/ansys/ansys-ls-dyna.md | 2 +- .../tools/ansys/ansys-mechanical-apdl.md | 3 +- docs.it4i/software/tools/ansys/ansys.md | 2 +- docs.it4i/software/tools/ansys/licensing.md | 6 +- docs.it4i/software/tools/ansys/ls-dyna.md | 2 +- .../ansys/setting-license-preferences.md | 8 +- docs.it4i/software/tools/ansys/workbench.md | 2 +- docs.it4i/software/tools/singularity-it4i.md | 2 +- docs.it4i/software/tools/virtualization.md | 8 +- docs.it4i/software/viz/openfoam.md | 6 +- docs.it4i/software/viz/paraview.md | 6 +- 113 files changed, 410 insertions(+), 742 deletions(-) delete mode 100644 docs.it4i/anselm/remote-visualization.md.disable delete mode 100644 docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md delete mode 100644 docs.it4i/general/accessing-the-clusters/vpn1-access.md delete mode 100644 docs.it4i/salomon/software/numerical-libraries/.gitkeep diff --git a/docs.it4i/anselm/capacity-computing.md b/docs.it4i/anselm/capacity-computing.md index 41bb930fb..b55b85494 100644 --- a/docs.it4i/anselm/capacity-computing.md +++ b/docs.it4i/anselm/capacity-computing.md @@ -9,13 +9,13 @@ However, executing a huge number of jobs via the PBS queue may strain the system !!! note Please follow one of the procedures below, in case you wish to schedule more than 100 jobs at a time. 
-* Use [Job arrays](capacity-computing/#job-arrays) when running a huge number of [multithread](capacity-computing/#shared-jobscript-on-one-node) (bound to one node only) or multinode (multithread across several nodes) jobs -* Use [GNU parallel](capacity-computing/#gnu-parallel) when running single core jobs -* Combine [GNU parallel with Job arrays](capacity-computing/#job-arrays-and-gnu-parallel) when running huge number of single core jobs +* Use [Job arrays](anselm/capacity-computing/#job-arrays) when running a huge number of [multithread](anselm/capacity-computing/#shared-jobscript-on-one-node) (bound to one node only) or multinode (multithread across several nodes) jobs +* Use [GNU parallel](anselm/capacity-computing/#gnu-parallel) when running single core jobs +* Combine [GNU parallel with Job arrays](anselm/capacity-computing/#job-arrays-and-gnu-parallel) when running a huge number of single core jobs ## Policy -1. A user is allowed to submit at most 100 jobs. Each job may be [a job array](capacity-computing/#job-arrays). +1. A user is allowed to submit at most 100 jobs. Each job may be [a job array](anselm/capacity-computing/#job-arrays). 1. The array size is at most 1000 subjobs. ## Job Arrays @@ -76,7 +76,7 @@ If running a huge number of parallel multicore (in means of multinode multithrea ### Submit the Job Array -To submit the job array, use the qsub -J command. The 900 jobs of the [example above](capacity-computing/#array_example) may be submitted like this: +To submit the job array, use the qsub -J command. The 900 jobs of the [example above](anselm/capacity-computing/#array_example) may be submitted like this: ```console $ qsub -N JOBNAME -J 1-900 jobscript ``` @@ -145,7 +145,7 @@ Display status information for all user's subjobs. $ qstat -u $USER -tJ ``` -Read more on job arrays in the [PBSPro Users guide](../pbspro/). +Read more on job arrays in the [PBSPro Users guide](pbspro/). ## GNU Parallel @@ -207,7 +207,7 @@ In this example, tasks from the tasklist are executed via the GNU parallel. The ### Submit the Job -To submit the job, use the qsub command. The 101 task job of the [example above](capacity-computing/#gp_example) may be submitted as follows: +To submit the job, use the qsub command. The 101 task job of the [example above](anselm/capacity-computing/#gp_example) may be submitted as follows: ```console $ qsub -N JOBNAME jobscript ``` @@ -292,7 +292,7 @@ When deciding this values, keep in mind the following guiding rules: ### Submit the Job Array (-J) -To submit the job array, use the qsub -J command. The 992 task job of the [example above](capacity-computing/#combined_example) may be submitted like this: +To submit the job array, use the qsub -J command. The 992 task job of the [example above](anselm/capacity-computing/#combined_example) may be submitted like this: ```console $ qsub -N JOBNAME -J 1-992:32 jobscript ``` @@ -306,7 +306,7 @@ In this example, we submit a job array of 31 subjobs. Note the -J 1-992:**32**, ## Examples -Download the examples in [capacity.zip](capacity.zip), illustrating the above listed ways to run a huge number of jobs. We recommend trying out the examples before using this for running production jobs. +Download the examples in [capacity.zip](anselm/capacity.zip), illustrating the above listed ways to run a huge number of jobs. We recommend trying out the examples before using this for running production jobs.
Unzip the archive in an empty directory on Anselm and follow the instructions in the README file diff --git a/docs.it4i/anselm/compute-nodes.md b/docs.it4i/anselm/compute-nodes.md index 2ffd49193..75b890134 100644 --- a/docs.it4i/anselm/compute-nodes.md +++ b/docs.it4i/anselm/compute-nodes.md @@ -47,12 +47,12 @@ Anselm is cluster of x86-64 Intel based nodes built with Bull Extreme Computing * bullx R423-E3 servers * cn[208-209] - + **Anselm bullx B510 servers** ### Compute Node Summary -| Node type | Count | Range | Memory | Cores | [Access](resources-allocation-policy/) | +| Node type | Count | Range | Memory | Cores | [Access](anselm/resources-allocation-policy/) | | ---------------------------- | ----- | ----------- | ------ | ----------- | -------------------------------------- | | Nodes without an accelerator | 180 | cn[1-180] | 64GB | 16 @ 2.4GHz | qexp, qprod, qlong, qfree, qprace, qatlas | | Nodes with a GPU accelerator | 23 | cn[181-203] | 96GB | 16 @ 2.3GHz | qnvidia, qexp | diff --git a/docs.it4i/anselm/hardware-overview.md b/docs.it4i/anselm/hardware-overview.md index 5f10e6500..cf8f8dadc 100644 --- a/docs.it4i/anselm/hardware-overview.md +++ b/docs.it4i/anselm/hardware-overview.md @@ -6,7 +6,7 @@ The Fat nodes are equipped with a large amount (512 GB) of memory. Virtualizatio Schematic representation of the Anselm cluster. Each box represents a node (computer) or storage capacity: - + The cluster compute nodes cn[1-207] are organized within 13 chassis. @@ -17,16 +17,16 @@ There are four types of compute nodes: * 4 compute nodes with a MIC accelerator - an Intel Xeon Phi 5110P * 2 fat nodes - equipped with 512 GB of RAM and two 100 GB SSD drives -[More about Compute nodes](compute-nodes/). +[More about Compute nodes](anselm/compute-nodes/). -GPU and accelerated nodes are available upon request, see the [Resources Allocation Policy](resources-allocation-policy/). +GPU and accelerated nodes are available upon request, see the [Resources Allocation Policy](anselm/resources-allocation-policy/). -All of these nodes are interconnected through fast InfiniBand and Ethernet networks. [More about the Network](network/). +All of these nodes are interconnected through fast InfiniBand and Ethernet networks. [More about the Network](anselm/network/). Every chassis provides an InfiniBand switch, marked **isw**, connecting all nodes in the chassis, as well as connecting the chassis to the upper level switches. -All of the nodes share a 360 TB /home disk for storage of user files. The 146 TB shared /scratch storage is available for scratch data. These file systems are provided by the Lustre parallel file system. There is also local disk storage available on all compute nodes in /lscratch. [More about Storage](storage/). +All of the nodes share a 360 TB /home disk for storage of user files. The 146 TB shared /scratch storage is available for scratch data. These file systems are provided by the Lustre parallel file system. There is also local disk storage available on all compute nodes in /lscratch. [More about Storage](anselm/storage/). -User access to the Anselm cluster is provided by two login nodes login1, login2, and data mover node dm1.
[More about accessing the cluster.](anselm/shell-and-data-access/) The parameters are summarized in the following tables: @@ -35,7 +35,7 @@ The parameters are summarized in the following tables: | Primary purpose | High Performance Computing | | Architecture of compute nodes | x86-64 | | Operating system | Linux (CentOS) | -| [**Compute nodes**](compute-nodes/) | | +| [**Compute nodes**](anselm/compute-nodes/) | | | Total | 209 | | Processor cores | 16 (2 x 8 cores) | | RAM | min. 64 GB, min. 4 GB per core | @@ -57,4 +57,4 @@ The parameters are summarized in the following tables: | MIC accelerated | 2 x Intel Sandy Bridge E5-2470, 2.3 GHz | 96 GB | Intel Xeon Phi 5110P | | Fat compute node | 2 x Intel Sandy Bridge E5-2665, 2.4 GHz | 512 GB | - | -For more details refer to [Compute nodes](compute-nodes/), [Storage](storage/), and [Network](network/). +For more details refer to [Compute nodes](anselm/compute-nodes/), [Storage](anselm/storage/), and [Network](anselm/network/). diff --git a/docs.it4i/anselm/introduction.md b/docs.it4i/anselm/introduction.md index d40cd090c..7963784c6 100644 --- a/docs.it4i/anselm/introduction.md +++ b/docs.it4i/anselm/introduction.md @@ -1,11 +1,11 @@ # Introduction -Welcome to Anselm supercomputer cluster. The Anselm cluster consists of 209 compute nodes, totalling 3344 compute cores with 15 TB RAM, giving over 94 TFLOP/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 16 cores, at least 64 GB of RAM, and a 500 GB hard disk drive. Nodes are interconnected through a fully non-blocking fat-tree InfiniBand network, and are equipped with Intel Sandy Bridge processors. A few nodes are also equipped with NVIDIA Kepler GPU or Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](hardware-overview/). +Welcome to Anselm supercomputer cluster. The Anselm cluster consists of 209 compute nodes, totalling 3344 compute cores with 15 TB RAM, giving over 94 TFLOP/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 16 cores, at least 64 GB of RAM, and a 500 GB hard disk drive. Nodes are interconnected through a fully non-blocking fat-tree InfiniBand network, and are equipped with Intel Sandy Bridge processors. A few nodes are also equipped with NVIDIA Kepler GPU or Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](anselm/hardware-overview/). The cluster runs with an [operating system](software/operating-system/) which is compatible with the RedHat [Linux family.](http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg) We have installed a wide range of software packages targeted at different scientific domains. These packages are accessible via the [modules environment](environment-and-modules/). The user data shared file-system (HOME, 320 TB) and job data shared file-system (SCRATCH, 146 TB) are available to users. -The PBS Professional workload manager provides [computing resources allocations and job execution](resources-allocation-policy/). +The PBS Professional workload manager provides [computing resources allocations and job execution](anselm/resources-allocation-policy/). -Read more on how to [apply for resources](../general/applying-for-resources/), [obtain login credentials](../general/obtaining-login-credentials/obtaining-login-credentials/) and [access the cluster](shell-and-data-access/). 
+Read more on how to [apply for resources](general/applying-for-resources/), [obtain login credentials](general/obtaining-login-credentials/obtaining-login-credentials/) and [access the cluster](anselm/shell-and-data-access/). diff --git a/docs.it4i/anselm/job-priority.md b/docs.it4i/anselm/job-priority.md index 6af6c87ec..0d821c533 100644 --- a/docs.it4i/anselm/job-priority.md +++ b/docs.it4i/anselm/job-priority.md @@ -16,7 +16,7 @@ Queue priority is the priority of the queue in which the job is waiting prior to Queue priority has the biggest impact on job execution priority. The execution priority of jobs in higher priority queues is always greater than the execution priority of jobs in lower priority queues. Other properties of jobs used for determining the job execution priority (fair-share priority, eligible time) cannot compete with queue priority. -Queue priorities can be seen at <https://extranet.it4i.cz/anselm/queues> +Queue priorities can be seen at [https://extranet.it4i.cz/anselm/queues](https://extranet.it4i.cz/anselm/queues) ### Fair-Share Priority @@ -36,7 +36,7 @@ Usage counts allocated core-hours (`ncpus x walltime`). Usage decays, halving at Jobs queued in the queue qexp are not used to calculate the project's usage. !!! note - Calculated usage and fair-share priority can be seen at <https://extranet.it4i.cz/anselm/projects>. + Calculated usage and fair-share priority can be seen at [https://extranet.it4i.cz/anselm/projects](https://extranet.it4i.cz/anselm/projects). Calculated fair-share priority can be also be seen in the Resource_List.fairshare attribute of a job. diff --git a/docs.it4i/anselm/job-submission-and-execution.md b/docs.it4i/anselm/job-submission-and-execution.md index a8c6351ca..008a6c07f 100644 --- a/docs.it4i/anselm/job-submission-and-execution.md +++ b/docs.it4i/anselm/job-submission-and-execution.md @@ -92,9 +92,9 @@ In this example, we allocate 4 nodes, 16 cores per node, selecting only the node ### Placement by IB Switch -Groups of computational nodes are connected to chassis integrated Infiniband switches. These switches form the leaf switch layer of the [Infiniband network](network/) fat tree topology. Nodes sharing the leaf switch can communicate most efficiently. Sharing the same switch prevents hops in the network and facilitates unbiased, highly efficient network communication. +Groups of computational nodes are connected to chassis integrated Infiniband switches. These switches form the leaf switch layer of the [Infiniband network](anselm/network/) fat tree topology. Nodes sharing the leaf switch can communicate most efficiently. Sharing the same switch prevents hops in the network and facilitates unbiased, highly efficient network communication. -Nodes sharing the same switch may be selected via the PBS resource attribute ibswitch. Values of this attribute are iswXX, where XX is the switch number. The node-switch mapping can be seen in the [Hardware Overview](hardware-overview/) section. +Nodes sharing the same switch may be selected via the PBS resource attribute ibswitch. Values of this attribute are iswXX, where XX is the switch number. The node-switch mapping can be seen in the [Hardware Overview](anselm/hardware-overview/) section. We recommend allocating compute nodes to a single switch when best possible computational network performance is required to run the job efficiently: @@ -373,7 +373,7 @@ exit In this example, input and executable files are assumed to be preloaded manually in the /scratch/$USER/myjob directory. 
Note the **mpiprocs** and **ompthreads** qsub options controlling the behavior of the MPI execution. mympiprog.x is executed as one process per node, on all 100 allocated nodes. If mympiprog.x implements OpenMP threads, it will run 16 threads per node. -More information can be found in the [Running OpenMPI](../software/mpi/Running_OpenMPI/) and [Running MPICH2](../software/mpi/running-mpich2/) +More information can be found in the [Running OpenMPI](software/mpi/Running_OpenMPI/) and [Running MPICH2](software/mpi/running-mpich2/) sections. ### Example Jobscript for Single Node Calculation @@ -381,7 +381,7 @@ sections. !!! note The local scratch directory is often useful for single node jobs. Local scratch memory will be deleted immediately after the job ends. -Example jobscript for single node calculation, using [local scratch](storage/) memory on the node: +Example jobscript for single node calculation, using [local scratch](anselm/storage/) memory on the node: ```bash #!/bin/bash @@ -407,4 +407,4 @@ In this example, a directory in /home holds the input file input and executable ### Other Jobscript Examples -Further jobscript examples may be found in the software section and the [Capacity computing](capacity-computing/) section. +Further jobscript examples may be found in the software section and the [Capacity computing](anselm/capacity-computing/) section. diff --git a/docs.it4i/anselm/remote-visualization.md.disable b/docs.it4i/anselm/remote-visualization.md.disable deleted file mode 100644 index 0a97df681..000000000 --- a/docs.it4i/anselm/remote-visualization.md.disable +++ /dev/null @@ -1,224 +0,0 @@ -# Remote Visualization Service - -## Introduction - -The goal of this service is to provide users with GPU accelerated use of OpenGL applications, especially for pre- and post- processing work, where not only GPU performance is needed but also fast access to the shared file systems of the cluster and a reasonable amount of RAM. - -The service is based on integration of the open source tools VirtualGL and TurboVNC together with the cluster's job scheduler PBS Professional. - -Currently there are two dedicated compute nodes for this service with the following configuration for each node: - -| [**Visualization node configuration**](compute-nodes/) | | -| ------------------------------------------------------ | --------------------------------------- | -| CPU | 2 x Intel Sandy Bridge E5-2670, 2.6 GHz | -| Processor cores | 16 (2 x 8 cores) | -| RAM | 64 GB, min. 4 GB per core | -| GPU | NVIDIA Quadro 4000, 2 GB RAM | -| Local disk drive | yes - 500 GB | -| Compute network | InfiniBand QDR | - -## Schematic Overview - - - - - -## How to Use the Service - -### Setup and Start Your Own TurboVNC Server - -TurboVNC is designed and implemented for cooperation with VirtualGL and is available for free for all major platforms. For more information and download, please refer to: <http://sourceforge.net/projects/turbovnc/> - -**Always use TurboVNC on both sides** (server and client) **don't mix TurboVNC and other VNC implementations** (TightVNC, TigerVNC, ...) as the VNC protocol implementation may slightly differ and diminish your user experience by introducing picture artifacts, etc. - -The procedure is: - -#### 1. Connect to a Login Node - -Please [follow the documentation](shell-and-data-access/). - -#### 2. Run Your Own Instance of TurboVNC Server - -To have OpenGL acceleration, **24 bit color depth must be used**. Otherwise only the geometry (desktop size) definition is needed. - -!!! 
hint - The first time the VNC server is run you need to define a password. - -This example defines a desktop with the dimensions of 1200x700 pixels and 24 bit color depth. - -```console -$ module load turbovnc/1.2.2 -$ vncserver -geometry 1200x700 -depth 24 - -Desktop 'TurboVNC: login2:1 (username)' started on display login2:1 - -Starting applications specified in /home/username/.vnc/xstartup.turbovnc -Log file is /home/username/.vnc/login2:1.log -``` - -#### 3. Remember Which Display Number Your VNC Server Runs (You Will Need It in the Future to Stop the Server) - -```console -$ vncserver -list - -TurboVNC server sessions: - -X DISPLAY # PROCESS ID -:1 23269 -``` - -In this example the VNC server runs on display **:1**. - -#### 4. Remember the Exact Login Node Where Your VNC Server Runs - -```console -$ uname -n -login2 -``` - -In this example the VNC server runs on **login2**. - -#### 5. Remember on Which TCP Port Your Own VNC Server Is Running - -To get the port you have to look to the log file of your VNC server. - -```console -$ grep -E "VNC.*port" /home/username/.vnc/login2:1.log -20/02/2015 14:46:41 Listening for VNC connections on TCP port 5901 -``` - -In this example the VNC server listens on TCP port **5901**. - -#### 6. Connect to the Login Node Where Your VNC Server Runs With SSH to Tunnel Your VNC Session - -Tunnel the TCP port on which your VNC server is listenning. - -```console -$ ssh login2.anselm.it4i.cz -L 5901:localhost:5901 -``` - -x-window-system/ -If you use Windows and Putty, please refer to port forwarding setup in the documentation: -[x-window-and-vnc#section-12](../general/accessing-the-clusters/graphical-user-interface/x-window-system/) - -#### 7. If You Don't Have Turbo VNC Installed on Your Workstation - -Get it from: <http://sourceforge.net/projects/turbovnc/> - -#### 8. Run TurboVNC Viewer From Your Workstation - -Mind that you should connect through the SSH tunneled port. In this example it is 5901 on your workstation (localhost). - -```console -$ vncviewer localhost:5901 -``` - -If you use the Windows version of TurboVNC Viewer, just run the Viewer and use the address **localhost:5901**. - -#### 9. Proceed to the Chapter "Access the Visualization Node" - -Now you should have a working TurboVNC session connected to your workstation. - -#### 10. After You End Your Visualization Session - -Don't forget to correctly shutdown your own VNC server on the login node! - -```console -$ vncserver -kill :1 -``` - -### Access the Visualization Node - -**To access the node use the dedicated PBS Professional scheduler queue -qviz**. The queue has the following properties: - -| queue | active project | project resources | nodes | min ncpus | priority | authorization | walltime | -| ---------------------------- | -------------- | ----------------- | ----- | --------- | -------- | ------------- | ---------------- | -| **qviz** Visualization queue | yes | none required | 2 | 4 | 150 | no | 1 hour / 8 hours | - -Currently when accessing the node, each user gets 4 cores of a CPU allocated, thus approximately 16 GB of RAM and 1/4 of the GPU capacity. - -!!! note - If more GPU power or RAM is required, it is recommended to allocate one whole node per user, so that all 16 cores, the whole RAM, and the whole GPU is exclusive. This is currently also the maximum allocation allowed per user. One hour of work is allocated by default, the user may ask for 2 hours maximum. - -To access the visualization node, follow these steps: - -#### 1. 
In Your VNC Session, Open a Terminal and Allocate a Node Using the PBSPro qsub Command - -This step is necessary to allow you to proceed with the next steps. - -```console -$ qsub -I -q qviz -A PROJECT_ID -``` - -In this example the default values for CPU cores and usage time are used. - -```console -$ qsub -I -q qviz -A PROJECT_ID -l select=1:ncpus=16 -l walltime=02:00:00 -``` - -Substitute **PROJECT_ID** with the assigned project identification string. - -In this example a whole node is requested for 2 hours. - -If there are free resources for your request, you will have a shell running on an assigned node. Please remember the name of the node. - -```console -$ uname -n -srv8 -``` - -In this example the visualization session was assigned to node **srv8**. - -#### 2. In Your VNC Session Open Another Terminal (Keep the One With Interactive PBSPro Job Open) - -Setup the VirtualGL connection to the node, which PBSPro allocated for our job. - -```console -$ vglconnect srv8 -``` - -You will be connected with the created VirtualGL tunnel to the visualization node, where you will have a shell. - -#### 3. Load the VirtualGL Module - -```console -$ module load virtualgl/2.4 -``` - -#### 4. Run Your Desired OpenGL Accelerated Application Using the VirtualGL Script "Vglrun" - -```console -$ vglrun glxgears -``` - -If you want to run an OpenGL application which is available through modules, you need to first load the respective module. E.g. to run the **Mentat** OpenGL application from **MARC** software package use: - -```console -$ module load marc/2013.1 -$ vglrun mentat -``` - -#### 5. After You End Your Work With the OpenGL Application - -Just logout from the visualization node and exit both opened terminals and end your VNC server session as described above. - -## Tips and Tricks - -If you want to increase the responsibility of the visualization, please adjust your TurboVNC client settings in this way: - - - -To have an idea how the settings are affecting the resulting picture utility three levels of "JPEG image quality" are demonstrated: - -** JPEG image quality = 30 ** - - - -** JPEG image quality = 15 ** - - - -** JPEG image quality = 10 ** - - diff --git a/docs.it4i/anselm/resource-allocation-and-job-execution.md b/docs.it4i/anselm/resource-allocation-and-job-execution.md index a24b81511..7692844eb 100644 --- a/docs.it4i/anselm/resource-allocation-and-job-execution.md +++ b/docs.it4i/anselm/resource-allocation-and-job-execution.md @@ -1,10 +1,10 @@ # Resource Allocation and Job Execution -To run a [job](job-submission-and-execution/), [computational resources](resources-allocation-policy/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [official documentation here](../pbspro/), especially in the PBS Pro User's Guide. +To run a [job](anselm/job-submission-and-execution/), [computational resources](anselm/resources-allocation-policy/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [official documentation here](pbspro/), especially in the PBS Pro User's Guide. ## Resource Allocation Policy -The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and resources available to the Project. 
[The Fair-share](job-priority/) system of Anselm ensures that individual users may consume approximately equal amount of resources per week. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. The following queues are available to Anselm users: +The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and resources available to the Project. [The Fair-share](anselm/job-priority/) system of Anselm ensures that individual users may consume approximately equal amounts of resources per week. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. The following queues are available to Anselm users: * **qexp**, the Express queue * **qprod**, the Production queue @@ -15,7 +15,7 @@ The resources are allocated to the job in a fair-share fashion, subject to const !!! note Check the queue status at <https://extranet.it4i.cz/anselm/> -Read more on the [Resource AllocationPolicy](resources-allocation-policy/) page. +Read more on the [Resource Allocation Policy](anselm/resources-allocation-policy/) page. ## Job Submission and Execution @@ -24,7 +24,7 @@ Read more on the [Resource AllocationPolicy](resources-allocation-policy/) page. The qsub submits the job into the queue. The qsub command creates a request to the PBS Job manager for allocation of specified resources. The **smallest allocation unit is an entire node, 16 cores**, with the exception of the qexp queue. The resources will be allocated when available, subject to allocation policies and constraints. **After the resources are allocated the jobscript or interactive shell is executed on the first of the allocated nodes.** -Read more on the [Job submission and execution](job-submission-and-execution/) page. +Read more on the [Job submission and execution](anselm/job-submission-and-execution/) page. ## Capacity Computing @@ -35,4 +35,4 @@ Use GNU Parallel and/or Job arrays when running (many) single core jobs. In many cases, it is useful to submit a huge (100+) number of computational jobs into the PBS queue system. A huge number of (small) jobs is one of the most effective ways to execute embarrassingly parallel calculations, achieving the best runtime, throughput, and computer utilization. In this chapter, we discuss the the recommended way to run a huge number of jobs, including **ways to run a huge number of single core jobs**. -Read more on the [Capacity computing](capacity-computing/) page. +Read more on the [Capacity computing](anselm/capacity-computing/) page. diff --git a/docs.it4i/anselm/resources-allocation-policy.md b/docs.it4i/anselm/resources-allocation-policy.md index a4f82b288..d62656e22 100644 --- a/docs.it4i/anselm/resources-allocation-policy.md +++ b/docs.it4i/anselm/resources-allocation-policy.md @@ -2,7 +2,7 @@ ## Job Queue Policies -The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and the resources available to the Project. The Fair-share system of Anselm ensures that individual users may consume approximately equal amounts of resources per week. Detailed information can be found in the [Job scheduling](job-priority/) section. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources.
The following table provides the queue partitioning overview: +The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and the resources available to the Project. The Fair-share system of Anselm ensures that individual users may consume approximately equal amounts of resources per week. Detailed information can be found in the [Job scheduling](anselm/job-priority/) section. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. The following table provides the queue partitioning overview: !!! note Check the queue status at <https://extranet.it4i.cz/anselm/> @@ -29,18 +29,18 @@ The resources are allocated to the job in a fair-share fashion, subject to const ## Queue Notes -The job wall clock time defaults to **half the maximum time**, see the table above. Longer wall time limits can be [set manually, see examples](job-submission-and-execution/). +The job wall clock time defaults to **half the maximum time**, see the table above. Longer wall time limits can be [set manually, see examples](anselm/job-submission-and-execution/). Jobs that exceed the reserved wall clock time (Req'd Time) get killed automatically. The wall clock time limit can be changed for queuing jobs (state Q) using the qalter command, however it cannot be changed for a running job (state R). -Anselm users may check the current queue configuration at <https://extranet.it4i.cz/anselm/queues>. +Anselm users may check the current queue configuration at [https://extranet.it4i.cz/anselm/queues](https://extranet.it4i.cz/anselm/queues). ## Queue Status !!! tip - Check the status of jobs, queues and compute nodes at <https://extranet.it4i.cz/anselm/> + Check the status of jobs, queues and compute nodes at [https://extranet.it4i.cz/anselm/](https://extranet.it4i.cz/anselm/) - + Display the queue status on Anselm: diff --git a/docs.it4i/anselm/shell-and-data-access.md b/docs.it4i/anselm/shell-and-data-access.md index 5373a678f..738c022dd 100644 --- a/docs.it4i/anselm/shell-and-data-access.md +++ b/docs.it4i/anselm/shell-and-data-access.md @@ -10,7 +10,7 @@ The Anselm cluster is accessed by SSH protocol via login nodes login1 and login2 | login1.anselm.it4i.cz | 22 | ssh | login1 | | login2.anselm.it4i.cz | 22 | ssh | login2 | -Authentication is by [private key](../general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) +Authentication is by [private key](general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) !!! note Please verify SSH fingerprints during the first logon. They are identical on all login nodes: @@ -39,7 +39,7 @@ If you see a warning message "UNPROTECTED PRIVATE KEY FILE!", use this command t $ chmod 600 /path/to/id_rsa ``` -On **Windows**, use [PuTTY ssh client](../general/accessing-the-clusters/shell-access-and-data-transfer/putty.md). +On **Windows**, use [PuTTY ssh client](general/accessing-the-clusters/shell-access-and-data-transfer/putty.md). After logging in, you will see the command prompt: @@ -73,7 +73,7 @@ Data in and out of the system may be transferred by the [scp](http://en.wikipedi | login1.anselm.it4i.cz | 22 | scp | | login2.anselm.it4i.cz | 22 | scp | -Authentication is by [private key](../general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) +Authentication is by [private key](general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) !!! 
note Data transfer rates of up to **160MB/s** can be achieved with scp or sftp. @@ -119,7 +119,7 @@ $ man sshfs On Windows, use the [WinSCP client](http://winscp.net/eng/download.php) to transfer the data. The [win-sshfs client](http://code.google.com/p/win-sshfs/) provides a way to mount the Anselm filesystems directly as an external disc. -More information about the shared file systems is available [here](storage/). +More information about the shared file systems is available [here](anselm/storage/). ## Connection Restrictions @@ -204,9 +204,9 @@ Now, configure the applications proxy settings to **localhost:6000**. Use port f ## Graphical User Interface -* The [X Window system](../general/accessing-the-clusters/graphical-user-interface/x-window-system/) is the principal way to get GUI access to the clusters. -* [Virtual Network Computing](../general/accessing-the-clusters/graphical-user-interface/vnc/) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing) system that uses the [Remote Frame Buffer protocol](http://en.wikipedia.org/wiki/RFB_protocol) to remotely control another [computer](http://en.wikipedia.org/wiki/Computer). +* The [X Window system](general/accessing-the-clusters/graphical-user-interface/x-window-system/) is the principal way to get GUI access to the clusters. +* [Virtual Network Computing](general/accessing-the-clusters/graphical-user-interface/vnc/) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing) system that uses the [Remote Frame Buffer protocol](http://en.wikipedia.org/wiki/RFB_protocol) to remotely control another [computer](http://en.wikipedia.org/wiki/Computer). ## VPN Access -* Access IT4Innovations internal resources via [VPN](../general/accessing-the-clusters/vpn-access/). +* Access IT4Innovations internal resources via [VPN](general/accessing-the-clusters/vpn-access/). diff --git a/docs.it4i/anselm/software/debuggers/allinea-ddt.md b/docs.it4i/anselm/software/debuggers/allinea-ddt.md index a1f26a9fb..95eb4f107 100644 --- a/docs.it4i/anselm/software/debuggers/allinea-ddt.md +++ b/docs.it4i/anselm/software/debuggers/allinea-ddt.md @@ -59,7 +59,7 @@ Be sure to log in with an X window forwarding enabled. This could mean using the $ ssh -X username@anselm.it4i.cz ``` -Other options is to access login node using VNC. Please see the detailed information on how to [use graphic user interface on Anselm](/general/accessing-the-clusters/graphical-user-interface/x-window-system/) +Another option is to access the login node using VNC. Please see the detailed information on how to [use graphic user interface on Anselm](general/accessing-the-clusters/graphical-user-interface/x-window-system/) From the login node an interactive session **with X windows forwarding** (-X option) can be started by following command: @@ -75,7 +75,7 @@ Then launch the debugger with the ddt command followed by the name of the execut A submission window that appears have a prefilled path to the executable to debug. You can select the number of MPI processors and/or OpenMP threads on which to run and press run. Command line arguments to a program can be entered to the "Arguments " box. - + To start the debugging directly without the submission window, user can specify the debugging and execution parameters from the command line. For example the number of MPI processes is set by option "-np 4". Skipping the dialog is done by "-start" option. To see the list of the "ddt" command line parameters, run "ddt --help".
diff --git a/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md b/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md index 92602c6b3..64ddec1eb 100644 --- a/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md +++ b/docs.it4i/anselm/software/debuggers/allinea-performance-reports.md @@ -23,13 +23,13 @@ The module sets up environment variables, required for using the Allinea Perform !!! note Use the the perf-report wrapper on your (MPI) program. -Instead of [running your MPI program the usual way](../mpi/), use the the perf report wrapper: +Instead of [running your MPI program the usual way](anselm/software/mpi/), use the perf-report wrapper: ```bash $ perf-report mpirun ./mympiprog.x ``` -The MPI program will run as usual. The perf-report creates two additional files, in \*.txt and \*.html format, containing the performance report. Note that [demanding MPI codes should be run within the queue system](../../job-submission-and-execution/). +The MPI program will run as usual. The perf-report creates two additional files, in \*.txt and \*.html format, containing the performance report. Note that [demanding MPI codes should be run within the queue system](anselm/job-submission-and-execution/). ## Example @@ -56,4 +56,4 @@ Now lets profile the code: $ perf-report mpirun ./mympiprog.x ``` -Performance report files [mympiprog_32p\*.txt](../../../src/mympiprog_32p_2014-10-15_16-56.txt) and [mympiprog_32p\*.html](../../../src/mympiprog_32p_2014-10-15_16-56.html) were created. We can see that the code is very efficient on MPI and is CPU bounded. +Performance report files [mympiprog_32p\*.txt](src/mympiprog_32p_2014-10-15_16-56.txt) and [mympiprog_32p\*.html](src/mympiprog_32p_2014-10-15_16-56.html) were created. We can see that the code is very efficient on MPI and is CPU bounded. diff --git a/docs.it4i/anselm/software/intel-suite/intel-compilers.md b/docs.it4i/anselm/software/intel-suite/intel-compilers.md index 71708a733..5e3a92089 100644 --- a/docs.it4i/anselm/software/intel-suite/intel-compilers.md +++ b/docs.it4i/anselm/software/intel-suite/intel-compilers.md @@ -26,11 +26,11 @@ The compiler recognizes the omp, simd, vector and ivdep pragmas for OpenMP paral $ ifort -ipo -O3 -vec -xAVX -vec-report1 -openmp myprog.f mysubroutines.f -o myprog.x ``` -Read more at <http://software.intel.com/sites/products/documentation/doclib/stdxe/2013/composerxe/compiler/cpp-lin/index.htm> +Read more [here](http://software.intel.com/sites/products/documentation/doclib/stdxe/2013/composerxe/compiler/cpp-lin/index.htm) ## Sandy Bridge/Haswell Binary Compatibility -Anselm nodes are currently equipped with Sandy Bridge CPUs, while Salomon will use Haswell architecture. >The new processors are backward compatible with the Sandy Bridge nodes, so all programs that ran on the Sandy Bridge processors, should also run on the new Haswell nodes. >To get optimal performance out of the Haswell processors a program should make use of the special AVX2 instructions for this processor. One can do this by recompiling codes with the compiler flags >designated to invoke these instructions. For the Intel compiler suite, there are two ways of doing this: +Anselm nodes are currently equipped with Sandy Bridge CPUs, while Salomon will use Haswell architecture. The new processors are backward compatible with the Sandy Bridge nodes, so all programs that ran on the Sandy Bridge processors should also run on the new Haswell nodes.
To get optimal performance out of the Haswell processors, a program should make use of the special AVX2 instructions for this processor. One can do this by recompiling codes with the compiler flags designated to invoke these instructions. For the Intel compiler suite, there are two ways of doing this: -* Using compiler flag (both for Fortran and C): -xCORE-AVX2. This will create a binary with AVX2 instructions, specifically for the Haswell processors. Note that the executable will not run on Sandy Bridge nodes. -* Using compiler flags (both for Fortran and C): -xAVX -axCORE-AVX2. This will generate multiple, feature specific auto-dispatch code paths for Intel® processors, if there is a performance benefit. So this binary will run both on Sandy Bridge and Haswell processors. During runtime it will be decided which path to follow, dependent on which processor you are running on. In general this will result in larger binaries. +* Using compiler flag (both for Fortran and C): **-xCORE-AVX2**. This will create a binary with AVX2 instructions, specifically for the Haswell processors. Note that the executable will not run on Sandy Bridge nodes. +* Using compiler flags (both for Fortran and C): **-xAVX -axCORE-AVX2**. This will generate multiple, feature specific auto-dispatch code paths for Intel® processors, if there is a performance benefit. So this binary will run both on Sandy Bridge and Haswell processors. During runtime it will be decided which path to follow, dependent on which processor you are running on. In general this will result in larger binaries. diff --git a/docs.it4i/anselm/software/mpi/mpi.md b/docs.it4i/anselm/software/mpi/mpi.md index 2990cf222..4d2f17e24 100644 --- a/docs.it4i/anselm/software/mpi/mpi.md +++ b/docs.it4i/anselm/software/mpi/mpi.md @@ -8,7 +8,7 @@ The Anselm cluster provides several implementations of the MPI library: | ---------------------------------------------------- | --------------------------------------------------------------- | | The highly optimized and stable **bullxmpi 1.2.4.1** | Partial thread support up to MPI_THREAD_SERIALIZED | | The **Intel MPI 4.1** | Full thread support up to MPI_THREAD_MULTIPLE | -| The [OpenMPI 1.6.5](href="http://www.open-mpi.org) | Full thread support up to MPI_THREAD_MULTIPLE, BLCR c/r support | +| The [OpenMPI 1.6.5](http://www.open-mpi.org) | Full thread support up to MPI_THREAD_MULTIPLE, BLCR c/r support | | The OpenMPI 1.8.1 | Full thread support up to MPI_THREAD_MULTIPLE, MPI-3.0 support | | The **mpich2 1.9** | Full thread support up to MPI_THREAD_MULTIPLE, BLCR c/r support | @@ -139,10 +139,10 @@ In the previous two cases with one or two MPI processes per node, the operating ### Running OpenMPI -The **bullxmpi-1.2.4.1** and [**OpenMPI 1.6.5**](http://www.open-mpi.org/) are both based on OpenMPI. Read more on [how to run OpenMPI](Running_OpenMPI/) based MPI. +The **bullxmpi-1.2.4.1** and [**OpenMPI 1.6.5**](http://www.open-mpi.org/) are both based on OpenMPI. Read more on [how to run OpenMPI](software/mpi/Running_OpenMPI/) based MPI. ### Running MPICH2 -The **Intel MPI** and **mpich2 1.9** are MPICH2 based implementations. Read more on [how to run MPICH2](running-mpich2/) based MPI. +The **Intel MPI** and **mpich2 1.9** are MPICH2 based implementations. Read more on [how to run MPICH2](software/mpi/running-mpich2/) based MPI. -The Intel MPI may run on the Intel Xeon Phi accelerators as well. Read more on [how to run Intel MPI on accelerators](../intel-xeon-phi/).
+The Intel MPI may run on the Intel Xeon Phi accelerators as well. Read more on [how to run Intel MPI on accelerators](software/intel/intel-xeon-phi-anselm/). diff --git a/docs.it4i/anselm/software/numerical-languages/matlab.md b/docs.it4i/anselm/software/numerical-languages/matlab.md index 847b32d8b..a59a6faf7 100644 --- a/docs.it4i/anselm/software/numerical-languages/matlab.md +++ b/docs.it4i/anselm/software/numerical-languages/matlab.md @@ -21,9 +21,9 @@ $ ml av MATLAB If you need to use the Matlab GUI to prepare your Matlab programs, you can use Matlab directly on the login nodes. But for all computations use Matlab on the compute nodes via PBS Pro scheduler. -If you require the Matlab GUI, follow the general information about [running graphical applications](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/). +If you require the Matlab GUI, follow the general information about [running graphical applications](general/accessing-the-clusters/graphical-user-interface/x-window-system/). -Matlab GUI is quite slow using the X forwarding built in the PBS (qsub -X), so using X11 display redirection either via SSH or directly by xauth (see the "GUI Applications on Compute Nodes over VNC" part [here](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/x-window-system/)) is recommended. +Matlab GUI is quite slow using the X forwarding built in the PBS (qsub -X), so using X11 display redirection either via SSH or directly by xauth (see the "GUI Applications on Compute Nodes over VNC" part [here](general/accessing-the-clusters/graphical-user-interface/x-window-system/)) is recommended. To run Matlab with GUI, use @@ -69,7 +69,7 @@ With the new mode, MATLAB itself launches the workers via PBS, so you can either ### Parallel Matlab Interactive Session -Following example shows how to start interactive session with support for Matlab GUI. For more information about GUI based applications on Anselm see [this page](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/x-window-system/). +The following example shows how to start an interactive session with support for Matlab GUI. For more information about GUI based applications on Anselm see [this page](general/accessing-the-clusters/graphical-user-interface/x-window-system/). ```bash $ xhost + ``` @@ -251,9 +251,9 @@ delete(pool) ### Non-Interactive Session and Licenses -If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least the `-l _feature_matlab_MATLAB=1` for EDU variant of Matlab. More information about how to check the license features states and how to request them with PBS Pro [look here](../isv_licenses/). +If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least the `-l _feature_matlab_MATLAB=1` for EDU variant of Matlab. More information about how to check the license feature states and how to request them with PBS Pro can be found [here](software/isv_licenses/). -In case of non-interactive session read the [following information](../isv_licenses/) on how to modify the qsub command to test for available licenses prior getting the resource allocation. +In case of a non-interactive session, read the [following information](software/isv_licenses/) on how to modify the qsub command to test for available licenses prior to getting the resource allocation.
### Matlab Distributed Computing Engines Start Up Time diff --git a/docs.it4i/anselm/storage.md b/docs.it4i/anselm/storage.md index 4842ee179..b5b3fb87b 100644 --- a/docs.it4i/anselm/storage.md +++ b/docs.it4i/anselm/storage.md @@ -105,7 +105,7 @@ The HOME filesystem is mounted in directory /home. Users home directories /home/ The HOME filesystem should not be used to archive data of past Projects or other unrelated data. -The files on HOME filesystem will not be deleted until end of the [users lifecycle](../general/obtaining-login-credentials/obtaining-login-credentials/). +The files on HOME filesystem will not be deleted until end of the [users lifecycle](general/obtaining-login-credentials/obtaining-login-credentials/). The filesystem is backed up, such that it can be restored in case of catasthropic failure resulting in significant data loss. This backup however is not intended to restore old versions of user data or to restore (accidentaly) deleted files. diff --git a/docs.it4i/environment-and-modules.md b/docs.it4i/environment-and-modules.md index 5dd3bbbfd..632a7574f 100644 --- a/docs.it4i/environment-and-modules.md +++ b/docs.it4i/environment-and-modules.md @@ -32,7 +32,7 @@ In order to configure your shell for running particular application on clusters Application modules on clusters are built using [EasyBuild](software/tools/easybuild/). The modules are divided into the following structure: -```console +``` base: Default module class bio: Bioinformatics, biology and biomedical cae: Computer Aided Engineering (incl. CFD) diff --git a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/graphical-user-interface.md b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/graphical-user-interface.md index b12d1bb97..832335df2 100644 --- a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/graphical-user-interface.md +++ b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/graphical-user-interface.md @@ -4,10 +4,10 @@ The X Window system is a principal way to get GUI access to the clusters. -Read more about configuring [**X Window System**](x-window-system/). +Read more about configuring [X Window System](general/accessing-the-clusters/graphical-user-interface/x-window-system/). ## VNC The **Virtual Network Computing** (**VNC**) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing "Desktop sharing") system that uses the [Remote Frame Buffer protocol (RFB)](http://en.wikipedia.org/wiki/RFB_protocol "RFB protocol") to remotely control another [computer](http://en.wikipedia.org/wiki/Computer "Computer"). -Read more about configuring **[VNC](vnc/)**. \ No newline at end of file +Read more about configuring [VNC](general/accessing-the-clusters/graphical-user-interface/vnc/). diff --git a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md index 538ed5c5b..deac80094 100644 --- a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md +++ b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/vnc.md @@ -2,7 +2,7 @@ The **Virtual Network Computing** (**VNC**) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing "Desktop sharing") system that uses the [Remote Frame Buffer protocol (RFB)](http://en.wikipedia.org/wiki/RFB_protocol "RFB protocol") to remotely control another [computer](http://en.wikipedia.org/wiki/Computer "Computer"). 
It transmits the [keyboard](http://en.wikipedia.org/wiki/Computer_keyboard "Computer keyboard") and [mouse](http://en.wikipedia.org/wiki/Computer_mouse "Computer mouse") events from one computer to another, relaying the graphical [screen](http://en.wikipedia.org/wiki/Computer_screen "Computer screen") updates back in the other direction, over a [network](http://en.wikipedia.org/wiki/Computer_network "Computer network"). -Vnc-based connections are usually faster (require less network bandwidth) then [X11](x-window-system) applications forwarded directly through ssh. +VNC-based connections are usually faster (require less network bandwidth) than [X11](general/accessing-the-clusters/graphical-user-interface/x-window-system) applications forwarded directly through ssh. The recommended clients are [TightVNC](http://www.tightvnc.com) or [TigerVNC](http://sourceforge.net/apps/mediawiki/tigervnc/index.php?title=Main_Page) (free, open source, available for almost any platform). @@ -141,7 +141,7 @@ On the PuTTY Configuration screen go to Connection->SSH->Tunnels to set up the t Fill the Source port and Destination fields. **Do not forget to click the Add button**. - + ### WSL (Bash on Windows) @@ -159,7 +159,7 @@ Run the VNC client of your choice, select VNC server 127.0.0.1, port 5961 and co ### TigerVNC Viewer - + In this example, we connect to VNC server on port 5961, via the ssh tunnel, using TigerVNC viewer. The connection is encrypted and secured. The VNC server listening on port 5961 provides screen of 1600x900 pixels. @@ -167,23 +167,23 @@ In this example, we connect to VNC server on port 5961, via the ssh tunnel, usin Use your VNC password to log using TightVNC Viewer and start a Gnome Session on the login node. - + ## Gnome Session You should see after the successful login. - + ### Disable Your Gnome Session Screensaver Open Screensaver preferences dialog: - + Uncheck both options below the slider: - + ### Kill Screensaver if Locked Screen @@ -222,7 +222,7 @@ The very same methods as described above, may be used to run the GUI application Open a Terminal (Applications -> System Tools -> Terminal). Run all the next commands in the terminal. - + Allow incoming X11 graphics from the compute nodes at the login node: ```console $ xhost + ``` -Get an interactive session on a compute node (for more detailed info [look here](../../../anselm/job-submission-and-execution/)). Use the **-v DISPLAY** option to propagate the DISPLAY on the compute node. In this example, we want a complete node (24 cores in this example) from the production queue: +Get an interactive session on a compute node (for more detailed info [look here](anselm/job-submission-and-execution/)). Use the **-v DISPLAY** option to propagate the DISPLAY on the compute node. In this example, we want a complete node (16 cores) from the production queue: ```console -$ qsub -I -v DISPLAY=$(uname -n):$(echo $DISPLAY | cut -d ':' -f 2) -A PROJECT_ID -q qprod -l select=1:ncpus=24 +$ qsub -I -v DISPLAY=$(uname -n):$(echo $DISPLAY | cut -d ':' -f 2) -A PROJECT_ID -q qprod -l select=1:ncpus=16 ``` Test that the DISPLAY redirection into your VNC session works, by running a X11 application (e. g.
XTerm) on the assigned compute node: @@ -244,4 +244,4 @@ $ xterm Example described above: - + diff --git a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/x-window-system.md b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/x-window-system.md index 1f87d62a5..5381c07d4 100644 --- a/docs.it4i/general/accessing-the-clusters/graphical-user-interface/x-window-system.md +++ b/docs.it4i/general/accessing-the-clusters/graphical-user-interface/x-window-system.md @@ -19,7 +19,7 @@ In order to display graphical user interface GUI of various software tools, you On Windows use the PuTTY client to enable X11 forwarding. In PuTTY menu, go to Connection-SSH-X11, mark the Enable X11 forwarding checkbox before logging in. - + Then log in as usual. diff --git a/docs.it4i/general/accessing-the-clusters/introduction.md b/docs.it4i/general/accessing-the-clusters/introduction.md index 5ca0d7689..f4c93dec2 100644 --- a/docs.it4i/general/accessing-the-clusters/introduction.md +++ b/docs.it4i/general/accessing-the-clusters/introduction.md @@ -3,22 +3,22 @@ The IT4Innovations clusters are accessed by SSH protocol via login nodes. !!! note - Read more on [Accessing the Salomon Cluster](../../salomon/shell-and-data-access.md) or [Accessing the Anselm Cluster](../../anselm/shell-and-data-access.md) pages. + Read more on [Accessing the Salomon Cluster](salomon/shell-and-data-access.md) or [Accessing the Anselm Cluster](anselm/shell-and-data-access.md) pages. ## PuTTY -On **Windows**, use [PuTTY ssh client](shell-access-and-data-transfer/putty/). +On **Windows**, use [PuTTY ssh client](general/accessing-the-clusters/shell-access-and-data-transfer/putty/). ## SSH Keys -Read more about [SSH keys management](shell-access-and-data-transfer/ssh-keys/). +Read more about [SSH keys management](general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/). ## Graphical User Interface -Read more about [X Window System](./graphical-user-interface/x-window-system/). +Read more about [X Window System](general/accessing-the-clusters/graphical-user-interface/x-window-system/). -Read more about [Virtual Network Computing (VNC)](./graphical-user-interface/vnc/). +Read more about [Virtual Network Computing (VNC)](general/accessing-the-clusters/graphical-user-interface/vnc/). ## Accessing IT4Innovations Internal Resources via VPN -Read more about [VPN Access](vpn-access/). +Read more about [VPN Access](general/accessing-the-clusters/vpn-access/). diff --git a/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/putty.md b/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/putty.md index 7a4d63ed9..9e9d8e4ca 100644 --- a/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/putty.md +++ b/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/putty.md @@ -16,27 +16,27 @@ We recommned you to download "**A Windows installer for everything except PuTTYt ## PuTTY - How to Connect to the IT4Innovations Cluster * Run PuTTY -* Enter Host name and Save session fields with [Login address](../../../salomon/shell-and-data-access.md) and browse Connection - SSH - Auth menu. The _Host Name_ input may be in the format **"username@clustername.it4i.cz"** so you don't have to type your login each time.In this example we will connect to the Salomon cluster using **"salomon.it4i.cz"**. +* Enter Host name and Save session fields with [Login address](salomon/shell-and-data-access.md) and browse Connection - SSH - Auth menu.
The _Host Name_ input may be in the format **"username@clustername.it4i.cz"** so you don't have to type your login each time. In this example we will connect to the Salomon cluster using **"salomon.it4i.cz"**. - + * Category - Connection - SSH - Auth: Select Attempt authentication using Pageant. Select Allow agent forwarding. Browse and select your [private key](ssh-keys/) file. - + * Return to Session page and Save selected configuration with _Save_ button. - + * Now you can log in using _Open_ button. - + * Enter your username if the _Host Name_ input is not in the format "username@salomon.it4i.cz". -* Enter passphrase for selected [private key](ssh-keys/) file if Pageant **SSH authentication agent is not used.** +* Enter passphrase for selected [private key](general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) file if Pageant **SSH authentication agent is not used.** ## Another PuTTY Settings @@ -53,7 +53,7 @@ Pageant holds your private key in memory without needing to retype a passphrase * Enter your passphrase. * Now you have your private key in memory without needing to retype a passphrase on every login. - + ## PuTTY Key Generator @@ -63,13 +63,13 @@ PuTTYgen is the PuTTY key generator. You can load in an existing private key and You can change the password of your SSH key with "PuTTY Key Generator". Make sure to backup the key. -* Load your [private key](../shell-access-and-data-transfer/ssh-keys/) file with _Load_ button. +* Load your [private key](general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) file with _Load_ button. * Enter your current passphrase. * Change key passphrase. * Confirm key passphrase. * Save your private key with _Save private key_ button. - + ### Generate a New Public/Private Key @@ -77,31 +77,31 @@ You can generate an additional public/private key pair and insert public key int * Start with _Generate_ button. - + * Generate some randomness. - + * Wait. - + * Enter a _comment_ for your key using format 'username@organization.example.com'. Enter key passphrase. Confirm key passphrase. Save your new private key in ".ppk" format with the _Save private key_ button. - + * Save the public key with _Save public key_ button. You can copy public key out of the ‘Public key for pasting into authorized_keys file’ box. - + * Export private key in OpenSSH format "id_rsa" using Conversion - Export OpenSSH key - + * Now you can insert additional public key into authorized_keys file for authentication with your own private key. - You must log in using ssh key received after registration. Then proceed to [How to add your own key](../shell-access-and-data-transfer/ssh-keys/). + You must log in using ssh key received after registration. Then proceed to [How to add your own key](general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/). diff --git a/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md b/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md deleted file mode 100644 index 5b34c3405..000000000 --- a/docs.it4i/general/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.md +++ /dev/null @@ -1,17 +0,0 @@ -# VPN - Connection Fail in Win 8.1 - -## Failed to Initialize Connection Subsystem Win 8.1 - 02-10-15 MS Patch - -AnyConnect users on Windows 8.1 will receive a "Failed to initialize connection subsystem" error after installing the Windows 8.1 02/10/15 security patch.
This OS defect introduced with the 02/10/15 patch update will also impact WIndows 7 users with IE11. Windows Server 2008/2012 are also impacted by this defect, but neither is a supported OS for AnyConnect. - -## Workaround - -* Close the Cisco AnyConnect Window and the taskbar mini-icon -* Right click vpnui.exe in the 'Cisco AnyConnect Secure Mobility Client' folder. (C:Program Files (x86)CiscoCisco AnyConnect Secure Mobility Client) -* Click on the 'Run compatibility troubleshooter' button -* Choose 'Try recommended settings' -* The wizard suggests Windows 8 compatibility. -* Click 'Test Program'. This will open the program. -* Close - - diff --git a/docs.it4i/general/accessing-the-clusters/vpn-access.md b/docs.it4i/general/accessing-the-clusters/vpn-access.md index dc89ef681..a76275513 100644 --- a/docs.it4i/general/accessing-the-clusters/vpn-access.md +++ b/docs.it4i/general/accessing-the-clusters/vpn-access.md @@ -15,27 +15,27 @@ It is impossible to connect to VPN from other operating systems. ## VPN Client Installation -You can install VPN client from web interface after successful login with [IT4I credentials](../obtaining-login-credentials/obtaining-login-credentials/#login-credentials) on address <https://vpn.it4i.cz/user> +You can install VPN client from web interface after successful login with [IT4I credentials](general/obtaining-login-credentials/obtaining-login-credentials/#login-credentials) on address [https://vpn.it4i.cz/user](https://vpn.it4i.cz/user) - + According to the Java settings after login, the client either automatically installs, or downloads installation file for your operating system. It is necessary to allow start of installation tool for automatic installation. If auto install does not start, then proceed with manual installation described in next steps. - - - + + + After successful installation, VPN connection will be established and you can use available resources from IT4I network. - + If your Java setting doesn't allow automatic installation, you can download installation file and install VPN client manually. - + After you click on the link, download of installation file will start. - + After successful download of installation file, you have to execute this executable with administrator or root rights and install VPN client manually. @@ -43,32 +43,32 @@ After successful download of installation file, you have to execute this executa You can use graphical user interface or command line interface to run VPN client on all supported operating systems. We suggest using GUI. -Before the first login to VPN, you have to fill URL **<https://vpn.it4i.cz/user>** into the text field. +Before the first login to VPN, you have to fill URL **[https://vpn.it4i.cz/user](https://vpn.it4i.cz/user)** into the text field. - + After you click on the Connect button, you must fill your login credentials. - + After a successful login, the client will minimize to the system tray. If everything works, you can see a lock in the Cisco tray icon. - + If you right-click on this icon, you will see a context menu in which you can control the VPN connection. - + When you connect to the VPN for the first time, the client downloads the profile and creates a new item "IT4I cluster" in the connection list. For subsequent connections, it is not necessary to re-enter the URL address, but just select the corresponding item. - + Then AnyConnect automatically proceeds like in the case of first logon. - + After a successful logon, you can see a green circle with a tick mark on the lock icon. 
- + For disconnecting, right-click on the AnyConnect client icon in the system tray and select **VPN Disconnect**. diff --git a/docs.it4i/general/accessing-the-clusters/vpn1-access.md b/docs.it4i/general/accessing-the-clusters/vpn1-access.md deleted file mode 100644 index b7cacfd24..000000000 --- a/docs.it4i/general/accessing-the-clusters/vpn1-access.md +++ /dev/null @@ -1,82 +0,0 @@ -# VPN Access - -## Accessing IT4Innovations Internal Resources via VPN - -!!! note - **Failed to initialize connection subsystem Win 8.1 - 02-10-15 MS patch** - -Workaround can be found at [vpn-connection-fail-in-win-8.1](../../general/accessing-the-clusters/shell-access-and-data-transfer/vpn-connection-fail-in-win-8.1.html) - -For using resources and licenses which are located at IT4Innovations local network, it is necessary to VPN connect to this network. We use Cisco AnyConnect Secure Mobility Client, which is supported on the following operating systems: - -* Windows XP -* Windows Vista -* Windows 7 -* Windows 8 -* Linux -* MacOS - -It is impossible to connect to VPN from other operating systems. - -## VPN Client Installation - -You can install VPN client from web interface after successful login with LDAP credentials on address <https://vpn1.it4i.cz/anselm> - - - -According to the Java settings after login, the client either automatically installs, or downloads installation file for your operating system. It is necessary to allow start of installation tool for automatic installation. - - - - - - - -After successful installation, VPN connection will be established and you can use available resources from IT4I network. - - - -If your Java setting doesn't allow automatic installation, you can download installation file and install VPN client manually. - - - -After you click on the link, download of installation file will start. - - - -After successful download of installation file, you have to execute this tool with administrator's rights and install VPN client manually. - -## Working With VPN Client - -You can use graphical user interface or command line interface to run VPN client on all supported operating systems. We suggest using GUI. - -Before the first login to VPN, you have to fill URL [**https://vpn1.it4i.cz/anselm**](https://vpn1.it4i.cz/anselm) into the text field. - - - -After you click on the Connect button, you must fill your login credentials. - - - -After a successful login, the client will minimize to the system tray. -If everything works, you can see a lock in the Cisco tray icon. - - - -If you right-click on this icon, you will see a context menu in which you can control the VPN connection. - - - -When you connect to the VPN for the first time, the client downloads the profile and creates a new item "ANSELM" in the connection list. For subsequent connections, it is not necessary to re-enter the URL address, but just select the corresponding item. - - - -Then AnyConnect automatically proceeds like in the case of first logon. - - - -After a successful logon, you can see a green circle with a tick mark on the lock icon. - - - -For disconnecting, right-click on the AnyConnect client icon in the system tray and select **VPN Disconnect**. 
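The vpn-access.md page above mentions that the VPN client can be run from a command line interface as well as from the GUI, but only the GUI flow is shown. The following is an illustrative sketch only: the install path and the connect/state/disconnect verbs are assumptions about the Cisco AnyConnect Linux CLI, not IT4I-verified steps.

```console
$ /opt/cisco/anyconnect/bin/vpn connect vpn.it4i.cz   # prompts for your login credentials
$ /opt/cisco/anyconnect/bin/vpn state                 # verify that the tunnel is up
$ /opt/cisco/anyconnect/bin/vpn disconnect            # equivalent of VPN Disconnect in the tray menu
```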
diff --git a/docs.it4i/general/applying-for-resources.md b/docs.it4i/general/applying-for-resources.md index 8875ec91b..40307695f 100644 --- a/docs.it4i/general/applying-for-resources.md +++ b/docs.it4i/general/applying-for-resources.md @@ -8,4 +8,4 @@ Anyone is welcomed to apply via the [Directors Discretion.](http://www.it4i.cz/o Foreign (mostly European) users can obtain computational resources via the [PRACE (DECI) program](http://www.prace-ri.eu/DECI-Projects). -In all cases, IT4Innovations’ access mechanisms are aimed at distributing computational resources while taking into account the development and application of supercomputing methods and their benefits and usefulness for society. The applicants are expected to submit a proposal. In the proposal, the applicants **apply for a particular amount of core-hours** of computational resources. The requested core-hours should be substantiated by scientific excellence of the proposal, its computational maturity and expected impacts. Proposals do undergo a scientific, technical and economic evaluation. The allocation decisions are based on this evaluation. More information at [Computing resources allocation](http://www.it4i.cz/computing-resources-allocation/?lang=en) and [Obtaining Login Credentials](obtaining-login-credentials/obtaining-login-credentials/) page. +In all cases, IT4Innovations’ access mechanisms are aimed at distributing computational resources while taking into account the development and application of supercomputing methods and their benefits and usefulness for society. The applicants are expected to submit a proposal. In the proposal, the applicants **apply for a particular amount of core-hours** of computational resources. The requested core-hours should be substantiated by scientific excellence of the proposal, its computational maturity and expected impacts. Proposals undergo a scientific, technical and economic evaluation. The allocation decisions are based on this evaluation. More information at the [Computing resources allocation](http://www.it4i.cz/computing-resources-allocation/?lang=en) and [Obtaining Login Credentials](general/obtaining-login-credentials/obtaining-login-credentials/) pages. diff --git a/docs.it4i/general/obtaining-login-credentials/certificates-faq.md b/docs.it4i/general/obtaining-login-credentials/certificates-faq.md index 815344b2d..6b9f9fbbb 100644 --- a/docs.it4i/general/obtaining-login-credentials/certificates-faq.md +++ b/docs.it4i/general/obtaining-login-credentials/certificates-faq.md @@ -17,7 +17,7 @@ However, users need only manage User and CA certificates. Note that your user ce ## Q: Which X.509 Certificates Are Recognised by IT4Innovations? -[The Certificates for Digital Signatures](obtaining-login-credentials/#the-certificates-for-digital-signatures). +[The Certificates for Digital Signatures](general/obtaining-login-credentials/#the-certificates-for-digital-signatures). ## Q: How Do I Get a User Certificate That Can Be Used With IT4Innovations?
diff --git a/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md b/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md index 5aa71b9e8..7f41a37ca 100644 --- a/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md +++ b/docs.it4i/general/obtaining-login-credentials/obtaining-login-credentials.md @@ -2,9 +2,9 @@ ## Obtaining Authorization -The computational resources of IT4I are allocated by the Allocation Committee to a [Project](/), investigated by a Primary Investigator. By allocating the computational resources, the Allocation Committee is authorizing the PI to access and use the clusters. The PI may decide to authorize a number of her/his Collaborators to access and use the clusters, to consume the resources allocated to her/his Project. These collaborators will be associated to the Project. The Figure below is depicting the authorization chain: +The computational resources of IT4I are allocated by the Allocation Committee to a Project, investigated by a Primary Investigator. By allocating the computational resources, the Allocation Committee is authorizing the PI to access and use the clusters. The PI may decide to authorize a number of her/his Collaborators to access and use the clusters, to consume the resources allocated to her/his Project. These collaborators will be associated to the Project. The figure below depicts the authorization chain: - + !!! note You need to either [become the PI](../applying-for-resources/) or [be named as a collaborator](obtaining-login-credentials/#authorization-by-web) by a PI in order to access and use the clusters. @@ -17,7 +17,7 @@ The PI is authorized to use the clusters by the allocation decision issued by th ## Process Flow Chart -This chart describes the process of obtaining login credentials on the clusters. You may skip the tasks, that you have already done. Some of the tasks, marked with asterisk (*), are clickable and will take you to more detailed description. +This chart describes the process of obtaining login credentials on the clusters. You may skip the tasks that you have already done. Some of the tasks, marked with an asterisk (\*), are clickable and will take you to a more detailed description. * I am a collaborator on a project and want to obtain login credentials @@ -141,7 +141,7 @@ You will receive your personal login credentials by protected e-mail. The login 1. ssh private key and private key passphrase 1. system password -The clusters are accessed by the [private key](../accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) and username. Username and password is used for login to the [information systems](http://support.it4i.cz/). +The clusters are accessed by the [private key](general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) and username. Username and password are used for login to the [information systems](http://support.it4i.cz/). ## Authorization by Web @@ -192,7 +192,7 @@ On Linux, use local $ ssh-keygen -f id_rsa -p ``` -On Windows, use [PuTTY Key Generator](../accessing-the-clusters/shell-access-and-data-transfer/putty/#putty-key-generator). +On Windows, use [PuTTY Key Generator](general/accessing-the-clusters/shell-access-and-data-transfer/putty/#putty-key-generator).
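To complement the passphrase-change example above, a new key pair can also be generated locally on Linux with the same ssh-keygen tool. This is a sketch only; the key type, file name, and comment are illustrative values, not IT4I requirements.

```console
local $ ssh-keygen -t rsa -b 4096 -C 'username@organization.example.com' -f ~/.ssh/id_rsa.it4i
local $ cat ~/.ssh/id_rsa.it4i.pub    # public key, to be added to authorized_keys on the cluster
```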
## Certificates for Digital Signatures @@ -207,7 +207,7 @@ Certificate generation process for academic purposes, utilizing the CESNET certi If you are not able to obtain a certificate from any of the respected certification authorities, follow the Alternative Way below. -A FAQ about certificates can be found here: [Certificates FAQ](certificates-faq/). +A FAQ about certificates can be found here: [Certificates FAQ](general/obtaining-login-credentials/certificates-faq/). ## Alternative Way to Personal Certificate diff --git a/docs.it4i/general/resource_allocation_and_job_execution.md b/docs.it4i/general/resource_allocation_and_job_execution.md index d21f7b9c9..2c8594e1d 100644 --- a/docs.it4i/general/resource_allocation_and_job_execution.md +++ b/docs.it4i/general/resource_allocation_and_job_execution.md @@ -1,10 +1,10 @@ # Resource Allocation and Job Execution -To run a [job](/#terminology-frequently-used-on-these-pages), [computational resources](/salomon/resources-allocation-policy/#resource-accounting-policy) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [PBS Pro User's Guide](/pbspro). +To run a [job](/#terminology-frequently-used-on-these-pages), [computational resources](salomon/resources-allocation-policy/#resource-accounting-policy) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [PBS Pro User's Guide](pbspro). ## Resources Allocation Policy -The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and resources available to the Project. [The Fair-share](/salomon/job-priority/#fair-share-priority) ensures that individual users may consume approximately equal amount of resources per week. The resources are accessible via queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. Following queues are are the most important: +The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and resources available to the Project. [The Fair-share](salomon/job-priority/#fair-share-priority) ensures that individual users may consume an approximately equal amount of resources per week. The resources are accessible via queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. Following queues are the most important: * **qexp**, the Express queue * **qprod**, the Production queue @@ -14,9 +14,9 @@ The resources are allocated to the job in a fair-share fashion, subject to const * **qfree**, the Free resource utilization queue !!! note - Check the queue status at <https://extranet.it4i.cz/> + Check the queue status at [https://extranet.it4i.cz/](https://extranet.it4i.cz/) -Read more on the [Resource AllocationPolicy](/salomon/resources-allocation-policy) page. +Read more on the [Resource Allocation Policy](salomon/resources-allocation-policy) page. ## Job Submission and Execution @@ -25,7 +25,7 @@ Read more on the [Resource AllocationPolicy](/salomon/resources-allocation-polic The qsub submits the job into the queue. The qsub command creates a request to the PBS Job manager for allocation of specified resources.
The **smallest allocation unit is entire node, 16 cores**, with exception of the qexp queue. The resources will be allocated when available, subject to allocation policies and constraints. **After the resources are allocated the jobscript or interactive shell is executed on first of the allocated nodes.** -Read more on the [Job submission and execution](/salomon/job-submission-and-execution) page. +Read more on the [Job submission and execution](salomon/job-submission-and-execution) page. ## Capacity Computing @@ -36,4 +36,4 @@ Use GNU Parallel and/or Job arrays when running (many) single core jobs. In many cases, it is useful to submit a huge (100+) number of computational jobs into the PBS queue system. A huge number of (small) jobs is one of the most effective ways to execute embarrassingly parallel calculations, achieving best runtime, throughput and computer utilization. In this chapter, we discuss the recommended way to run huge number of jobs, including **ways to run huge number of single core jobs**. -Read more on [Capacity computing](/salomon/capacity-computing) page. +Read more on [Capacity computing](salomon/capacity-computing) page. diff --git a/docs.it4i/index.md b/docs.it4i/index.md index 2fffc1639..784e7b220 100644 --- a/docs.it4i/index.md +++ b/docs.it4i/index.md @@ -1,6 +1,6 @@ # Documentation -Welcome to the IT4Innovations documentation pages. The IT4Innovations national supercomputing center operates the supercomputers [Salomon](salomon/introduction/) and [Anselm](/anselm/introduction/). The supercomputers are [available](general/applying-for-resources/) to the academic community within the Czech Republic and Europe, and the industrial community worldwide. The purpose of these pages is to provide comprehensive documentation of the hardware, software and usage of the computers. +Welcome to the IT4Innovations documentation pages. The IT4Innovations national supercomputing center operates the supercomputers [Salomon](salomon/introduction/) and [Anselm](anselm/introduction/). The supercomputers are [available](general/applying-for-resources/) to the academic community within the Czech Republic and Europe, and the industrial community worldwide. The purpose of these pages is to provide comprehensive documentation of the hardware, software and usage of the computers. ## How to Read the Documentation @@ -60,7 +60,7 @@ local $ ## Errors Although we have taken every care to ensure the accuracy of the content, mistakes do happen. -If you find an inconsistency or error, report it by visiting <http://support.it4i.cz/rt>, creating a new ticket, and entering the details. +If you find an inconsistency or error, report it by visiting [http://support.it4i.cz/rt](http://support.it4i.cz/rt), creating a new ticket, and entering the details. By doing so, you can save other readers from frustration and help us improve. !!!
tip diff --git a/docs.it4i/salomon/7d-enhanced-hypercube.md b/docs.it4i/salomon/7d-enhanced-hypercube.md index af082502d..2c434c21b 100644 --- a/docs.it4i/salomon/7d-enhanced-hypercube.md +++ b/docs.it4i/salomon/7d-enhanced-hypercube.md @@ -1,6 +1,6 @@ # 7D Enhanced Hypercube - + | Node type | Count | Short name | Long name | Rack | | ------------------------------------ | ----- | ---------------- | ------------------------ | ----- | @@ -9,4 +9,4 @@ ## IB Topology - + diff --git a/docs.it4i/salomon/capacity-computing.md b/docs.it4i/salomon/capacity-computing.md index e65e5ad99..e2121b61d 100644 --- a/docs.it4i/salomon/capacity-computing.md +++ b/docs.it4i/salomon/capacity-computing.md @@ -9,13 +9,13 @@ However, executing huge number of jobs via the PBS queue may strain the system. !!! note Please follow one of the procedures below, in case you wish to schedule more than 100 jobs at a time. -* Use [Job arrays](capacity-computing.md#job-arrays) when running huge number of [multithread](capacity-computing/#shared-jobscript-on-one-node) (bound to one node only) or multinode (multithread across several nodes) jobs -* Use [GNU parallel](capacity-computing/#gnu-parallel) when running single core jobs -* Combine [GNU parallel with Job arrays](capacity-computing/#job-arrays-and-gnu-parallel) when running huge number of single core jobs +* Use [Job arrays](salomon/capacity-computing.md#job-arrays) when running huge number of [multithread](salomon/capacity-computing/#shared-jobscript-on-one-node) (bound to one node only) or multinode (multithread across several nodes) jobs +* Use [GNU parallel](salomon/capacity-computing/#gnu-parallel) when running single core jobs +* Combine [GNU parallel with Job arrays](salomon/capacity-computing/#job-arrays-and-gnu-parallel) when running huge number of single core jobs ## Policy -1. A user is allowed to submit at most 100 jobs. Each job may be [a job array](capacity-computing/#job-arrays). +1. A user is allowed to submit at most 100 jobs. Each job may be [a job array](salomon/capacity-computing/#job-arrays). 1. The array size is at most 1500 subjobs. ## Job Arrays @@ -76,7 +76,7 @@ If huge number of parallel multicore (in means of multinode multithread, e. g. M ### Submit the Job Array -To submit the job array, use the qsub -J command. The 900 jobs of the [example above](capacity-computing/#array_example) may be submitted like this: +To submit the job array, use the qsub -J command. The 900 jobs of the [example above](salomon/capacity-computing/#array_example) may be submitted like this: ```console $ qsub -N JOBNAME -J 1-900 jobscript @@ -147,7 +147,7 @@ Display status information for all user's subjobs. $ qstat -u $USER -tJ ``` -Read more on job arrays in the [PBSPro Users guide](../pbspro/). +Read more on job arrays in the [PBSPro Users guide](software/pbspro/). ## GNU Parallel @@ -209,7 +209,7 @@ In this example, tasks from tasklist are executed via the GNU parallel. The jobs ### Submit the Job -To submit the job, use the qsub command. The 101 tasks' job of the [example above](capacity-computing/#gp_example) may be submitted like this: +To submit the job, use the qsub command. The 101 tasks' job of the [example above](salomon/capacity-computing/#gp_example) may be submitted like this: ```console $ qsub -N JOBNAME jobscript @@ -294,7 +294,7 @@ When deciding this values, think about following guiding rules : ### Submit the Job Array (-J) -To submit the job array, use the qsub -J command. 
The 960 tasks' job of the [example above](capacity-computing/#combined_example) may be submitted like this: +To submit the job array, use the qsub -J command. The 960 tasks' job of the [example above](salomon/capacity-computing/#combined_example) may be submitted like this: ```console $ qsub -N JOBNAME -J 1-960:48 jobscript @@ -308,7 +308,7 @@ In this example, we submit a job array of 20 subjobs. Note the -J 1-960:48, thi ## Examples -Download the examples in [capacity.zip](capacity.zip), illustrating the above listed ways to run huge number of jobs. We recommend to try out the examples, before using this for running production jobs. +Download the examples in [capacity.zip](salomon/capacity.zip), illustrating the above listed ways to run huge number of jobs. We recommend trying out the examples before using this approach for production jobs. Unzip the archive in an empty directory on the cluster and follow the instructions in the README file diff --git a/docs.it4i/salomon/compute-nodes.md b/docs.it4i/salomon/compute-nodes.md index 20163d0ee..f4bbb717f 100644 --- a/docs.it4i/salomon/compute-nodes.md +++ b/docs.it4i/salomon/compute-nodes.md @@ -5,7 +5,7 @@ Salomon is a cluster of x86-64 Intel-based nodes. The cluster contains two types of compute nodes of the same processor type and memory size. Compute nodes with MIC accelerator **contain two Intel Xeon Phi 7120P accelerators.** -[More about schematic representation of the Salomon cluster compute nodes IB topology](ib-single-plane-topology/). +[More about schematic representation of the Salomon cluster compute nodes IB topology](salomon/ib-single-plane-topology/). ### Compute Nodes Without Accelerator * two Intel Xeon E5-2680v3, 12-core, 2.5 GHz processors per node * 128 GB of physical memory per node - + ### Compute Nodes With MIC Accelerator * two Intel Xeon E5-2680v3, 12-core, 2.5 GHz processors per node * 128 GB of physical memory per node * MIC accelerator 2 x Intel Xeon Phi 7120P per node, 61-cores, 16 GB per accelerator - + - + - + ### UV 2000 * 14 x Intel Xeon E5-4627v2, 8-core, 3.3 GHz processors, in 14 NUMA nodes * 3328 GB of physical memory per node * 1 x NVIDIA GM200 (GeForce GTX TITAN X), 12 GB RAM - + ### Compute Nodes Summary diff --git a/docs.it4i/salomon/hardware-overview.md b/docs.it4i/salomon/hardware-overview.md index 4569358cf..5e10652b9 100644 --- a/docs.it4i/salomon/hardware-overview.md +++ b/docs.it4i/salomon/hardware-overview.md @@ -4,9 +4,9 @@ The Salomon cluster consists of 1008 computational nodes of which 576 are regular compute nodes and 432 accelerated nodes. Each node is a powerful x86-64 computer, equipped with 24 cores (two twelve-core Intel Xeon processors) and 128 GB RAM. The nodes are interlinked by high speed InfiniBand and Ethernet networks. All nodes share 0.5 PB /home NFS disk storage to store the user files. Users may use a DDN Lustre shared storage with capacity of 1.69 PB which is available for the scratch project data. The user access to the Salomon cluster is provided by four login nodes. -[More about schematic representation of the Salomon cluster compute nodes IB topology](ib-single-plane-topology/). +[More about schematic representation of the Salomon cluster compute nodes IB topology](salomon/ib-single-plane-topology/).
- + The parameters are summarized in the following tables: @@ -17,7 +17,7 @@ The parameters are summarized in the following tables: | Primary purpose | High Performance Computing | | Architecture of compute nodes | x86-64 | | Operating system | CentOS 6.x Linux | -| [**Compute nodes**](compute-nodes/) | | +| [**Compute nodes**](salomon/compute-nodes/) | | | Totally | 1008 | | Processor | 2 x Intel Xeon E5-2680v3, 2.5 GHz, 12 cores | | RAM | 128GB, 5.3 GB per core, DDR4@2133 MHz | @@ -36,7 +36,7 @@ The parameters are summarized in the following tables: | w/o accelerator | 576 | 2 x Intel Xeon E5-2680v3, 2.5 GHz | 24 | 128 GB | - | | MIC accelerated | 432 | 2 x Intel Xeon E5-2680v3, 2.5 GHz | 24 | 128 GB | 2 x Intel Xeon Phi 7120P, 61 cores, 16 GB RAM | -For more details refer to the [Compute nodes](compute-nodes/). +For more details refer to the [Compute nodes](salomon/compute-nodes/). ## Remote Visualization Nodes @@ -52,6 +52,6 @@ For large memory computations a special SMP/NUMA SGI UV 2000 server is available | Node | Count | Processor | Cores | Memory | Extra HW | | ------ | ----- | ------------------------------------------- | ----- | --------------------- | ------------------------------------------------------------------------ | -| UV2000 | 1 | 14 x Intel Xeon E5-4627v2, 3.3 GHz, 8 cores | 112 | 3328 GB DDR3@1866 MHz | 2 x 400GB local SSD</br>1x NVIDIA GM200 (GeForce GTX TITAN X), 12 GB RAM | +| UV2000 | 1 | 14 x Intel Xeon E5-4627v2, 3.3 GHz, 8 cores | 112 | 3328 GB DDR3@1866 MHz | 2 x 400GB local SSD, 1x NVIDIA GM200 (GeForce GTX TITAN X), 12 GB RAM | - + diff --git a/docs.it4i/salomon/ib-single-plane-topology.md b/docs.it4i/salomon/ib-single-plane-topology.md index e8f72801a..4af3d3212 100644 --- a/docs.it4i/salomon/ib-single-plane-topology.md +++ b/docs.it4i/salomon/ib-single-plane-topology.md @@ -12,20 +12,20 @@ The SGI ICE X IB Premium Blade provides the first level of interconnection via d Each color in each physical IRU represents one dual-switch ASIC switch. -[IB single-plane topology - ICEX Mcell.pdf](<../src/IB single-plane topology - ICEX Mcell.pdf>) +[IB single-plane topology - ICEX Mcell.pdf](src/IB single-plane topology - ICEX Mcell.pdf) - + ## IB Single-Plane Topology - Accelerated Nodes -Each of the 3 inter-connected D racks are equivalent to one half of M-Cell rack. 18 x D rack with MIC accelerated nodes [r21-r38] are equivalent to 3 M-Cell racks as shown in a diagram [7D Enhanced Hypercube](7d-enhanced-hypercube/). +Each of the 3 inter-connected D racks is equivalent to one half of an M-Cell rack. 18 x D rack with MIC accelerated nodes [r21-r38] are equivalent to 3 M-Cell racks as shown in a diagram [7D Enhanced Hypercube](salomon/7d-enhanced-hypercube/). -As shown in a diagram [IB Topology](7d-enhanced-hypercube/#ib-topology) +As shown in a diagram [IB Topology](salomon/7d-enhanced-hypercube/#ib-topology) * Racks 21, 22, 23, 24, 25, 26 are equivalent to one M-Cell rack. * Racks 27, 28, 29, 30, 31, 32 are equivalent to one M-Cell rack. * Racks 33, 34, 35, 36, 37, 38 are equivalent to one M-Cell rack.
-[IB single-plane topology - Accelerated nodes.pdf](<../src/IB single-plane topology - Accelerated nodes.pdf>) +[IB single-plane topology - Accelerated nodes.pdf](src/IB single-plane topology - Accelerated nodes.pdf) - + diff --git a/docs.it4i/salomon/introduction.md b/docs.it4i/salomon/introduction.md index 5e6882737..c60a205bc 100644 --- a/docs.it4i/salomon/introduction.md +++ b/docs.it4i/salomon/introduction.md @@ -1,17 +1,17 @@ # Introduction -Welcome to Salomon supercomputer cluster. The Salomon cluster consists of 1008 compute nodes, totalling 24192 compute cores with 129 TB RAM and giving over 2 Pflop/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 24 cores, and at least 128 GB RAM. Nodes are interconnected through a 7D Enhanced hypercube InfiniBand network and are equipped with Intel Xeon E5-2680v3 processors. The Salomon cluster consists of 576 nodes without accelerators, and 432 nodes equipped with Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](hardware-overview/). +Welcome to the Salomon supercomputer cluster. The Salomon cluster consists of 1008 compute nodes, totalling 24192 compute cores with 129 TB RAM and giving over 2 Pflop/s theoretical peak performance. Each node is a powerful x86-64 computer, equipped with 24 cores, and at least 128 GB RAM. Nodes are interconnected through a 7D Enhanced hypercube InfiniBand network and are equipped with Intel Xeon E5-2680v3 processors. The Salomon cluster consists of 576 nodes without accelerators, and 432 nodes equipped with Intel Xeon Phi MIC accelerators. Read more in [Hardware Overview](salomon/hardware-overview/). The cluster runs with a [CentOS Linux](http://www.bull.com/bullx-logiciels/systeme-exploitation.html) operating system, which is compatible with the RedHat [Linux family.](http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg) ## Water-Cooled Compute Nodes With MIC Accelerators - + - + ## Tape Library T950B - + - + diff --git a/docs.it4i/salomon/job-priority.md b/docs.it4i/salomon/job-priority.md index 3083c8396..e4515f3d9 100644 --- a/docs.it4i/salomon/job-priority.md +++ b/docs.it4i/salomon/job-priority.md @@ -16,7 +16,7 @@ Queue priority is priority of queue where job is queued before execution. Queue priority has the biggest impact on job execution priority. Execution priority of jobs in higher priority queues is always greater than execution priority of jobs in lower priority queues. Other properties of a job used for determining job execution priority (fair-share priority, eligible time) cannot compete with queue priority. -Queue priorities can be seen at <https://extranet.it4i.cz/rsweb/salomon/queues> +Queue priorities can be seen at [https://extranet.it4i.cz/rsweb/salomon/queues](https://extranet.it4i.cz/rsweb/salomon/queues) ### Fair-Share Priority @@ -72,6 +72,6 @@ Specifying more accurate walltime enables better scheduling, better execution ti ### Job Placement -Job [placement can be controlled by flags during submission](job-submission-and-execution/#job_placement). +Job [placement can be controlled by flags during submission](salomon/job-submission-and-execution/#job_placement).
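To illustrate the walltime advice in the job-priority section above: a more accurate walltime is requested at submission time via the walltime resource. The following is a minimal sketch, reusing the OPEN-0-0 project ID from the surrounding examples; the node count and the 12-hour limit are example values only.

```console
$ qsub -A OPEN-0-0 -q qprod -l select=4:ncpus=24 -l walltime=12:00:00 ./myjob
```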
---8<--- "mathjax.md" diff --git a/docs.it4i/salomon/job-submission-and-execution.md b/docs.it4i/salomon/job-submission-and-execution.md index 0018ea9e3..ee87ddcf2 100644 --- a/docs.it4i/salomon/job-submission-and-execution.md +++ b/docs.it4i/salomon/job-submission-and-execution.md @@ -102,7 +102,7 @@ exec_vnode = (r21u05n581-mic0:naccelerators=1:ncpus=0) Per NUMA node allocation. Jobs are isolated by cpusets. -The UV2000 (node uv1) offers 3TB of RAM and 104 cores, distributed in 13 NUMA nodes. A NUMA node packs 8 cores and approx. 247GB RAM (with exception, node 11 has only 123GB RAM). In the PBS the UV2000 provides 13 chunks, a chunk per NUMA node (see [Resource allocation policy](resources-allocation-policy/)). The jobs on UV2000 are isolated from each other by cpusets, so that a job by one user may not utilize CPU or memory allocated to a job by other user. Always, full chunks are allocated, a job may only use resources of the NUMA nodes allocated to itself. +The UV2000 (node uv1) offers 3TB of RAM and 104 cores, distributed in 13 NUMA nodes. A NUMA node packs 8 cores and approx. 247GB RAM (with one exception: node 11 has only 123GB RAM). In the PBS the UV2000 provides 13 chunks, a chunk per NUMA node (see [Resource allocation policy](salomon/resources-allocation-policy/)). The jobs on UV2000 are isolated from each other by cpusets, so that a job by one user may not utilize CPU or memory allocated to a job by another user. Full chunks are always allocated; a job may only use resources of the NUMA nodes allocated to it. ```console $ qsub -A OPEN-0-0 -q qfat -l select=13 ./myjob ``` @@ -165,7 +165,7 @@ In this example, we allocate nodes r24u35n680 and r24u36n681, all 24 cores per n ### Placement by Network Location -Network location of allocated nodes in the [InifiBand network](network/) influences efficiency of network communication between nodes of job. Nodes on the same InifiBand switch communicate faster with lower latency than distant nodes. To improve communication efficiency of jobs, PBS scheduler on Salomon is configured to allocate nodes - from currently available resources - which are as close as possible in the network topology. +Network location of allocated nodes in the [InfiniBand network](salomon/network/) influences efficiency of network communication between the nodes of a job. Nodes on the same InfiniBand switch communicate faster with lower latency than distant nodes. To improve communication efficiency of jobs, PBS scheduler on Salomon is configured to allocate nodes - from currently available resources - which are as close as possible in the network topology. For communication intensive jobs it is possible to set a stricter requirement - to require nodes directly connected to the same InfiniBand switch or to require nodes located in the same dimension group of the InfiniBand network. @@ -238,7 +238,7 @@ Nodes located in the same dimension group may be allocated using node grouping o | 6D | ehc_6d | 432,576 | | 7D | ehc_7d | all | -In this example, we allocate 16 nodes in the same [hypercube dimension](7d-enhanced-hypercube/) 1 group. +In this example, we allocate 16 nodes in the same [hypercube dimension](salomon/7d-enhanced-hypercube/) 1 group. ```console $ qsub -A OPEN-0-0 -q qprod -l select=16:ncpus=24 -l place=group=ehc_1d -I @@ -516,7 +516,7 @@ HTML commented section #2 (examples need to be reworked) !!! note Local scratch directory is often useful for single node jobs. Local scratch will be deleted immediately after the job ends.
Be very careful, use of RAM disk filesystem is at the expense of operational memory. -Example jobscript for single node calculation, using [local scratch](storage/) on the node: +Example jobscript for single node calculation, using [local scratch](salomon/storage/) on the node: ```bash #!/bin/bash diff --git a/docs.it4i/salomon/network.md b/docs.it4i/salomon/network.md index 91da0de5e..252fe034a 100644 --- a/docs.it4i/salomon/network.md +++ b/docs.it4i/salomon/network.md @@ -5,10 +5,10 @@ network. Only [InfiniBand](http://en.wikipedia.org/wiki/InfiniBand) network may ## InfiniBand Network -All compute and login nodes of Salomon are interconnected by 7D Enhanced hypercube [Infiniband](http://en.wikipedia.org/wiki/InfiniBand) network (56 Gbps). The network topology is a [7D Enhanced hypercube](7d-enhanced-hypercube/). +All compute and login nodes of Salomon are interconnected by 7D Enhanced hypercube [Infiniband](http://en.wikipedia.org/wiki/InfiniBand) network (56 Gbps). The network topology is a [7D Enhanced hypercube](salomon/7d-enhanced-hypercube/). -Read more about schematic representation of the Salomon cluster [IB single-plain topology](ib-single-plane-topology/) -([hypercube dimension](7d-enhanced-hypercube/) 0). +Read more about schematic representation of the Salomon cluster [IB single-plane topology](salomon/ib-single-plane-topology/) +([hypercube dimension](salomon/7d-enhanced-hypercube/)). The compute nodes may be accessed via the Infiniband network using ib0 network interface, in address range 10.17.0.0 (mask 255.255.224.0). The MPI may be used to establish native Infiniband connection among the nodes. diff --git a/docs.it4i/salomon/resource-allocation-and-job-execution.md b/docs.it4i/salomon/resource-allocation-and-job-execution.md index b1ab38d58..2dd2b394e 100644 --- a/docs.it4i/salomon/resource-allocation-and-job-execution.md +++ b/docs.it4i/salomon/resource-allocation-and-job-execution.md @@ -1,10 +1,10 @@ # Resource Allocation and Job Execution -To run a [job](job-submission-and-execution/), [computational resources](resources-allocation-policy/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [official documentation here](../pbspro/), especially in the PBS Pro User's Guide. +To run a [job](salomon/job-submission-and-execution/), [computational resources](salomon/resources-allocation-policy/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [official documentation here](software/pbspro/), especially in the PBS Pro User's Guide. ## Resources Allocation Policy -The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and resources available to the Project. [The Fair-share](job-priority/) at Salomon ensures that individual users may consume approximately equal amount of resources per week. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. Following queues are available to Anselm users: +The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and resources available to the Project.
[The Fair-share](salomon/job-priority/) at Salomon ensures that individual users may consume an approximately equal amount of resources per week. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. Following queues are available to Salomon users: * **qexp**, the Express queue * **qprod**, the Production queue @@ -15,9 +15,9 @@ The resources are allocated to the job in a fair-share fashion, subject to const * **qfree**, the Free resource utilization queue !!! note - Check the queue status at <https://extranet.it4i.cz/rsweb/salomon/> + Check the queue status at [https://extranet.it4i.cz/rsweb/salomon/](https://extranet.it4i.cz/rsweb/salomon/) -Read more on the [Resource Allocation Policy](resources-allocation-policy/) page. +Read more on the [Resource Allocation Policy](salomon/resources-allocation-policy/) page. ## Job Submission and Execution @@ -26,4 +26,4 @@ Read more on the [Resource Allocation Policy](resources-allocation-policy/) page The qsub submits the job into the queue. The qsub command creates a request to the PBS Job manager for allocation of specified resources. The **smallest allocation unit is entire node, 24 cores**, with exception of the qexp queue. The resources will be allocated when available, subject to allocation policies and constraints. **After the resources are allocated the jobscript or interactive shell is executed on first of the allocated nodes.** -Read more on the [Job submission and execution](job-submission-and-execution/) page. +Read more on the [Job submission and execution](salomon/job-submission-and-execution/) page. diff --git a/docs.it4i/salomon/resources-allocation-policy.md b/docs.it4i/salomon/resources-allocation-policy.md index 82cb9c046..e4af3e86e 100644 --- a/docs.it4i/salomon/resources-allocation-policy.md +++ b/docs.it4i/salomon/resources-allocation-policy.md @@ -2,7 +2,7 @@ ## Job Queue Policies -The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and resources available to the Project. The fair-share at Anselm ensures that individual users may consume approximately equal amount of resources per week. Detailed information in the [Job scheduling](job-priority/) section. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. Following table provides the queue partitioning overview: +The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and resources available to the Project. The fair-share at Salomon ensures that individual users may consume an approximately equal amount of resources per week. Detailed information in the [Job scheduling](salomon/job-priority/) section. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. Following table provides the queue partitioning overview: !!! note Check the queue status at <https://extranet.it4i.cz/rsweb/salomon/> @@ -35,18 +35,18 @@ The resources are allocated to the job in a fair-share fashion, subject to const ## Queue Notes -The job wall-clock time defaults to **half the maximum time**, see table above.
Longer wall time limits can be [set manually, see examples](salomon/job-submission-and-execution/). Jobs that exceed the reserved wall-clock time (Req'd Time) get killed automatically. Wall-clock time limit can be changed for queuing jobs (state Q) using the qalter command, however it cannot be changed for a running job (state R). -Salomon users may check current queue configuration at <https://extranet.it4i.cz/rsweb/salomon/queues>. +Salomon users may check current queue configuration at [https://extranet.it4i.cz/rsweb/salomon/queues](https://extranet.it4i.cz/rsweb/salomon/queues). ## Queue Status !!! note Check the status of jobs, queues and compute nodes at [https://extranet.it4i.cz/rsweb/salomon/](https://extranet.it4i.cz/rsweb/salomon) - + Display the queue status on Salomon: diff --git a/docs.it4i/salomon/shell-and-data-access.md b/docs.it4i/salomon/shell-and-data-access.md index c02f2bc18..8d8b918d0 100644 --- a/docs.it4i/salomon/shell-and-data-access.md +++ b/docs.it4i/salomon/shell-and-data-access.md @@ -15,7 +15,7 @@ The Salomon cluster is accessed by SSH protocol via login nodes login1, login2, | login3.salomon.it4i.cz | 22 | ssh | login3 | | login4.salomon.it4i.cz | 22 | ssh | login4 | -The authentication is by the [private key](../general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) +The authentication is by the [private key](general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) !!! note Please verify SSH fingerprints during the first logon. They are identical on all login nodes: @@ -44,7 +44,7 @@ If you see warning message "UNPROTECTED PRIVATE KEY FILE!", use this command to local $ chmod 600 /path/to/id_rsa ``` -On **Windows**, use [PuTTY ssh client](../general/accessing-the-clusters/shell-access-and-data-transfer/putty.md). +On **Windows**, use [PuTTY ssh client](general/accessing-the-clusters/shell-access-and-data-transfer/putty.md). After logging in, you will see the command prompt: @@ -60,12 +60,12 @@ After logging in, you will see the command prompt: http://www.it4i.cz/?lang=en -Last login: Tue Jul 9 15:57:38 2013 from your-host.example.com +Last login: Tue Jul 9 15:57:38 2018 from your-host.example.com [username@login2.salomon ~]$ ``` !!! note - The environment is **not** shared between login nodes, except for [shared filesystems](storage/). + The environment is **not** shared between login nodes, except for [shared filesystems](salomon/storage/). ## Data Transfer Data in and out of the system may be transferred by the [scp](http://en.wikipedia.org/wiki/Secure_copy) | login3.salomon.it4i.cz | 22 | scp, sftp | | login4.salomon.it4i.cz | 22 | scp, sftp | -The authentication is by the [private key](../general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) +The authentication is by the [private key](general/accessing-the-clusters/shell-access-and-data-transfer/ssh-keys/) On Linux or Mac, use an scp or sftp client to transfer the data to Salomon: @@ -115,7 +115,7 @@ $ man sshfs On Windows, use [WinSCP client](http://winscp.net/eng/download.php) to transfer the data. The [win-sshfs client](http://code.google.com/p/win-sshfs/) provides a way to mount the Salomon filesystems directly as an external disc. -More information about the shared file systems is available [here](storage/). +More information about the shared file systems is available [here](salomon/storage/). ## Connection Restrictions @@ -199,9 +199,9 @@ Now, configure the applications proxy settings to **localhost:6000**.
Use port f ## Graphical User Interface -* The [X Window system](../general/accessing-the-clusters/graphical-user-interface/x-window-system/) is a principal way to get GUI access to the clusters. +* The [X Window system](general/accessing-the-clusters/graphical-user-interface/x-window-system/) is a principal way to get GUI access to the clusters. * The [Virtual Network Computing](../general/accessing-the-clusters/graphical-user-interface/vnc/) is a graphical [desktop sharing](http://en.wikipedia.org/wiki/Desktop_sharing) system that uses the [Remote Frame Buffer protocol](http://en.wikipedia.org/wiki/RFB_protocol) to remotely control another [computer](http://en.wikipedia.org/wiki/Computer). ## VPN Access -* Access to IT4Innovations internal resources via [VPN](../general/accessing-the-clusters/vpn-access/). +* Access to IT4Innovations internal resources via [VPN](general/accessing-the-clusters/vpn-access/). diff --git a/docs.it4i/salomon/software/ansys/ansys-cfx.md b/docs.it4i/salomon/software/ansys/ansys-cfx.md index ce25a028b..41987cb48 100644 --- a/docs.it4i/salomon/software/ansys/ansys-cfx.md +++ b/docs.it4i/salomon/software/ansys/ansys-cfx.md @@ -47,9 +47,8 @@ echo Machines: $hl /ansys_inc/v145/CFX/bin/cfx5solve -def input.def -size 4 -size-ni 4x -part-large -start-method "Platform MPI Distributed Parallel" -par-dist $hl -P aa_r ``` -Header of the pbs file (above) is common and description can be find on [this site](../../job-submission-and-execution/). SVS FEM recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. +Header of the pbs file (above) is common and description can be found on [this site](salomon/job-submission-and-execution/). SVS FEM recommends utilizing resources via the keywords: nodes, ppn. These keywords allow addressing directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. The rest of the code also assumes such a structure of allocated resources. Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. >Input file has to be defined by common CFX def file which is attached to the CFX solver via parameter -def **License** should be selected by parameter -P (capital letter **P**). Licensed products are the following: aa_r (ANSYS **Academic** Research), ane3fl (ANSYS Multiphysics)-**Commercial**. -[More about licensing here](licensing/) diff --git a/docs.it4i/salomon/software/ansys/ansys-fluent.md b/docs.it4i/salomon/software/ansys/ansys-fluent.md index 27afebf82..8b8329950 100644 --- a/docs.it4i/salomon/software/ansys/ansys-fluent.md +++ b/docs.it4i/salomon/software/ansys/ansys-fluent.md @@ -38,7 +38,7 @@ NCORES=`wc -l $PBS_NODEFILE |awk '{print $1}'` /ansys_inc/v145/fluent/bin/fluent 3d -t$NCORES -cnf=$PBS_NODEFILE -g -i fluent.jou ``` -Header of the pbs file (above) is common and description can be find on [this site](../../resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
+The header of the PBS file (above) is common; its description can be found on [this site](salomon/resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends requesting resources via the keywords nodes and ppn, which directly set the number of nodes (computers) and cores per node (ppn) to be used by the job. The rest of the script assumes this structure of allocated resources.

Working directory has to be created before sending pbs job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common Fluent journal file which is attached to the Fluent solver via parameter -i fluent.jou

@@ -151,12 +151,12 @@ Fluent could be run in parallel only under Academic Research license. To do so t

ANSLIC_ADMIN Utility will be run

-
+

-
+

-
+

ANSYS Academic Research license should be moved up to the top of the list.

-
+

diff --git a/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md b/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md
index 55be78c14..4d96c7485 100644
--- a/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md
+++ b/docs.it4i/salomon/software/ansys/ansys-ls-dyna.md
@@ -50,6 +50,6 @@ echo Machines: $hl
/ansys_inc/v145/ansys/bin/ansys145 -dis -lsdynampp i=input.k -machines $hl
```

-Header of the PBS file (above) is common and description can be find on [this site](../../resource-allocation-and-job-execution/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
+The header of the PBS file (above) is common; its description can be found on [this site](salomon/resource-allocation-and-job-execution/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends requesting resources via the keywords nodes and ppn, which directly set the number of nodes (computers) and cores per node (ppn) to be used by the job. The rest of the script assumes this structure of allocated resources.

Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common LS-DYNA .**k** file which is attached to the ansys solver via parameter i=

diff --git a/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md b/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md
index 450c9750d..22db12bea 100644
--- a/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md
+++ b/docs.it4i/salomon/software/ansys/ansys-mechanical-apdl.md
@@ -49,8 +49,8 @@ echo Machines: $hl
/ansys_inc/v145/ansys/bin/ansys145 -b -dis -p aa_r -i input.dat -o file.out -machines $hl -dir $WORK_DIR
```

-Header of the PBS file (above) is common and description can be find on [this site](../../resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources.
+The header of the PBS file (above) is common; its description can be found on [this site](salomon/resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends requesting resources via the keywords nodes and ppn, which directly set the number of nodes (computers) and cores per node (ppn) to be used by the job. The rest of the script assumes this structure of allocated resources.
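For illustration only, a job header following this nodes/ppn convention might begin as sketched below; the project ID, queue name, and sizes are placeholders, not values prescribed by this documentation.

```bash
#!/bin/bash
#PBS -A OPEN-0-0           # project ID (placeholder)
#PBS -q qprod              # queue (placeholder)
#PBS -l nodes=2:ppn=16     # request 2 nodes (computers) with 16 cores (ppn) each
```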
Working directory has to be created before sending PBS job into the queue. Input file should be in working directory or full path to input file has to be specified. Input file has to be defined by common APDL file which is attached to the ansys solver via parameter -i

-**License** should be selected by parameter -p. Licensed products are the following: aa_r (ANSYS **Academic** Research), ane3fl (ANSYS Multiphysics)-**Commercial**, aa_r_dy (ANSYS **Academic** AUTODYN) [More about licensing here](licensing/)
+**License** should be selected by parameter -p. Licensed products are the following: aa_r (ANSYS **Academic** Research), ane3fl (ANSYS Multiphysics)-**Commercial**, aa_r_dy (ANSYS **Academic** AUTODYN)

diff --git a/docs.it4i/salomon/software/ansys/ansys.md b/docs.it4i/salomon/software/ansys/ansys.md
index 79fca741f..1692c321c 100644
--- a/docs.it4i/salomon/software/ansys/ansys.md
+++ b/docs.it4i/salomon/software/ansys/ansys.md
@@ -2,12 +2,12 @@
**[SVS FEM](http://www.svsfem.cz/)** as **[ANSYS Channel partner](http://www.ansys.com/)** for Czech Republic provided all ANSYS licenses for ANSELM cluster and supports of all ANSYS Products (Multiphysics, Mechanical, MAPDL, CFX, Fluent, Maxwell, LS-DYNA...) to IT staff and ANSYS users. If you are challenging to problem of ANSYS functionality contact [hotline@svsfem.cz](mailto:hotline@svsfem.cz?subject=Ostrava%20-%20ANSELM)

-Anselm provides as commercial as academic variants. Academic variants are distinguished by "**Academic...**" word in the name of license or by two letter preposition "**aa\_**" in the license feature name. Change of license is realized on command line respectively directly in user's PBS file (see individual products). [More about licensing here](licensing/)
+Anselm provides both commercial and academic variants. Academic variants are distinguished by the word "**Academic...**" in the license name or by the two-letter prefix "**aa\_**" in the license feature name. The license is selected on the command line or directly in the user's PBS file (see the individual products).

To load the latest version of any ANSYS product (Mechanical, Fluent, CFX, MAPDL,...) load the module:

```bash
- $ ml ansys
+$ ml ansys
```

ANSYS supports interactive regime, but due to assumed solution of extremely difficult tasks it is not recommended.

diff --git a/docs.it4i/salomon/software/chemistry/nwchem.md b/docs.it4i/salomon/software/chemistry/nwchem.md
index bbabdd3ee..ed6414026 100644
--- a/docs.it4i/salomon/software/chemistry/nwchem.md
+++ b/docs.it4i/salomon/software/chemistry/nwchem.md
@@ -40,4 +40,4 @@ The recommend to use version 6.5. Version 6.3 fails on Salomon nodes with accele
Please refer to [the documentation](http://www.nwchem-sw.org/index.php/Release62:Top-level) and in the input file set the following directives :

* MEMORY : controls the amount of memory NWChem will use
-* SCRATCH_DIR : set this to a directory in [SCRATCH filesystem](../../storage/storage/) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, e.g. `scf direct`
+* SCRATCH_DIR : set this to a directory in [SCRATCH filesystem](salomon/storage/) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, e.g. `scf direct` (see the sketch below)
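A minimal sketch of appending these directives to an input file follows; the input file name and scratch path are placeholders, and the exact directive placement should be verified against the NWChem documentation.

```console
$ cat >> input.nw <<EOF
memory 1000 mb
scratch_dir /scratch/username/nwchem
scf
  direct
end
EOF
```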
diff --git a/docs.it4i/salomon/software/numerical-languages/octave.md b/docs.it4i/salomon/software/numerical-languages/octave.md
index 787e6a325..9750a20db 100644
--- a/docs.it4i/salomon/software/numerical-languages/octave.md
+++ b/docs.it4i/salomon/software/numerical-languages/octave.md
@@ -1,6 +1,6 @@
# Octave

-GNU Octave is a high-level interpreted language, primarily intended for numerical computations. It provides capabilities for the numerical solution of linear and nonlinear problems, and for performing other numerical experiments. It also provides extensive graphics capabilities for data visualization and manipulation. Octave is normally used through its interactive command line interface, but it can also be used to write non-interactive programs. The Octave language is quite similar to Matlab so that most programs are easily portable. Read more on <http://www.gnu.org/software/octave/>
+GNU Octave is a high-level interpreted language, primarily intended for numerical computations. It provides capabilities for the numerical solution of linear and nonlinear problems, and for performing other numerical experiments. It also provides extensive graphics capabilities for data visualization and manipulation. Octave is normally used through its interactive command line interface, but it can also be used to write non-interactive programs. The Octave language is quite similar to Matlab so that most programs are easily portable. Read more on [http://www.gnu.org/software/octave/](http://www.gnu.org/software/octave/).

Two versions of octave are available on the cluster, via module

@@ -45,7 +45,7 @@ To run octave in batch mode, write an octave script, then write a bash jobscript
exit
```

-This script may be submitted directly to the PBS workload manager via the qsub command. The inputs are in octcode.m file, outputs in output.out file. See the single node jobscript example in the [Job execution section](../../).
+This script may be submitted directly to the PBS workload manager via the qsub command. The inputs are in the octcode.m file, the outputs in the output.out file. See the single node jobscript example in the [Job execution section](salomon/job-submission-and-execution).

The octave c compiler mkoctfile calls the GNU gcc 4.8.1 for compiling native c code. This is very useful for running native c subroutines in octave environment.

diff --git a/docs.it4i/salomon/software/numerical-libraries/.gitkeep b/docs.it4i/salomon/software/numerical-libraries/.gitkeep
deleted file mode 100644
index e69de29bb..000000000

diff --git a/docs.it4i/salomon/software/phys/LMGC90.md b/docs.it4i/salomon/software/phys/LMGC90.md
index aa796c19d..923e1a442 100644
--- a/docs.it4i/salomon/software/phys/LMGC90.md
+++ b/docs.it4i/salomon/software/phys/LMGC90.md
@@ -3,7 +3,6 @@
## Introduction

LMGC90 is a free and open source software dedicated to multiple physics simulation of discrete material and structures.
-More details on the capabilities of LMGC90 are available [here][Welcome].

## Modules

@@ -70,7 +69,7 @@ The files inside the `DISPLAY` directory can be visualized with paraview.
It is

- porofe: porous mechanical mesh
- multife: multi-phasic fluid in porous media mesh

-[Welcome]: <http://www.lmgc.univ-montp2.fr/~dubois/LMGC90/Web/Welcome_!.html>
-[pre_lmgc]: <http://www.lmgc.univ-montp2.fr/%7Edubois/LMGC90/UserDoc/pre/index.html>
-[chipy]: <http://www.lmgc.univ-montp2.fr/%7Edubois/LMGC90/UserDoc/chipy/index.html>
-[LMGC90_Postpro.pdf]: <https://git-xen.lmgc.univ-montp2.fr/lmgc90/lmgc90_user/blob/2017.rc1/manuals/LMGC90_Postpro.pdf>
+[Welcome](http://www.lmgc.univ-montp2.fr/~dubois/LMGC90/Web/Welcome_!.html)
+[pre_lmgc](http://www.lmgc.univ-montp2.fr/%7Edubois/LMGC90/UserDoc/pre/index.html)
+[chipy](http://www.lmgc.univ-montp2.fr/%7Edubois/LMGC90/UserDoc/chipy/index.html)
+[LMGC90_Postpro.pdf](https://git-xen.lmgc.univ-montp2.fr/lmgc90/lmgc90_user/blob/2017.rc1/manuals/LMGC90_Postpro.pdf)

diff --git a/docs.it4i/salomon/software/phys/PragTic.md b/docs.it4i/salomon/software/phys/PragTic.md
index 7a4f5ccfb..ee97b63e0 100644
--- a/docs.it4i/salomon/software/phys/PragTic.md
+++ b/docs.it4i/salomon/software/phys/PragTic.md
@@ -51,4 +51,4 @@ where

After computation newly created result file *RESULT_FILE* in the current directory should contain results. More detailed result informations then should be found in the file *res.txt* which is in every single randomly named folder created by PragTic in the very same current directory.

-[Welcome]: <http://www.pragtic.com/>
+[Welcome](http://www.pragtic.com/)

diff --git a/docs.it4i/salomon/storage.md b/docs.it4i/salomon/storage.md
index a592c1058..fee9f7cd1 100644
--- a/docs.it4i/salomon/storage.md
+++ b/docs.it4i/salomon/storage.md
@@ -46,7 +46,7 @@ Configuration of the SCRATCH Lustre storage

### Understanding the Lustre File Systems

-<http://www.nas.nasa.gov>
+[http://www.nas.nasa.gov](http://www.nas.nasa.gov)

A user file on the Lustre file system can be divided into multiple chunks (stripes) and stored across a subset of the object storage targets (OSTs) (disks). The stripes are distributed among the OSTs in a round-robin fashion to ensure load balancing.

@@ -106,7 +106,7 @@ Another good practice is to make the stripe count be an integral factor of the n
Large stripe size allows each client to have exclusive access to its own part of a file. However, it can be counterproductive in some cases if it does not match your I/O pattern. The choice of stripe size has no effect on a single-stripe file.

-Read more on <http://wiki.lustre.org/manual/LustreManual20_HTML/ManagingStripingFreeSpace.html>
+Read more on [http://wiki.lustre.org/manual/LustreManual20_HTML/ManagingStripingFreeSpace.html](http://wiki.lustre.org/manual/LustreManual20_HTML/ManagingStripingFreeSpace.html)

## Disk Usage and Quota Commands

@@ -235,7 +235,7 @@ Users home directories /home/username reside on HOME file system. Accessible cap

The HOME should not be used to archive data of past Projects or other unrelated data.

-The files on HOME will not be deleted until end of the [users lifecycle](../general/obtaining-login-credentials/obtaining-login-credentials/).
+The files on HOME will not be deleted until the end of the [user's lifecycle](general/obtaining-login-credentials/obtaining-login-credentials/).

The workspace is backed up, such that it can be restored in case of catasthropic failure resulting in significant data loss. This backup however is not intended to restore old versions of user data or to restore (accidentaly) deleted files.
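As a quick, illustrative sketch of checking usage against the quota and capacity limits discussed in this storage section (the mount points and options are assumptions, to be checked against the cluster configuration):

```console
$ lfs quota -u $USER /scratch      # Lustre quota report for the SCRATCH filesystem
$ du -hs /home/$USER               # total size of the HOME directory
```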
@@ -332,7 +332,7 @@ It is not recommended to allocate large amount of memory and use large amount of The Global RAM disk spans the local RAM disks of all the nodes within a single job. - + The Global RAM disk deploys BeeGFS On Demand parallel filesystem, using local RAM disks as a storage backend. diff --git a/docs.it4i/salomon/visualization.md b/docs.it4i/salomon/visualization.md index ccc778ddc..062a17c98 100644 --- a/docs.it4i/salomon/visualization.md +++ b/docs.it4i/salomon/visualization.md @@ -14,8 +14,8 @@ Remote visualization with NICE DCV software is availabe on two nodes. ## References -* [Graphical User Interface](shell-and-data-access/#graphical-user-interface) -* [VPN Access](shell-and-data-access/#vpn-access) +* [Graphical User Interface](salomon/shell-and-data-access/#graphical-user-interface) +* [VPN Access](salomon/shell-and-data-access/#vpn-access) ## Install and Run @@ -25,7 +25,7 @@ Remote visualization with NICE DCV software is availabe on two nodes. * [Linux download](http://www.nice-software.com/storage/nice-dcv/2016.0/endstation/linux/nice-dcv-endstation-2016.0-17066.run) * [Windows download](http://www.nice-software.com/storage/nice-dcv/2016.0/endstation/win/nice-dcv-endstation-2016.0-17066-Release.msi) -**Install VPN client** [VPN Access](../general/accessing-the-clusters/vpn-access/) (user-computer) +**Install VPN client** [VPN Access](general/accessing-the-clusters/vpn-access/) (user-computer) !!! note Visualisation server is a compute node. You are not able to SSH with your private key. There are two solutions available to solve login issue. @@ -36,11 +36,11 @@ Remote visualization with NICE DCV software is availabe on two nodes. * Generate public fingerprint for your private key with PuTTYgen - + * Add this key to `~/.ssh/authorized_keys` on the cluster - + * Use your standard SSH key to connect to visualization server @@ -49,17 +49,17 @@ Remote visualization with NICE DCV software is availabe on two nodes. * Install WinSCP client (user-computer) [Download WinSCP installer](https://winscp.net/download/WinSCP-5.13.3-Setup.exe) * Add credentials - + * Add path to key file - + * Save * Copy `~/.ssh/id_rsa` to your computer * Convert key to PuTTY format with PuTTYgen - + * Use this new ssh key to connect to visualization server @@ -69,12 +69,12 @@ Remote visualization with NICE DCV software is availabe on two nodes. 
* [Download PuTTY installer](https://the.earth.li/~sgtatham/putty/latest/w64/putty-64bit-0.70-installer.msi) * Configure PuTTY - + * Add credentials and key file (create 3x sessions: **vizserv1.salomon.it4i.cz**, **vizserv2.salomon.it4i.cz**, **login1.salomon.it4i.cz**) * Config SSH tunnels (user-computer) (for sessions vizserv1 and vizserv2 only) - ports: **5901**, **5902**, **7300-7305** - + * Save @@ -110,14 +110,14 @@ $ qsub -I -q qviz -A OPEN-XX-XX -l select=1:ncpus=4:host=vizserv2,walltime=04:00 * vizserv2: localhost:5902 * fill password - - + + **Check DCV status (Salomon-vizservX) in VNC window** **Run glxgears (Salomon-vizservX)** - + **LOGOUT FROM MENU: System->Logout** @@ -170,13 +170,13 @@ $ qsub -I -q qviz -A OPEN-XX-XX -l select=1:ncpus=4:host=vizserv2,walltime=04:00 * vizserv2: localhost:5902 * fill password - - + + **Check DCV status in VNC window** **Run glxgears** - + -**LOGOUT FROM MENU: System->Logout** \ No newline at end of file +**LOGOUT FROM MENU: System->Logout** diff --git a/docs.it4i/software/bio/bioinformatics.md b/docs.it4i/software/bio/bioinformatics.md index 91de9ca9c..2b4a68850 100644 --- a/docs.it4i/software/bio/bioinformatics.md +++ b/docs.it4i/software/bio/bioinformatics.md @@ -222,7 +222,7 @@ sci-libs/umfpack-5.6.2 | libraries | 4 | | **Total** | **93** | - + ## Other Applications Available Through Gentoo Linux diff --git a/docs.it4i/software/bio/omics-master/diagnostic-component-team.md b/docs.it4i/software/bio/omics-master/diagnostic-component-team.md index f54fe184a..bdac047d1 100644 --- a/docs.it4i/software/bio/omics-master/diagnostic-component-team.md +++ b/docs.it4i/software/bio/omics-master/diagnostic-component-team.md @@ -13,6 +13,6 @@ VCF files are scanned by this diagnostic tool for known diagnostic disease-assoc TEAM (27) is an intuitive and easy-to-use web tool that fills the gap between the predicted mutations and the final diagnostic in targeted enrichment sequencing analysis. The tool searches for known diagnostic mutations, corresponding to a disease panel, among the predicted patient’s variants. Diagnostic variants for the disease are taken from four databases of disease-related variants (HGMD, HUMSAVAR , ClinVar and COSMIC) If no primary diagnostic variant is found, then a list of secondary findings that can help to establish a diagnostic is produced. TEAM also provides with an interface for the definition of and customization of panels, by means of which, genes and mutations can be added or discarded to adjust panel definitions. - + ** Figure 5. **Interface of the application. Panels for defining targeted regions of interest can be set up by just drag and drop known disease genes or disease definitions from the lists. Thus, virtual panels can be interactively improved as the knowledge of the disease increases. diff --git a/docs.it4i/software/bio/omics-master/overview.md b/docs.it4i/software/bio/omics-master/overview.md index e29f1daec..70798018d 100644 --- a/docs.it4i/software/bio/omics-master/overview.md +++ b/docs.it4i/software/bio/omics-master/overview.md @@ -9,7 +9,7 @@ The scope of this OMICS MASTER solution is restricted to human genomics research The pipeline inputs the raw data produced by the sequencing machines and undergoes a processing procedure that consists on a quality control, the mapping and variant calling steps that result in a file containing the set of variants in the sample. From this point, the prioritization component or the diagnostic component can be launched.  
+them, depending of the experimental design carried out.](img/fig1.png) Figure 1. OMICS MASTER solution overview. Data is produced in the external labs and comes to IT4I (represented by the blue dashed line). The data pre-processor converts raw data into a list of variants and annotations for each sequenced patient. These lists files together with primary and secondary (alignment) data files are stored in IT4I sequence DB and uploaded to the discovery (candidate prioritization) or diagnostic component where they can be analyzed directly by the user that produced them, depending of the experimental design carried out. @@ -41,7 +41,7 @@ Output: FASTQ file plus an HTML file containing statistics on the data. FASTQ format It represents the nucleotide sequence and its corresponding quality scores. - + Figure 2.FASTQ file. #### Mapping @@ -81,7 +81,7 @@ corresponding information is unavailable. The standard CIGAR description of pairwise alignment defines three operations: ‘M’ for match/mismatch, ‘I’ for insertion compared with the reference and ‘D’ for deletion. The extended CIGAR proposed in SAM added four more operations: ‘N’ for skipped bases on the reference, ‘S’ for soft clipping, ‘H’ for hard clipping and ‘P’ for padding. These support splicing, clipping, multi-part and padded alignments. Figure 3 shows examples of CIGAR strings for different types of alignments. - + Figure 3 . SAM format file. The ‘@SQ’ line in the header section gives the order of reference sequences. Notably, r001 is the name of a read pair. According to FLAG 163 (=1+2+32+128), the read mapped to position 7 is the second read in the pair (128) and regarded as properly paired (1 + 2); its mate is mapped to 37 on the reverse strand (32). Read r002 has three soft-clipped (unaligned) bases. The coordinate shown in SAM is the position of the first aligned base. The CIGAR string for this alignment contains a P (padding) operation which correctly aligns the inserted sequences. Padding operations can be absent when an aligner does not support multiple sequence alignment. The last six bases of read r003 map to position 9, and the first five to position 29 on the reverse strand. The hard clipping operation H indicates that the clipped sequence is not present in the sequence field. The NM tag gives the number of mismatches. Read r004 is aligned across an intron, indicated by the N operation. @@ -124,8 +124,7 @@ VCF (3) is a standardized format for storing the most prevalent types of sequenc A VCF file consists of a header section and a data section. The header contains an arbitrary number of metainformation lines, each starting with characters ‘##’, and a TAB delimited field definition line, starting with a single ‘#’ character. The meta-information header lines provide a standardized description of tags and annotations used in the data section. The use of meta-information allows the information stored within a VCF file to be tailored to the dataset in question. It can be also used to provide information about the means of file creation, date of creation, version of the reference sequence, software used and any other information relevant to the history of the file. 
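Purely for orientation, a tiny fabricated VCF fragment consistent with the header and column layout described here might look as follows (all values are illustrative only):

```console
$ cat mini.vcf
##fileformat=VCFv4.1
#CHROM  POS    ID         REF  ALT  QUAL  FILTER  INFO   FORMAT    SAMPLE1
20      14370  rs6054257  G    A    29    PASS    DP=14  GT:GQ:DP  0|0:48:1
```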
The field definition line names eight mandatory columns, corresponding to data columns representing the chromosome (CHROM), a 1-based position of the start of the variant (POS), unique identifiers of the variant (ID), the reference allele (REF), a comma separated list of alternate non-reference alleles (ALT), a phred-scaled quality score (QUAL), site filtering information (FILTER) and a semicolon separated list of additional, user extensible annotation (INFO). In addition, if samples are present in the file, the mandatory header columns are followed by a FORMAT column and an arbitrary number of sample IDs that define the samples included in the VCF file. The FORMAT column is used to define the information contained within each subsequent genotype column, which consists of a colon separated list of fields. For example, the FORMAT field GT:GQ:DP in the fourth data entry of Figure 1a indicates that the subsequent entries contain information regarding the genotype, genotype quality and read depth for each sample. All data lines are TAB delimited and the number of fields in each data line must match the number of fields in the header line. It is strongly recommended that all annotation tags used are declared in the VCF header section.  +this list; the reference haplotype is designated as 0. For multiploid data, the separator indicates whether the data are phased (|) or unphased (/). Thus, the two alleles C and G at the positions 2 and 5 in this figure occur on the same chromosome in SAMPLE1. The first data line shows an example of a deletion (present in SAMPLE1) and a replacement of two bases by another base (SAMPLE2); the second line shows a SNP and an insertion; the third a SNP; the fourth a large structural variant described by the annotation in the INFO column, the coordinate is that of the base before the variant. (b–f ) Alignments and VCF representations of different sequence variants: SNP, insertion, deletion, replacement, and a large deletion. The REF columns shows the reference bases replaced by the haplotype in the ALT column. The coordinate refers to the first reference base. (g) Users are advised to use simplest representation possible and lowest coordinate in cases where the position is ambiguous.](img/fig4.png) Figure 4 . (a) Example of valid VCF. The header lines ##fileformat and #CHROM are mandatory, the rest is optional but strongly recommended. Each line of the body describes variants present in the sampled population at one genomic position or region. All alternate alleles are listed in the ALT column and referenced from the genotype fields as 1-based indexes to this list; the reference haplotype is designated as 0. For multiploid data, the separator indicates whether the data are phased (|) or unphased (/). Thus, the two alleles C and G at the positions 2 and 5 in this figure occur on the same chromosome in SAMPLE1. The first data line shows an example of a deletion (present in SAMPLE1) and a replacement of two bases by another base (SAMPLE2); the second line shows a SNP and an insertion; the third a SNP; the fourth a large structural variant described by the annotation in the INFO column, the coordinate is that of the base before the variant. (b–f ) Alignments and VCF representations of different sequence variants: SNP, insertion, deletion, replacement, and a large deletion. The REF columns shows the reference bases replaced by the haplotype in the ALT column. The coordinate refers to the first reference base. 
(g) Users are advised to use simplest representation possible and lowest coordinate in cases where the position is ambiguous.

@@ -167,9 +166,9 @@ Systems biology

We also import systems biology information like interactome information from IntAct (24). Reactome (25) stores pathway and interaction information in BioPAX (26) format. BioPAX data exchange format enables the integration of diverse pathway resources. We successfully solved the problem of storing data released in BioPAX format into a SQL relational schema, which allowed us importing Reactome in CellBase.

-### [Diagnostic Component (TEAM)](diagnostic-component-team/)
+### [Diagnostic Component (TEAM)](software/bio/omics-master/diagnostic-component-team/)

-### [Priorization Component (BiERApp)](priorization-component-bierapp/)
+### [Priorization Component (BiERApp)](software/bio/omics-master/priorization-component-bierapp/)

## Usage

@@ -264,7 +263,7 @@ The ped file ( file.ped) contains the following info:
FAM sample_B 0 0 2 2
```

-Now, lets load the NGSPipeline module and copy the sample data to a [scratch directory](../../salomon/storage/):
+Now, let's load the NGSPipeline module and copy the sample data to a [scratch directory](salomon/storage/):

```console
$ ml ngsPipeline
@@ -278,7 +277,7 @@ Now, we can launch the pipeline (replace OPEN-0-0 with your Project ID):
$ ngsPipeline -i /scratch/$USER/omics/sample_data/data -o /scratch/$USER/omics/results -p /scratch/$USER/omics/sample_data/data/file.ped --project OPEN-0-0 --queue qprod
```

-This command submits the processing [jobs to the queue](../../salomon/job-submission-and-execution/).
+This command submits the processing [jobs to the queue](salomon/job-submission-and-execution/).

If we want to re-launch the pipeline from stage 4 until stage 20 we should use the next command:

@@ -342,19 +341,19 @@ The output folder contains all the subfolders with the intermediate data. This f
Once the file has been uploaded, a panel must be chosen from the Panel list. Then, pressing the Run button the diagnostic process starts. TEAM searches first for known diagnostic mutation(s) taken from four databases: HGMD-public (20), [HUMSAVAR](http://www.uniprot.org/docs/humsavar), ClinVar (29) and COSMIC (23).

-
+

Figure 7. The panel manager. The elements used to define a panel are ( A ) disease terms, ( B ) diagnostic mutations and ( C ) genes. Arrows represent actions that can be taken in the panel manager. Panels can be defined by using the known mutations and genes of a particular disease. This can be done by dragging them to the Primary Diagnostic box (action D ). This action, in addition to defining the diseases in the Primary Diagnostic box, automatically adds the corresponding genes to the Genes box. The panels can be customized by adding new genes (action F ) or removing undesired genes (action G). New disease mutations can be added independently or associated to an already existing disease term (action E ). Disease terms can be removed by simply dragging them back (action H ).

For variant discovering/filtering we should upload the VCF file into BierApp by using the following form:

-\\
+\

Figure 8 . \BierApp VCF upload panel. It is recommended to choose a name for the job as well as a description \\.

Each prioritization (‘job’) has three associated screens that facilitate the filtering steps. The first one, the ‘Summary’ tab, displays a statistic of the data set analyzed, containing the samples analyzed, the number and types of variants found and its distribution according to consequence types.
The second screen, in the ‘Variants and effect’ tab, is the actual filtering tool, and the third one, the ‘Genome view’ tab, offers a representation of the selected variants within the genomic context provided by an embedded version of the Genome Maps Tool (30).

-
+

Figure 9 . This picture shows all the information associated to the variants. If a variant has an associated phenotype we could see it in the last column. In this case, the variant 7:132481242 CT is associated to the phenotype: large intestine tumor.

diff --git a/docs.it4i/software/bio/omics-master/priorization-component-bierapp.md b/docs.it4i/software/bio/omics-master/priorization-component-bierapp.md
index df31fced4..ae65293e0 100644
--- a/docs.it4i/software/bio/omics-master/priorization-component-bierapp.md
+++ b/docs.it4i/software/bio/omics-master/priorization-component-bierapp.md
@@ -13,7 +13,7 @@ BiERapp is available at the [following address](http://omics.it4i.cz/bierapp/)

BiERapp (28) efficiently helps in the identification of causative variants in family and sporadic genetic diseases. The program reads lists of predicted variants (nucleotide substitutions and indels) in affected individuals or tumor samples and controls. In family studies, different modes of inheritance can easily be defined to filter out variants that do not segregate with the disease along the family. Moreover, BiERapp integrates additional information such as allelic frequencies in the general population and the most popular damaging scores to further narrow down the number of putative variants in successive filtering steps. BiERapp provides an interactive and user-friendly interface that implements the filtering strategy used in the context of a large-scale genomic project carried out by the Spanish Network for Research, in Rare Diseases (CIBERER) and the Medical Genome Project. in which more than 800 exomes have been analyzed.

-
+

** Figure 6 **. Web interface to the prioritization tool. This figure shows the interface of the web tool for candidate gene prioritization with the filters available. The tool includes a genomic viewer (Genome Maps 30) that enables the representation of the variants in the corresponding genomic coordinates.

diff --git a/docs.it4i/software/cae/comsol/comsol-multiphysics.md b/docs.it4i/software/cae/comsol/comsol-multiphysics.md
index dfe984124..f60a1be85 100644
--- a/docs.it4i/software/cae/comsol/comsol-multiphysics.md
+++ b/docs.it4i/software/cae/comsol/comsol-multiphysics.md
@@ -18,7 +18,7 @@ On the clusters COMSOL is available in the latest stable version. There are two

* **Non commercial** or so called **EDU variant**, which can be used for research and educational purposes.

-* **Commercial** or so called **COM variant**, which can used also for commercial activities. **COM variant** has only subset of features compared to the **EDU variant** available. More about licensing [here](licensing-and-available-versions/).
+* **Commercial** or so-called **COM variant**, which can also be used for commercial activities. The **COM variant** has only a subset of features compared to the **EDU variant**. More about licensing [here](software/cae/comsol/licensing-and-available-versions/).

To load the of COMSOL load the module

@@ -32,7 +32,7 @@ By default the **EDU variant** will be loaded. If user needs other version or va
$ ml av COMSOL
```

-If user needs to prepare COMSOL jobs in the interactive mode it is recommend to use COMSOL on the compute nodes via PBS Pro scheduler. In order run the COMSOL Desktop GUI on Windows is recommended to use the [Virtual Network Computing (VNC)](../../general/accessing-the-clusters/graphical-user-interface/x-window-system/).
+If a user needs to prepare COMSOL jobs in the interactive mode, it is recommended to use COMSOL on the compute nodes via the PBS Pro scheduler. To run the COMSOL Desktop GUI on Windows, it is recommended to use [Virtual Network Computing (VNC)](general/accessing-the-clusters/graphical-user-interface/x-window-system/).

Example for Salomon:

@@ -76,7 +76,7 @@ Working directory has to be created before sending the (comsol.pbs) job script i

COMSOL is the software package for the numerical solution of the partial differential equations. LiveLink for MATLAB allows connection to the COMSOL API (Application Programming Interface) with the benefits of the programming language and computing environment of the MATLAB.

-LiveLink for MATLAB is available in both **EDU** and **COM** **variant** of the COMSOL release. On the clusters 1 commercial (**COM**) license and the 5 educational (**EDU**) licenses of LiveLink for MATLAB (see the [ISV Licenses](../isv_licenses/)) are available. Following example shows how to start COMSOL model from MATLAB via LiveLink in the interactive mode (on Anselm use 16 threads).
+LiveLink for MATLAB is available in both the **EDU** and **COM** **variants** of the COMSOL release. On the clusters, 1 commercial (**COM**) license and 5 educational (**EDU**) licenses of LiveLink for MATLAB are available (see the [ISV Licenses](software/isv_licenses/)). The following example shows how to start a COMSOL model from MATLAB via LiveLink in the interactive mode (on Anselm use 16 threads).

```console
$ xhost +

diff --git a/docs.it4i/software/chemistry/molpro.md b/docs.it4i/software/chemistry/molpro.md
index 2fb61643a..b6d157164 100644
--- a/docs.it4i/software/chemistry/molpro.md
+++ b/docs.it4i/software/chemistry/molpro.md
@@ -35,7 +35,7 @@ Molpro is compiled for parallel execution using MPI and OpenMP. By default, Molp

!!! note
    The OpenMP parallelization in Molpro is limited and has been observed to produce limited scaling. We therefore recommend to use MPI parallelization only. This can be achieved by passing option mpiprocs=16:ompthreads=1 to PBS.

-You are advised to use the -d option to point to a directory in [SCRATCH file system - Salomon](../../salomon/storage/). Molpro can produce a large amount of temporary data during its run, and it is important that these are placed in the fast scratch file system.
+You are advised to use the -d option to point to a directory in [SCRATCH file system - Salomon](salomon/storage/). Molpro can produce a large amount of temporary data during its run, and it is important that these are placed in the fast scratch file system.

### Example jobscript

diff --git a/docs.it4i/software/chemistry/nwchem.md b/docs.it4i/software/chemistry/nwchem.md
index 41c2006e4..3c7a1ca5f 100644
--- a/docs.it4i/software/chemistry/nwchem.md
+++ b/docs.it4i/software/chemistry/nwchem.md
@@ -33,4 +33,4 @@ mpirun nwchem h2o.nw

Please refer to [the documentation](http://www.nwchem-sw.org/index.php/Release62:Top-level) and in the input file set the following directives :

* MEMORY : controls the amount of memory NWChem will use
-* SCRATCH_DIR : set this to a directory in [SCRATCH filesystem - Salomon](../../salomon/storage/) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, eg. "scf direct"
"scf direct" +* SCRATCH_DIR : set this to a directory in [SCRATCH filesystem - Salomon](salomon/storage/) (or run the calculation completely in a scratch directory). For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, eg. "scf direct" diff --git a/docs.it4i/software/chemistry/phono3py.md b/docs.it4i/software/chemistry/phono3py.md index 9aa2faa7d..073ab4155 100644 --- a/docs.it4i/software/chemistry/phono3py.md +++ b/docs.it4i/software/chemistry/phono3py.md @@ -2,7 +2,7 @@ ## Introduction -This GPL software calculates phonon-phonon interactions via the third order force constants. It allows to obtain lattice thermal conductivity, phonon lifetime/linewidth, imaginary part of self energy at the lowest order, joint density of states (JDOS) and weighted-JDOS. For details see Phys. Rev. B 91, 094306 (2015) and <http://atztogo.github.io/phono3py/index.html> +This GPL software calculates phonon-phonon interactions via the third order force constants. It allows to obtain lattice thermal conductivity, phonon lifetime/linewidth, imaginary part of self energy at the lowest order, joint density of states (JDOS) and weighted-JDOS. For details see Phys. Rev. B 91, 094306 (2015) and [http://atztogo.github.io/phono3py/index.html](http://atztogo.github.io/phono3py/index.html) Available modules @@ -61,7 +61,7 @@ POSCAR-00006 POSCAR-00015 POSCAR-00024 POSCAR-00033 POSCAR-00042 POSCAR-00051 POSCAR-00007 POSCAR-00016 POSCAR-00025 POSCAR-00034 POSCAR-00043 POSCAR-00052 POSCAR-00061 POSCAR-00070 POSCAR-00079 POSCAR-00088 POSCAR-00097 POSCAR-00106 ``` -For each displacement the forces needs to be calculated, i.e. in form of the output file of VASP (vasprun.xml). For a single VASP calculations one needs [KPOINTS](KPOINTS), [POTCAR](POTCAR), [INCAR](INCAR) in your case directory (where you have POSCARS) and those 111 displacements calculations can be generated by [prepare.sh](prepare.sh) script. Then each of the single 111 calculations is submitted [run.sh](run.sh) by [submit.sh](submit.sh). +For each displacement the forces needs to be calculated, i.e. in form of the output file of VASP (vasprun.xml). For a single VASP calculations one needs [KPOINTS](software/chemistry/KPOINTS), [POTCAR](software/chemistry/POTCAR), [INCAR](software/chemistry/INCAR) in your case directory (where you have POSCARS) and those 111 displacements calculations can be generated by [prepare.sh](software/chemistry/prepare.sh) script. Then each of the single 111 calculations is submitted [run.sh](software/chemistry/run.sh) by [submit.sh](software/chemistry/submit.sh). ```console $./prepare.sh @@ -155,7 +155,7 @@ one finds which grid points needed to be calculated, for instance using followin $ phono3py --fc3 --fc2 --dim="2 2 2" --mesh="9 9 9" -c POSCAR --sigma 0.1 --br --write-gamma --gp="0 1 2 ``` -one calculates grid points 0, 1, 2. To automize one can use for instance scripts to submit 5 points in series, see [gofree-cond1.sh](gofree-cond1.sh) +one calculates grid points 0, 1, 2. 
```console
$ qsub gofree-cond1.sh

diff --git a/docs.it4i/software/compilers.md b/docs.it4i/software/compilers.md
index 293926cbe..0aa3a56c9 100644
--- a/docs.it4i/software/compilers.md
+++ b/docs.it4i/software/compilers.md
@@ -24,7 +24,7 @@ Commercial licenses:

## Intel Compilers

-For information about the usage of Intel Compilers and other Intel products, read the [Intel Parallel studio](intel-suite/intel-compilers/) page.
+For information about the usage of Intel Compilers and other Intel products, read the [Intel Parallel studio](software/intel-suite/intel-compilers/) page.

## PGI Compilers (Only on Salomon)

@@ -187,8 +187,8 @@ For more information see the man pages.

## Java

-For information how to use Java (runtime and/or compiler), read the [Java page](java/).
+For information on how to use Java (runtime and/or compiler), read the [Java page](software/java/).

## NVIDIA CUDA

-For information how to work with NVIDIA CUDA, read the [NVIDIA CUDA page](../anselm/software/nvidia-cuda/).
+For information on how to work with NVIDIA CUDA, read the [NVIDIA CUDA page](anselm/software/nvidia-cuda/).

diff --git a/docs.it4i/software/debuggers/Introduction.md b/docs.it4i/software/debuggers/Introduction.md
index 87f642fda..947da202b 100644
--- a/docs.it4i/software/debuggers/Introduction.md
+++ b/docs.it4i/software/debuggers/Introduction.md
@@ -15,7 +15,7 @@ $ ml intel
$ idb
```

-Read more at the [Intel Debugger](../intel/intel-suite/intel-debugger/) page.
+Read more at the [Intel Debugger](software/intel/intel-suite/intel-debugger/) page.

## Allinea Forge (DDT/MAP)

@@ -26,7 +26,7 @@ $ ml Forge
$ forge
```

-Read more at the [Allinea DDT](allinea-ddt/) page.
+Read more at the [Allinea DDT](software/debuggers/allinea-ddt/) page.

## Allinea Performance Reports

@@ -37,7 +37,7 @@ $ ml PerformanceReports/6.0
$ perf-report mpirun -n 64 ./my_application argument01 argument02
```

-Read more at the [Allinea Performance Reports](allinea-performance-reports/) page.
+Read more at the [Allinea Performance Reports](software/debuggers/allinea-performance-reports/) page.

## RougeWave Totalview

@@ -48,7 +48,7 @@ $ ml TotalView/8.15.4-6-linux-x86-64
$ totalview
```

-Read more at the [Totalview](total-view/) page.
+Read more at the [Totalview](software/debuggers/total-view/) page.

## Vampir Trace Analyzer

@@ -59,4 +59,4 @@ Vampir is a GUI trace analyzer for traces in OTF format.
$ vampir
```

-Read more at the [Vampir](vampir/) page.
+Read more at the [Vampir](software/debuggers/vampir/) page.

diff --git a/docs.it4i/software/debuggers/aislinn.md b/docs.it4i/software/debuggers/aislinn.md
index 0a10684f0..029ddebb2 100644
--- a/docs.it4i/software/debuggers/aislinn.md
+++ b/docs.it4i/software/debuggers/aislinn.md
@@ -3,7 +3,7 @@
* Aislinn is a dynamic verifier for MPI programs. For a fixed input it covers all possible runs with respect to nondeterminism introduced by MPI. It allows to detect bugs (for sure) that occurs very rare in normal runs.
* Aislinn detects problems like invalid memory accesses, deadlocks, misuse of MPI, and resource leaks.
* Aislinn is open-source software; you can use it without any licensing limitations.
-* Web page of the project: <http://verif.cs.vsb.cz/aislinn/>
+* Web page of the project: [http://verif.cs.vsb.cz/aislinn/](http://verif.cs.vsb.cz/aislinn/)

!!! note
    Aislinn is software developed at IT4Innovations and some parts are still considered experimental.
    If you have any questions or experienced any problems, contact the author: <mailto:stanislav.bohm@vsb.cz>.

@@ -79,7 +79,7 @@ $ firefox report.html

At the beginning of the report there are some basic summaries of the verification. In the second part (depicted in the following picture), the error is described.

-
+

It shows us:

diff --git a/docs.it4i/software/debuggers/allinea-ddt.md b/docs.it4i/software/debuggers/allinea-ddt.md
index 67bfdff18..69964d186 100644
--- a/docs.it4i/software/debuggers/allinea-ddt.md
+++ b/docs.it4i/software/debuggers/allinea-ddt.md
@@ -59,7 +59,7 @@ Be sure to log in with an X window forwarding enabled. This could mean using the
$ ssh -X username@anselm.it4i.cz
```

-Other options is to access login node using VNC. Please see the detailed information on how to [use graphic user interface on Anselm](/general/accessing-the-clusters/graphical-user-interface/x-window-system/)
+Another option is to access the login node using VNC. Please see the detailed information on how to [use the graphical user interface on Anselm](general/accessing-the-clusters/graphical-user-interface/x-window-system/)

From the login node an interactive session **with X windows forwarding** (-X option) can be started by following command:

@@ -75,7 +75,7 @@ $ ddt test_debug

A submission window that appears have a prefilled path to the executable to debug. You can select the number of MPI processors and/or OpenMP threads on which to run and press run. Command line arguments to a program can be entered to the "Arguments " box.

-
+

To start the debugging directly without the submission window, user can specify the debugging and execution parameters from the command line. For example the number of MPI processes is set by option "-np 4". Skipping the dialog is done by "-start" option. To see the list of the "ddt" command line parameters, run "ddt --help".

diff --git a/docs.it4i/software/debuggers/allinea-performance-reports.md b/docs.it4i/software/debuggers/allinea-performance-reports.md
index cea9649a2..b1e35186a 100644
--- a/docs.it4i/software/debuggers/allinea-performance-reports.md
+++ b/docs.it4i/software/debuggers/allinea-performance-reports.md
@@ -22,13 +22,13 @@ The module sets up environment variables, required for using the Allinea Perform

Use the the perf-report wrapper on your (MPI) program.

-Instead of [running your MPI program the usual way](../mpi/mpi/), use the the perf report wrapper:
+Instead of [running your MPI program the usual way](software/mpi/mpi/), use the perf-report wrapper:

```console
$ perf-report mpirun ./mympiprog.x
```

-The MPI program will run as usual. The perf-report creates two additional files, in \*.txt and \*.html format, containing the performance report. Note that demanding MPI codes should be run within [the queue system](../../salomon/job-submission-and-execution/).
+The MPI program will run as usual. The perf-report creates two additional files, in \*.txt and \*.html format, containing the performance report. Note that demanding MPI codes should be run within [the queue system](salomon/job-submission-and-execution/).

## Example

@@ -56,4 +56,4 @@ Now lets profile the code:
$ perf-report mpirun ./mympiprog.x
```

-Performance report files [mympiprog_32p\*.txt](mympiprog_32p_2014-10-15_16-56.txt) and [mympiprog_32p\*.html](mympiprog_32p_2014-10-15_16-56.html) were created. We can see that the code is very efficient on MPI and is CPU bounded.
+Performance report files [mympiprog_32p\*.txt](software/debuggers/mympiprog_32p_2014-10-15_16-56.txt) and [mympiprog_32p\*.html](software/debuggers/mympiprog_32p_2014-10-15_16-56.html) were created. We can see that the code is very efficient on MPI and is CPU bound.

diff --git a/docs.it4i/software/debuggers/cube.md b/docs.it4i/software/debuggers/cube.md
index 680a4e505..4b5f56e72 100644
--- a/docs.it4i/software/debuggers/cube.md
+++ b/docs.it4i/software/debuggers/cube.md
@@ -10,7 +10,7 @@ CUBE is a graphical performance report explorer for displaying data from Score-P

Each dimension is organized in a tree, for example the time performance metric is divided into Execution time and Overhead time, call path dimension is organized by files and routines in your source code etc.

-
+

\*Figure 1. Screenshot of CUBE displaying data from Scalasca.\*

@@ -18,7 +18,7 @@ Each node in the tree is colored by severity (the color scheme is displayed at t

## Installed Versions

-Currently, there are two versions of CUBE 4.2.3 available as [modules](../../modules-matrix/):
+Currently, there are two versions of CUBE 4.2.3 available as [modules](modules-matrix/):

* cube/4.2.3-gcc, compiled with GCC
* cube/4.2.3-icc, compiled with Intel compiler

@@ -33,4 +33,4 @@ CUBE is a graphical application. Refer to Graphical User Interface documentation
After loading the appropriate module, simply launch cube command, or alternatively you can use Scalasca -examine command to launch the GUI. Note that for Scalasca data sets, if you do not analyze the data with `scalasca -examine` before to opening them with CUBE, not all performance data will be available.

References
-1\. <http://www.scalasca.org/software/cube-4.x/download.html>
+1\. [http://www.scalasca.org/software/cube-4.x/download.html](http://www.scalasca.org/software/cube-4.x/download.html)

diff --git a/docs.it4i/software/debuggers/intel-performance-counter-monitor.md b/docs.it4i/software/debuggers/intel-performance-counter-monitor.md
index 3373cc4ee..b5da7bf60 100644
--- a/docs.it4i/software/debuggers/intel-performance-counter-monitor.md
+++ b/docs.it4i/software/debuggers/intel-performance-counter-monitor.md
@@ -2,11 +2,11 @@

## Introduction

-Intel PCM (Performance Counter Monitor) is a tool to monitor performance hardware counters on Intel>® processors, similar to [PAPI](papi/). The difference between PCM and PAPI is that PCM supports only Intel hardware, but PCM can monitor also uncore metrics, like memory controllers and >QuickPath Interconnect links.
+Intel PCM (Performance Counter Monitor) is a tool to monitor performance hardware counters on Intel® processors, similar to [PAPI](software/debuggers/papi/). The difference between PCM and PAPI is that PCM supports only Intel hardware, but PCM can also monitor uncore metrics, like memory controllers and QuickPath Interconnect links.

## Installed Version

-Currently installed version 2.6. To load the [module](../../modules-matrix/) issue:
+The currently installed version is 2.6. To load the [module](modules-matrix/), issue:

```console
$ ml intelpcm
```

@@ -276,6 +276,6 @@ $ ./matrix

## References

-1. <https://software.intel.com/en-us/articles/intel-performance-counter-monitor-a-better-way-to-measure-cpu-utilization>
-1. <https://software.intel.com/sites/default/files/m/3/2/2/xeon-e5-2600-uncore-guide.pdf> Intel® Xeon® Processor E5-2600 Product Family Uncore Performance Monitoring Guide.
-1. <http://intel-pcm-api-documentation.github.io/classPCM.html> API Documentation
+1. [https://software.intel.com/en-us/articles/intel-performance-counter-monitor-a-better-way-to-measure-cpu-utilization](https://software.intel.com/en-us/articles/intel-performance-counter-monitor-a-better-way-to-measure-cpu-utilization)
+1. [Intel® Xeon® Processor E5-2600 Product Family Uncore Performance Monitoring Guide](https://software.intel.com/sites/default/files/m/3/2/2/xeon-e5-2600-uncore-guide.pdf)
+1. [API Documentation](http://intel-pcm-api-documentation.github.io/classPCM.html)

diff --git a/docs.it4i/software/debuggers/intel-vtune-amplifier.md b/docs.it4i/software/debuggers/intel-vtune-amplifier.md
index d3529ba98..d79e6e639 100644
--- a/docs.it4i/software/debuggers/intel-vtune-amplifier.md
+++ b/docs.it4i/software/debuggers/intel-vtune-amplifier.md
@@ -9,7 +9,7 @@ Intel *®* VTune™ Amplifier, part of Intel Parallel studio, is a GUI profiling
* Low level specific counters, such as branch analysis and memory bandwidth
* Power usage analysis - frequency and sleep states.

-
+

## Usage

diff --git a/docs.it4i/software/debuggers/papi.md b/docs.it4i/software/debuggers/papi.md
index a873d3693..8361776db 100644
--- a/docs.it4i/software/debuggers/papi.md
+++ b/docs.it4i/software/debuggers/papi.md
@@ -10,7 +10,7 @@ PAPI can be used with parallel as well as serial programs.

## Usage

-To use PAPI, load [module](../../environment-and-modules/) PAPI:
+To use PAPI, load the PAPI [module](environment-and-modules/):

```console
$ ml papi
```

@@ -193,7 +193,7 @@ $ ./matrix

!!! note
    PAPI currently supports only a subset of counters on the Intel Xeon Phi processor compared to Intel Xeon, for example the floating point operations counter is missing.

-To use PAPI in [Intel Xeon Phi](../intel/intel-xeon-phi-salomon/) native applications, you need to load module with " -mic" suffix, for example " papi/5.3.2-mic" :
+To use PAPI in [Intel Xeon Phi](software/intel/intel-xeon-phi-salomon/) native applications, you need to load the module with the "-mic" suffix, for example "papi/5.3.2-mic":

```console
$ ml papi/5.3.2-mic

diff --git a/docs.it4i/software/debuggers/scalasca.md b/docs.it4i/software/debuggers/scalasca.md
index 8cf2fa2cf..066076b88 100644
--- a/docs.it4i/software/debuggers/scalasca.md
+++ b/docs.it4i/software/debuggers/scalasca.md
@@ -8,10 +8,10 @@ Scalasca supports profiling of MPI, OpenMP and hybrid MPI+OpenMP applications.

## Installed Versions

-There are currently two versions of Scalasca 2.0 [modules](../../modules-matrix/) installed on Anselm:
+There are currently two versions of Scalasca 2.0 [modules](modules-matrix/) installed on Anselm:

-* scalasca2/2.0-gcc-openmpi, for usage with [GNU Compiler](../compilers/) and [OpenMPI](../mpi/Running_OpenMPI/),
-* scalasca2/2.0-icc-impi, for usage with [Intel Compiler](../compilers/) and [Intel MPI](../mpi/running-mpich2/).
+* scalasca2/2.0-gcc-openmpi, for usage with [GNU Compiler](software/compilers/) and [OpenMPI](software/mpi/Running_OpenMPI/),
+* scalasca2/2.0-icc-impi, for usage with [Intel Compiler](software/compilers/) and [Intel MPI](software/mpi/running-mpich2/).

## Usage

@@ -23,7 +23,7 @@ Profiling a parallel application with Scalasca consists of three steps:

### Instrumentation

-Instrumentation via `scalasca -instrument` is discouraged. Use [Score-P instrumentation](score-p/).
+Instrumentation via `scalasca -instrument` is discouraged. Use [Score-P instrumentation](software/debuggers/score-p/).

### Runtime Measurement

@@ -43,11 +43,11 @@
Some notable Scalasca options are:

* **-e <directory> Specify a directory to save the collected data to. By default, Scalasca saves the data to a directory with prefix scorep\_, followed by name of the executable and launch configuration.**
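As a hedged sketch of a Scalasca measurement run (the executable name and rank count are placeholders), the usual MPI launch line is simply prefixed:

```console
$ scalasca -analyze mpirun -np 4 ./mympiprog.x
```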
!!! note
-    Scalasca can generate a huge amount of data, especially if tracing is enabled. Please consider saving the data to a [scratch directory](../../salomon/storage/).
+    Scalasca can generate a huge amount of data, especially if tracing is enabled. Please consider saving the data to a [scratch directory](salomon/storage/).

### Analysis of Reports

-For the analysis, you must have [Score-P](score-p/) and [CUBE](cube/) modules loaded. The analysis is done in two steps, first, the data is preprocessed and then CUBE GUI tool is launched.
+For the analysis, you must have the [Score-P](software/debuggers/score-p/) and [CUBE](software/debuggers/cube/) modules loaded. The analysis is done in two steps: first, the data is preprocessed, and then the CUBE GUI tool is launched.

To launch the analysis, run :

```console
scalasca -examine -s <experiment_directory>
```

@@ -63,8 +63,8 @@ scalasca -examine -s <experiment_directory>

Alternatively you can open CUBE and load the data directly from here. Keep in mind that in that case the pre-processing is not done and not all metrics will be shown in the viewer.

-Refer to [CUBE documentation](cube/) on usage of the GUI viewer.
+Refer to [CUBE documentation](software/debuggers/cube/) on usage of the GUI viewer.

## References

-1. <http://www.scalasca.org/>
+1. [http://www.scalasca.org/](http://www.scalasca.org/)

diff --git a/docs.it4i/software/debuggers/score-p.md b/docs.it4i/software/debuggers/score-p.md
index 45de7f9cb..afb55bc3b 100644
--- a/docs.it4i/software/debuggers/score-p.md
+++ b/docs.it4i/software/debuggers/score-p.md
@@ -4,14 +4,14 @@

The [Score-P measurement infrastructure](http://www.vi-hps.org/projects/score-p/) is a highly scalable and easy-to-use tool suite for profiling, event tracing, and online analysis of HPC applications.

-Score-P can be used as an instrumentation tool for [Scalasca](scalasca/).
+Score-P can be used as an instrumentation tool for [Scalasca](software/debuggers/scalasca/).

## Installed Versions

-There are currently two versions of Score-P version 1.2.6 [modules](../../modules-matrix/) installed on Anselm :
+There are currently two versions of Score-P version 1.2.6 [modules](modules-matrix/) installed on Anselm:

-* scorep/1.2.3-gcc-openmpi, for usage with [GNU Compiler](../compilers/) and [OpenMPI](../mpi/Running_OpenMPI/)
-* scorep/1.2.3-icc-impi, for usage with [Intel Compiler](../compilers/)> and [Intel MPI](../mpi/running-mpich2/)>.
+* scorep/1.2.3-gcc-openmpi, for usage with [GNU Compiler](software/compilers/) and [OpenMPI](software/mpi/Running_OpenMPI/)
+* scorep/1.2.3-icc-impi, for usage with [Intel Compiler](software/compilers/) and [Intel MPI](software/mpi/running-mpich2/).
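As a brief, illustrative sketch of putting one of these modules to work (the compiler wrapper and source file are assumptions), Score-P instrumentation typically just prefixes the compile/link command with `scorep`:

```console
$ ml scorep/1.2.3-icc-impi
$ scorep mpiicc -o mympiprog.x mympiprog.c
```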
## Instrumentation diff --git a/docs.it4i/software/debuggers/total-view.md b/docs.it4i/software/debuggers/total-view.md index aebe91a52..a59fe59f5 100644 --- a/docs.it4i/software/debuggers/total-view.md +++ b/docs.it4i/software/debuggers/total-view.md @@ -140,11 +140,11 @@ $ mpirun -tv -n 5 ./test_debug When following dialog appears click on "Yes" - + At this point the main TotalView GUI window will appear and you can insert the breakpoints and start debugging: - + ### Debugging a Parallel Code - Option 2 diff --git a/docs.it4i/software/debuggers/valgrind.md b/docs.it4i/software/debuggers/valgrind.md index 6acf83911..9591a5553 100644 --- a/docs.it4i/software/debuggers/valgrind.md +++ b/docs.it4i/software/debuggers/valgrind.md @@ -22,7 +22,7 @@ The main tools available in Valgrind are : There are two versions of Valgrind available on Anselm. * Version 3.6.0, installed by operating system vendor in /usr/bin/valgrind. This version is available by default, without the need to load any module. This version however does not provide additional MPI support. -* Version 3.9.0 with support for Intel MPI, available in [module](../../modules-matrix/) valgrind/3.9.0-impi. After loading the module, this version replaces the default valgrind. +* Version 3.9.0 with support for Intel MPI, available in [module](modules-matrix/) valgrind/3.9.0-impi. After loading the module, this version replaces the default valgrind. There are two versions of Valgrind available on the Salomon. diff --git a/docs.it4i/software/debuggers/vampir.md b/docs.it4i/software/debuggers/vampir.md index 66a9439ae..1956c9b26 100644 --- a/docs.it4i/software/debuggers/vampir.md +++ b/docs.it4i/software/debuggers/vampir.md @@ -1,8 +1,8 @@ # Vampir -Vampir is a commercial trace analysis and visualization tool. It can work with traces in OTF and OTF2 formats. It does not have the functionality to collect traces, you need to use a trace collection tool (such as [Score-P](score-p/)) first to collect the traces. +Vampir is a commercial trace analysis and visualization tool. It can work with traces in OTF and OTF2 formats. It does not have the functionality to collect traces, you need to use a trace collection tool (such as [Score-P](software/debuggers/score-p/)) first to collect the traces. - + ## Installed Versions @@ -21,4 +21,4 @@ You can find the detailed user manual in PDF format in $EBROOTVAMPIR/doc/vampir- ## References -1. <https://www.vampir.eu> +1. [https://www.vampir.eu](https://www.vampir.eu) diff --git a/docs.it4i/software/intel/intel-suite/intel-compilers.md b/docs.it4i/software/intel/intel-suite/intel-compilers.md index 92dd62849..853408ca5 100644 --- a/docs.it4i/software/intel/intel-suite/intel-compilers.md +++ b/docs.it4i/software/intel/intel-suite/intel-compilers.md @@ -32,5 +32,5 @@ Read more at <https://software.intel.com/en-us/intel-cplusplus-compiler-16.0-use Anselm nodes are currently equipped with Sandy Bridge CPUs, while Salomon compute nodes are equipped with Haswell based architecture. The UV1 SMP compute server has Ivy Bridge CPUs, which are equivalent to Sandy Bridge (only smaller manufacturing technology). The new processors are backward compatible with the Sandy Bridge nodes, so all programs that ran on the Sandy Bridge processors, should also run on the new Haswell nodes. To get optimal performance out of the Haswell processors a program should make use of the special AVX2 instructions for this processor. One can do this by recompiling codes with the compiler flags designated to invoke these instructions. 
For the Intel compiler suite, there are two ways of doing this: -* Using compiler flag (both for Fortran and C): -xCORE-AVX2. This will create a binary with AVX2 instructions, specifically for the Haswell processors. Note that the executable will not run on Sandy Bridge/Ivy Bridge nodes. -* Using compiler flags (both for Fortran and C): -xAVX -axCORE-AVX2. This will generate multiple, feature specific auto-dispatch code paths for Intel® processors, if there is a performance benefit. So this binary will run both on Sandy Bridge/Ivy Bridge and Haswell processors. During runtime it will be decided which path to follow, dependent on which processor you are running on. In general this will result in larger binaries. +* Using the compiler flag (both for Fortran and C): **-xCORE-AVX2**. This will create a binary with AVX2 instructions, specifically for the Haswell processors. Note that the executable will not run on Sandy Bridge/Ivy Bridge nodes. +* Using the compiler flags (both for Fortran and C): **-xAVX -axCORE-AVX2**. This will generate multiple, feature-specific auto-dispatch code paths for Intel® processors, if there is a performance benefit. So this binary will run both on Sandy Bridge/Ivy Bridge and Haswell processors. At runtime it will be decided which path to follow, depending on which processor you are running on. In general this will result in larger binaries. diff --git a/docs.it4i/software/intel/intel-suite/intel-debugger.md b/docs.it4i/software/intel/intel-suite/intel-debugger.md index 3317f814d..db2367535 100644 --- a/docs.it4i/software/intel/intel-suite/intel-debugger.md +++ b/docs.it4i/software/intel/intel-suite/intel-debugger.md @@ -4,7 +4,7 @@ IDB is no longer available since Intel Parallel Studio 2015 ## Debugging Serial Applications -The intel debugger version is available, via module intel/13.5.192. The debugger works for applications compiled with C and C++ compiler and the ifort fortran 77/90/95 compiler. The debugger provides java GUI environment. Use [X display](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/) for running the GUI. +The Intel debugger is available via the module intel/13.5.192. The debugger works for applications compiled with the C and C++ compilers and the ifort Fortran 77/90/95 compiler. The debugger provides a Java GUI environment. Use an [X display](general/accessing-the-clusters/graphical-user-interface/x-window-system/) for running the GUI. ```console $ ml intel/13.5.192 @@ -18,7 +18,7 @@ The debugger may run in text mode. To debug in text mode, use $ idbc ``` -To debug on the compute nodes, module intel must be loaded. The GUI on compute nodes may be accessed using the same way as in [the GUI section](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/) +To debug on the compute nodes, the module intel must be loaded. The GUI on compute nodes may be accessed in the same way as in [the GUI section](general/accessing-the-clusters/graphical-user-interface/x-window-system/) Example: @@ -40,7 +40,7 @@ In this example, we allocate 1 full compute node, compile program myprog.c with ### Small Number of MPI Ranks -For debugging small number of MPI ranks, you may execute and debug each rank in separate xterm terminal (do not forget the [X display](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/)).
Using Intel MPI, this may be done in following way: +For debugging a small number of MPI ranks, you may execute and debug each rank in a separate xterm terminal (do not forget the [X display](general/accessing-the-clusters/graphical-user-interface/x-window-system/)). Using Intel MPI, this may be done in the following way: ```console $ qsub -q qexp -l select=2:ncpus=24 -X -I diff --git a/docs.it4i/software/intel/intel-suite/intel-mkl.md b/docs.it4i/software/intel/intel-suite/intel-mkl.md index d520f68cd..cc2a80c55 100644 --- a/docs.it4i/software/intel/intel-suite/intel-mkl.md +++ b/docs.it4i/software/intel/intel-suite/intel-mkl.md @@ -37,7 +37,7 @@ Intel MKL library provides number of interfaces. The fundamental once are the LP ### Linking -Linking Intel MKL libraries may be complex. Intel [mkl link line advisor](http://software.intel.com/en-us/articles/intel-mkl-link-line-advisor) helps. See also [examples](intel-mkl/#examples) below. +Linking Intel MKL libraries may be complex. The Intel [MKL link line advisor](http://software.intel.com/en-us/articles/intel-mkl-link-line-advisor) helps. See also the [examples](#examples) below. You will need the mkl module loaded to run the mkl enabled executable. This may be avoided by compiling library search paths into the executable. Include rpath on the compile line: @@ -109,7 +109,7 @@ In this example, we compile, link and run the cblas_dgemm example, using LP64 in ## MKL and MIC Accelerators -The Intel MKL is capable to automatically offload the computations o the MIC accelerator. See section [Intel Xeon Phi](../intel-xeon-phi-salomon/) for details. +The Intel MKL is capable of automatically offloading the computations to the MIC accelerator. See the section [Intel Xeon Phi](software/intel/intel-xeon-phi-salomon/) for details. ## LAPACKE C Interface diff --git a/docs.it4i/software/intel/intel-suite/intel-parallel-studio-introduction.md b/docs.it4i/software/intel/intel-suite/intel-parallel-studio-introduction.md index 7b6ba956b..264b15e9d 100644 --- a/docs.it4i/software/intel/intel-suite/intel-parallel-studio-introduction.md +++ b/docs.it4i/software/intel/intel-suite/intel-parallel-studio-introduction.md @@ -23,7 +23,7 @@ $ icc -v $ ifort -v ``` -Read more at the [Intel Compilers](intel-compilers/) page. +Read more at the [Intel Compilers](software/intel/intel-suite/intel-compilers/) page. ## Intel Debugger @@ -36,7 +36,7 @@ $ ml intel $ idb ``` -Read more at the [Intel Debugger](intel-debugger/) page. +Read more at the [Intel Debugger](software/intel/intel-suite/intel-debugger/) page. ## Intel Math Kernel Library @@ -46,7 +46,7 @@ Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, e $ ml imkl ``` -Read more at the [Intel MKL](intel-mkl/) page. +Read more at the [Intel MKL](software/intel/intel-suite/intel-mkl/) page. ## Intel Integrated Performance Primitives @@ -56,7 +56,7 @@ Intel Integrated Performance Primitives, version 7.1.1, compiled for AVX is avai $ ml ipp ``` -Read more at the [Intel IPP](intel-integrated-performance-primitives/) page. +Read more at the [Intel IPP](software/intel/intel-suite/intel-integrated-performance-primitives/) page. ## Intel Threading Building Blocks @@ -66,4 +66,4 @@ Intel Threading Building Blocks (Intel TBB) is a library that supports scalable $ ml tbb ``` -Read more at the [Intel TBB](intel-tbb/) page. +Read more at the [Intel TBB](software/intel/intel-suite/intel-tbb/) page.
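To make the AVX2 compiler-flag guidance from intel-compilers.md (above) concrete, here is a minimal sketch of both build variants; the source file names are hypothetical and only the flags themselves come from the documentation:

```console
$ ml intel
# Haswell-only binary; will not run on Sandy Bridge/Ivy Bridge nodes
$ icc -O2 -xCORE-AVX2 -o myprog.x myprog.c
# Fat binary with auto-dispatch code paths; runs on both architectures
$ ifort -O2 -xAVX -axCORE-AVX2 -o myprog.x myprog.f90
```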
diff --git a/docs.it4i/software/intel/intel-suite/intel-tbb.md b/docs.it4i/software/intel/intel-suite/intel-tbb.md index e0de0d980..d28a92d24 100644 --- a/docs.it4i/software/intel/intel-suite/intel-tbb.md +++ b/docs.it4i/software/intel/intel-suite/intel-tbb.md @@ -2,7 +2,7 @@ ## Intel Threading Building Blocks -Intel Threading Building Blocks (Intel TBB) is a library that supports scalable parallel programming using standard ISO C++ code. It does not require special languages or compilers. To use the library, you specify tasks, not threads, and let the library map tasks onto threads in an efficient manner. The tasks are executed by a runtime scheduler and may be offloaded to [MIC accelerator](../intel-xeon-phi-salomon/). +Intel Threading Building Blocks (Intel TBB) is a library that supports scalable parallel programming using standard ISO C++ code. It does not require special languages or compilers. To use the library, you specify tasks, not threads, and let the library map tasks onto threads in an efficient manner. The tasks are executed by a runtime scheduler and may be offloaded to the [MIC accelerator](software/intel/intel-xeon-phi-salomon/). Intel TBB is available on the cluster. @@ -37,4 +37,4 @@ $ icc -O2 -o primes.x main.cpp primes.cpp -Wl,-rpath=$LIBRARY_PATH -ltbb ## Further Reading -Read more on Intel website, <http://software.intel.com/sites/products/documentation/doclib/tbb_sa/help/index.htm> +Read more on the Intel website, [http://software.intel.com/sites/products/documentation/doclib/tbb_sa/help/index.htm](http://software.intel.com/sites/products/documentation/doclib/tbb_sa/help/index.htm) diff --git a/docs.it4i/software/intel/intel-suite/intel-trace-analyzer-and-collector.md b/docs.it4i/software/intel/intel-suite/intel-trace-analyzer-and-collector.md index 57e151899..308706a78 100644 --- a/docs.it4i/software/intel/intel-suite/intel-trace-analyzer-and-collector.md +++ b/docs.it4i/software/intel/intel-suite/intel-trace-analyzer-and-collector.md @@ -21,7 +21,7 @@ The trace will be saved in file myapp.stf in the current directory. ## Viewing Traces -To view and analyze the trace, open the ITAC GUI in a [graphical environment](../../../general/accessing-the-clusters/graphical-user-interface/x-window-system/): +To view and analyze the trace, open the ITAC GUI in a [graphical environment](general/accessing-the-clusters/graphical-user-interface/x-window-system/): ```console $ ml itac/9.1.2.024 $ traceanalyzer @@ -30,7 +30,7 @@ The GUI will launch and you can open the produced `*`.stf file. - + Please refer to the Intel documentation about usage of the GUI tool.
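As a companion to the trace-viewing steps in intel-trace-analyzer-and-collector.md above, a minimal sketch of collecting a trace with Intel MPI before opening it in the GUI; the application name and rank count are hypothetical, and the `-trace` switch is the standard ITAC collection route, assumed here rather than taken from this diff:

```console
$ ml intel itac/9.1.2.024
$ mpirun -trace -n 24 ./myapp     # writes myapp.stf to the current directory
$ traceanalyzer myapp.stf
```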
diff --git a/docs.it4i/software/isv_licenses.md b/docs.it4i/software/isv_licenses.md index ad804ef99..d7cbb7cc9 100644 --- a/docs.it4i/software/isv_licenses.md +++ b/docs.it4i/software/isv_licenses.md @@ -15,7 +15,7 @@ If an ISV application was purchased for educational (research) purposes and also ### Web Interface -For each license there is a table, which provides the information about the name, number of available (purchased/licensed), number of used and number of free license features <https://extranet.it4i.cz/anselm/licenses> +For each license there is a table, which provides the information about the name, number of available (purchased/licensed), number of used, and number of free license features: [https://extranet.it4i.cz/anselm/licenses](https://extranet.it4i.cz/anselm/licenses) ### Text Interface @@ -68,7 +68,7 @@ Names of applications (APP): matlab-edu ``` -To get the FEATUREs of a license take a look into the corresponding state file ([see above](isv_licenses/#Licence)), or use: +To get the FEATUREs of a license, take a look at the corresponding state file ([see above](software/isv_licenses/#Licence)), or use: ### Application and List of Provided Features diff --git a/docs.it4i/software/lang/java.md b/docs.it4i/software/lang/java.md index 67e77ab26..f672ac73e 100644 --- a/docs.it4i/software/lang/java.md +++ b/docs.it4i/software/lang/java.md @@ -22,7 +22,7 @@ $ javac -version $ which javac ``` -Java applications may use MPI for inter-process communication, in conjunction with OpenMPI. Read more on <http://www.open-mpi.org/faq/?category=java>. This functionality is currently not supported on Anselm cluster. In case you require the java interface to MPI, contact [cluster support](https://support.it4i.cz/rt/). +Java applications may use MPI for inter-process communication, in conjunction with OpenMPI. Read more [here](http://www.open-mpi.org/faq/?category=java). This functionality is currently not supported on the Anselm cluster. In case you require the Java interface to MPI, contact [cluster support](https://support.it4i.cz/rt/). ## Java With OpenMPI diff --git a/docs.it4i/software/machine-learning/introduction.md b/docs.it4i/software/machine-learning/introduction.md index e329ef64c..5c7288676 100644 --- a/docs.it4i/software/machine-learning/introduction.md +++ b/docs.it4i/software/machine-learning/introduction.md @@ -16,12 +16,12 @@ Test module: $ ml Tensorflow ``` -Read more about available versions at the [TensorFlow page](tensorflow/). +Read more about available versions at the [TensorFlow page](software/machine-learning/tensorflow/). ## Theano -Read more about [available versions](../../modules-matrix/). +Read more about [available versions](modules-matrix/). ## Keras -Read more about [available versions](../../modules-matrix/). +Read more about [available versions](modules-matrix/). diff --git a/docs.it4i/software/mic/mic_environment.md b/docs.it4i/software/mic/mic_environment.md index a3343ca17..7f36f256a 100644 --- a/docs.it4i/software/mic/mic_environment.md +++ b/docs.it4i/software/mic/mic_environment.md @@ -1,12 +1,12 @@ # Intel Xeon Phi Environment -Intel Xeon Phi (so-called MIC) accelerator can be used in several modes ([Offload](../intel/intel-xeon-phi-salomon/#offload-mode) and [Native](#native-mode)). The default mode on the cluster is offload mode, but all modes described in this document are supported. +The Intel Xeon Phi (so-called MIC) accelerator can be used in several modes ([Offload](software/intel/intel-xeon-phi-salomon/#offload-mode) and [Native](#native-mode)).
The default mode on the cluster is offload mode, but all modes described in this document are supported. See the sections below for more details. ## Intel Utilities for Xeon Phi -Continue [here](../intel/intel-xeon-phi-salomon/) +Continue [here](software/intel/intel-xeon-phi-salomon/) ## GCC With [KNC](https://en.wikipedia.org/wiki/Xeon_Phi) Support @@ -434,4 +434,4 @@ Configure step (for `configure`,`make` and `make install` software) Modulefile and Lmod -* Read [Lmod](../modules/lmod/) +* Read [Lmod](software/modules/lmod/) diff --git a/docs.it4i/software/mpi/mpi.md b/docs.it4i/software/mpi/mpi.md index c65c0d8d2..b8a61b25e 100644 --- a/docs.it4i/software/mpi/mpi.md +++ b/docs.it4i/software/mpi/mpi.md @@ -136,6 +136,6 @@ In the previous two cases with one or two MPI processes per node, the operating ### Running OpenMPI -The [**OpenMPI 1.8.6**](http://www.open-mpi.org/) is based on OpenMPI. Read more on [how to run OpenMPI](Running_OpenMPI/) based MPI. +The available version is [**OpenMPI 1.8.6**](http://www.open-mpi.org/). Read more on [how to run OpenMPI](software/mpi/Running_OpenMPI/) based MPI. -The Intel MPI may run on the [Intel Xeon Ph](../intel/intel-xeon-phi-salomon/) accelerators as well. Read more on [how to run Intel MPI on accelerators](../intel/intel-xeon-phi-salomon/). +The Intel MPI may run on the [Intel Xeon Phi](software/intel/intel-xeon-phi-salomon/) accelerators as well. Read more on [how to run Intel MPI on accelerators](software/intel/intel-xeon-phi-salomon/). diff --git a/docs.it4i/software/mpi/mpi4py-mpi-for-python.md b/docs.it4i/software/mpi/mpi4py-mpi-for-python.md index a3b3f69ed..ea39d0048 100644 --- a/docs.it4i/software/mpi/mpi4py-mpi-for-python.md +++ b/docs.it4i/software/mpi/mpi4py-mpi-for-python.md @@ -42,7 +42,7 @@ You need to import MPI to your python program. Include the following line to the
Include the following line to the from mpi4py import MPI ``` -The MPI4Py enabled python programs [execute as any other OpenMPI](Running_OpenMPI/) code.The simpliest way is to run +The MPI4Py enabled python programs [execute as any other OpenMPI](salomon/mpi/Running_OpenMPI/) code.The simpliest way is to run ```console $ mpiexec python <script>.py diff --git a/docs.it4i/software/mpi/ompi-examples.md b/docs.it4i/software/mpi/ompi-examples.md index 1a2be74bc..c61dee8c5 100644 --- a/docs.it4i/software/mpi/ompi-examples.md +++ b/docs.it4i/software/mpi/ompi-examples.md @@ -178,35 +178,35 @@ class Hello { } ``` -* C: [hello_c.c](../../src/ompi/hello_c.c) -* C++: [hello_cxx.cc](../../src/ompi/hello_cxx.cc) -* Fortran mpif.h: [hello_mpifh.f](../../src/ompi/hello_mpifh.f) -* Fortran use mpi: [hello_usempi.f90](../../src/ompi/hello_usempi.f90) -* Fortran use mpi_f08: [hello_usempif08.f90](../../src/ompi/hello_usempif08.f90) -* Java: [Hello.java](../../src/ompi/Hello.java) -* C shmem.h: [hello_oshmem_c.c](../../src/ompi/hello_oshmem_c.c) -* Fortran shmem.fh: [hello_oshmemfh.f90](../../src/ompi/hello_oshmemfh.f90) +* C: [hello_c.c](src/ompi/hello_c.c) +* C++: [hello_cxx.cc](src/ompi/hello_cxx.cc) +* Fortran mpif.h: [hello_mpifh.f](src/ompi/hello_mpifh.f) +* Fortran use mpi: [hello_usempi.f90](src/ompi/hello_usempi.f90) +* Fortran use mpi_f08: [hello_usempif08.f90](src/ompi/hello_usempif08.f90) +* Java: [Hello.java](src/ompi/Hello.java) +* C shmem.h: [hello_oshmem_c.c](src/ompi/hello_oshmem_c.c) +* Fortran shmem.fh: [hello_oshmemfh.f90](src/ompi/hello_oshmemfh.f90) ### Send a Trivial Message Around in a Ring -* C: [ring_c.c](../../src/ompi/ring_c.c) -* C++: [ring_cxx.cc](../../src/ompi/ring_cxx.cc) -* Fortran mpif.h: [ring_mpifh.f](../../src/ompi/ring_mpifh.f) -* Fortran use mpi: [ring_usempi.f90](../../src/ompi/ring_usempi.f90) -* Fortran use mpi_f08: [ring_usempif08.f90](../../src/ompi/ring_usempif08.f90) -* Java: [Ring.java](../../src/ompi/Ring.java) -* C shmem.h: [ring_oshmem_c.c](../../src/ompi/ring_oshmem_c.c) -* Fortran shmem.fh: [ring_oshmemfh.f90](../../src/ompi/ring_oshmemfh.f90) +* C: [ring_c.c](src/ompi/ring_c.c) +* C++: [ring_cxx.cc](src/ompi/ring_cxx.cc) +* Fortran mpif.h: [ring_mpifh.f](src/ompi/ring_mpifh.f) +* Fortran use mpi: [ring_usempi.f90](src/ompi/ring_usempi.f90) +* Fortran use mpi_f08: [ring_usempif08.f90](src/ompi/ring_usempif08.f90) +* Java: [Ring.java](src/ompi/Ring.java) +* C shmem.h: [ring_oshmem_c.c](src/ompi/ring_oshmem_c.c) +* Fortran shmem.fh: [ring_oshmemfh.f90](src/ompi/ring_oshmemfh.f90) Additionally, there's one further example application, but this one only uses the MPI C bindings: ### Test the Connectivity Between All Pross -* C: [connectivity_c.c](../../src/ompi/connectivity_c.c) +* C: [connectivity_c.c](src/ompi/connectivity_c.c) ## Build Examples -Download [examples](../../src/ompi/ompi.tar.gz). +Download [examples](src/ompi/ompi.tar.gz). The Makefile in this directory will build the examples for the supported languages (e.g., if you do not have the Fortran "use mpi" bindings compiled as part of OpenMPI, those examples will be skipped). 
diff --git a/docs.it4i/software/mpi/running-mpich2.md b/docs.it4i/software/mpi/running-mpich2.md index 30679021d..35452ed70 100644 --- a/docs.it4i/software/mpi/running-mpich2.md +++ b/docs.it4i/software/mpi/running-mpich2.md @@ -152,4 +152,4 @@ $ mpirun -bindto numa echo $OMP_NUM_THREADS ## Intel MPI on Xeon Phi -The[MPI section of Intel Xeon Phi chapter](../intel/intel-xeon-phi-salomon/) provides details on how to run Intel MPI code on Xeon Phi architecture. +The [MPI section of the Intel Xeon Phi chapter](software/intel/intel-xeon-phi-salomon/) provides details on how to run Intel MPI code on the Xeon Phi architecture. diff --git a/docs.it4i/software/numerical-languages/introduction.md b/docs.it4i/software/numerical-languages/introduction.md index 39ac8f738..50ef10460 100644 --- a/docs.it4i/software/numerical-languages/introduction.md +++ b/docs.it4i/software/numerical-languages/introduction.md @@ -15,7 +15,7 @@ $ ml MATLAB $ matlab ``` -Read more at the [Matlab page](matlab/). +Read more at the [Matlab page](software/numerical-languages/matlab/). ## Octave @@ -26,7 +26,7 @@ $ ml Octave $ octave ``` -Read more at the [Octave page](octave/). +Read more at the [Octave page](software/numerical-languages/octave/). ## R @@ -37,4 +37,4 @@ $ ml R $ R ``` -Read more at the [R page](r/). +Read more at the [R page](software/numerical-languages/r/). diff --git a/docs.it4i/software/numerical-languages/matlab.md b/docs.it4i/software/numerical-languages/matlab.md index 89446eb39..73a283ae9 100644 --- a/docs.it4i/software/numerical-languages/matlab.md +++ b/docs.it4i/software/numerical-languages/matlab.md @@ -21,9 +21,9 @@ $ ml av MATLAB If you need to use the Matlab GUI to prepare your Matlab programs, you can use Matlab directly on the login nodes. But for all computations, use Matlab on the compute nodes via the PBS Pro scheduler. -If you require the Matlab GUI, follow the general information about [running graphical applications](../../general/accessing-the-clusters/graphical-user-interface/x-window-system/). +If you require the Matlab GUI, follow the general information about [running graphical applications](general/accessing-the-clusters/graphical-user-interface/x-window-system/). -Matlab GUI is quite slow using the X forwarding built in the PBS (qsub -X), so using X11 display redirection either via SSH or directly by xauth (see the "GUI Applications on Compute Nodes over VNC" part [here](../../general/accessing-the-clusters/graphical-user-interface/x-window-system/)) is recommended. +The Matlab GUI is quite slow using the X forwarding built into PBS (qsub -X), so using X11 display redirection either via SSH or directly by xauth (see the "GUI Applications on Compute Nodes over VNC" part [here](general/accessing-the-clusters/graphical-user-interface/x-window-system/)) is recommended. To run Matlab with the GUI, use @@ -68,7 +68,7 @@ With the new mode, MATLAB itself launches the workers via PBS, so you can either ### Parallel Matlab Interactive Session -Following example shows how to start interactive session with support for Matlab GUI. For more information about GUI based applications on Anselm see [this page](../../general/accessing-the-clusters/graphical-user-interface/x-window-system/). +The following example shows how to start an interactive session with support for the Matlab GUI. For more information about GUI based applications on Anselm, see [this page](general/accessing-the-clusters/graphical-user-interface/x-window-system/).
```console $ xhost + @@ -218,7 +218,7 @@ This method is a "hack" invented by us to emulate the mpiexec functionality foun !!! warning This method is experimental. -For this method, you need to use SalomonDirect profile, import it using [the same way as SalomonPBSPro](matlab.md#running-parallel-matlab-using-distributed-computing-toolbox---engine) +For this method, you need to use the SalomonDirect profile; import it in [the same way as SalomonPBSPro](#running-parallel-matlab-using-distributed-computing-toolbox---engine) This is an example of m-script using direct mode: @@ -249,11 +249,11 @@ delete(pool) ### Non-Interactive Session and Licenses -If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least the `-l __feature__matlab__MATLAB=1` for EDU variant of Matlab. More information about how to check the license features states and how to request them with PBS Pro, [look here](../isv_licenses/). +If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least `-l __feature__matlab__MATLAB=1` for the EDU variant of Matlab. For more information about how to check license feature states and how to request them with PBS Pro, [look here](software/isv_licenses/). The licensing feature of PBS is currently disabled. -In case of non-interactive session read the [following information](../isv_licenses/) on how to modify the qsub command to test for available licenses prior getting the resource allocation. +In case of a non-interactive session, read the [following information](software/isv_licenses/) on how to modify the qsub command to test for available licenses prior to getting the resource allocation. ### Matlab Distributed Computing Engines Start Up Time @@ -278,4 +278,4 @@ Since this is a SMP machine, you can completely avoid using Parallel Toolbox and ### Local Cluster Mode -You can also use Parallel Toolbox on UV2000. Use [local cluster mode](matlab/#parallel-matlab-batch-job-in-local-mode), "SalomonPBSPro" profile will not work. +You can also use Parallel Toolbox on UV2000. Use [local cluster mode](#parallel-matlab-batch-job-in-local-mode); the "SalomonPBSPro" profile will not work. diff --git a/docs.it4i/software/numerical-languages/matlab_1314.md b/docs.it4i/software/numerical-languages/matlab_1314.md index 69e666994..9dee602e8 100644 --- a/docs.it4i/software/numerical-languages/matlab_1314.md +++ b/docs.it4i/software/numerical-languages/matlab_1314.md @@ -3,7 +3,7 @@ ## Introduction !!! note - This document relates to the old versions R2013 and R2014. For MATLAB 2015 use [this documentation instead](matlab/). + This document relates to the old versions R2013 and R2014. For MATLAB 2015, use [this documentation instead](software/numerical-languages/matlab/). Matlab is available in the latest stable version. There are always two variants of the release: @@ -46,7 +46,7 @@ Plots, images, etc... will be still available. The recommended parallel mode for running parallel Matlab on Anselm is MPIEXEC mode. In this mode, the user allocates resources through PBS prior to starting Matlab. Once resources are granted, the main Matlab instance is started on the first compute node assigned to the job by PBS, and workers are started on all remaining nodes. The user can use both interactive and non-interactive PBS sessions. This mode guarantees that the data processing is not performed on login nodes, but all processing is on compute nodes. - + For performance reasons, Matlab should use the system MPI.
On Anselm, the supported MPI implementation for Matlab is Intel MPI. To switch to the system MPI, the user has to override the default Matlab setting by creating a new configuration file in their home directory. The path and file name have to be exactly the same as in the following listing: @@ -190,9 +190,9 @@ You can copy and paste the example in a .m file and execute. Note that the matla ### Non-Interactive Session and Licenses -If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least the ` -l __feature__matlab__MATLAB=1` for EDU variant of Matlab. More information about how to check the license features states and how to request them with PBS Pro, [look here](../isv_licenses/). +If you want to run batch jobs with Matlab, be sure to request appropriate license features with the PBS Pro scheduler, at least ` -l __feature__matlab__MATLAB=1` for the EDU variant of Matlab. For more information about how to check license feature states and how to request them with PBS Pro, [look here](software/isv_licenses/). -In case of non-interactive session read the [following information](../isv_licenses/) on how to modify the qsub command to test for available licenses prior getting the resource allocation. +In case of a non-interactive session, read the [following information](software/isv_licenses/) on how to modify the qsub command to test for available licenses prior to getting the resource allocation. ### Matlab Distributed Computing Engines Start Up Time diff --git a/docs.it4i/software/numerical-languages/octave.md b/docs.it4i/software/numerical-languages/octave.md index b3ce19550..524611502 100644 --- a/docs.it4i/software/numerical-languages/octave.md +++ b/docs.it4i/software/numerical-languages/octave.md @@ -2,7 +2,7 @@ ## Introduction -GNU Octave is a high-level interpreted language, primarily intended for numerical computations. It provides capabilities for the numerical solution of linear and nonlinear problems, and for performing other numerical experiments. It also provides extensive graphics capabilities for data visualization and manipulation. Octave is normally used through its interactive command line interface, but it can also be used to write non-interactive programs. The Octave language is quite similar to Matlab so that most programs are easily portable. Read more on <http://www.gnu.org/software/octave/> +GNU Octave is a high-level interpreted language, primarily intended for numerical computations. It provides capabilities for the numerical solution of linear and nonlinear problems, and for performing other numerical experiments. It also provides extensive graphics capabilities for data visualization and manipulation. Octave is normally used through its interactive command line interface, but it can also be used to write non-interactive programs. The Octave language is quite similar to Matlab so that most programs are easily portable. Read more on [http://www.gnu.org/software/octave/](http://www.gnu.org/software/octave/) To look for available modules, type: @@ -60,11 +60,11 @@ Octave may use MPI for interprocess communication This functionality is currentl ## Xeon Phi Support -Octave may take advantage of the Xeon Phi accelerators.
This will only work on the [Intel Xeon Phi](software/intel/intel-xeon-phi-salomon/) [accelerated nodes](salomon/compute-nodes/). ### Automatic Offload Support -Octave can accelerate BLAS type operations (in particular the Matrix Matrix multiplications] on the Xeon Phi accelerator, via [Automatic Offload using the MKL library](../intel/intel-xeon-phi-salomon/) +Octave can accelerate BLAS type operations (in particular the matrix-matrix multiplications) on the Xeon Phi accelerator, via [Automatic Offload using the MKL library](software/intel/intel-xeon-phi-salomon/). Example @@ -88,7 +88,7 @@ In this example, the calculation was automatically divided among the CPU cores a ### Native Support -A version of [native](../intel/intel-xeon-phi-salomon/) Octave is compiled for Xeon Phi accelerators. Some limitations apply for this version: +A version of [native](software/intel/intel-xeon-phi-salomon/) Octave is compiled for Xeon Phi accelerators. Some limitations apply to this version: * Only command line support. GUI, graph plotting, etc. are not supported. * Command history in interactive mode is not supported. diff --git a/docs.it4i/software/numerical-languages/opencoarrays.md b/docs.it4i/software/numerical-languages/opencoarrays.md index d6788ef14..2f08da82b 100644 --- a/docs.it4i/software/numerical-languages/opencoarrays.md +++ b/docs.it4i/software/numerical-languages/opencoarrays.md @@ -11,7 +11,7 @@ The variable syntax of Fortran language is extended with indexes in square brack By default, the CAF is using Message Passing Interface (MPI) for lower-level communication, so there are some similarities with MPI. -Read more on <http://www.opencoarrays.org/> +Read more on [http://www.opencoarrays.org/](http://www.opencoarrays.org/) ## Coarray Basics @@ -70,7 +70,7 @@ end program synchronization_test ``` * sync all - Synchronize all images between each other -* sync images(*) - Synchronize this image to all other +* sync images(\*) - Synchronize this image to all others * sync images(*index*) - Synchronize this image to the image with *index* !!! note diff --git a/docs.it4i/software/numerical-languages/r.md b/docs.it4i/software/numerical-languages/r.md index e83388e60..81ba42081 100644 --- a/docs.it4i/software/numerical-languages/r.md +++ b/docs.it4i/software/numerical-languages/r.md @@ -10,7 +10,7 @@ Another convenience is the ease with which the C code or third party libraries m Extensive support for parallel computing is available within R. -Read more on <http://www.r-project.org/>, <http://cran.r-project.org/doc/manuals/r-release/R-lang.html> +Read more on [http://www.r-project.org/](http://www.r-project.org/), [http://cran.r-project.org/doc/manuals/r-release/R-lang.html](http://cran.r-project.org/doc/manuals/r-release/R-lang.html) ## Modules @@ -66,11 +66,11 @@ cp routput.out $PBS_O_WORKDIR/. exit ``` -This script may be submitted directly to the PBS workload manager via the qsub command. The inputs are in rscript.R file, outputs in routput.out file. See the single node jobscript example in the [Job execution section - Anselm](../../anselm/job-submission-and-execution/). +This script may be submitted directly to the PBS workload manager via the qsub command. The inputs are in the rscript.R file, outputs in the routput.out file. See the single node jobscript example in the [Job execution section - Anselm](anselm/job-submission-and-execution/). ## Parallel R -Parallel execution of R may be achieved in many ways.
One approach is the implied parallelization due to linked libraries or specially enabled functions, as [described above](r/#interactive-execution). In the following sections, we focus on explicit parallelization, where parallel constructs are directly stated within the R script. +Parallel execution of R may be achieved in many ways. One approach is the implied parallelization due to linked libraries or specially enabled functions, as [described above](#interactive-execution). In the following sections, we focus on explicit parallelization, where parallel constructs are directly stated within the R script. ## Package Parallel @@ -144,9 +144,9 @@ Every evaluation of the integrad function runs in parallel on different process. The Rmpi package provides an interface (wrapper) to MPI APIs. -It also provides interactive R slave environment. On the cluster, Rmpi provides interface to the [OpenMPI](../mpi/Running_OpenMPI/). +It also provides an interactive R slave environment. On the cluster, Rmpi provides an interface to [OpenMPI](software/mpi/Running_OpenMPI/). -Read more on Rmpi at <http://cran.r-project.org/web/packages/Rmpi/>, reference manual is available at <http://cran.r-project.org/web/packages/Rmpi/Rmpi.pdf> +Read more on Rmpi at <http://cran.r-project.org/web/packages/Rmpi/>; the reference manual is available [here](http://cran.r-project.org/web/packages/Rmpi/Rmpi.pdf) When using the package Rmpi, both the openmpi and R modules must be loaded @@ -345,7 +345,7 @@ while (TRUE) mpi.quit() ``` -The above is the mpi.apply MPI example for calculating the number π. Only the slave processes carry out the calculation. Note the **mpi.parSapply()**, function call. The package parallel [example](r/#package-parallel) [above](r/#package-parallel) may be trivially adapted (for much better performance) to this structure using the mclapply() in place of mpi.parSapply(). +The above is the mpi.apply MPI example for calculating the number π. Only the slave processes carry out the calculation. Note the **mpi.parSapply()** function call. The package parallel [example above](#package-parallel) may be trivially adapted (for much better performance) to this structure using mclapply() in place of mpi.parSapply(). Execute the example as: @@ -361,7 +361,7 @@ Currently, the two packages can not be combined for hybrid calculations. The R parallel jobs are executed via the PBS queue system exactly as any other parallel job. The user must create an appropriate jobscript and submit it via **qsub** -Example jobscript for [static Rmpi](r/#static-rmpi) parallel R execution, running 1 process per core: +Example jobscript for [static Rmpi](#static-rmpi) parallel R execution, running 1 process per core: ```bash #!/bin/bash @@ -390,7 +390,7 @@ cp routput.out $PBS_O_WORKDIR/. exit ``` -For more information about jobscripts and MPI execution refer to the [Job submission](../../anselm/job-submission-and-execution/) and general [MPI](../mpi/mpi/) sections. +For more information about jobscripts and MPI execution, refer to the [Job submission](anselm/job-submission-and-execution/) and general [MPI](software/mpi/mpi/) sections.
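To show how the static Rmpi jobscript from r.md above would actually be submitted, a minimal sketch; the project ID, resource specification, and script name are hypothetical placeholders:

```console
$ qsub -A OPEN-0-0 -q qprod -l select=4:ncpus=16 ./rmpi_job.sh   # values are placeholders
$ qstat -u $USER                                                 # check that the job is queued
```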
## Xeon Phi Offload @@ -400,4 +400,4 @@ By leveraging MKL, R can accelerate certain computations, most notably linear al $ export MKL_MIC_ENABLE=1 ``` -[Read more about automatic offload](../intel/intel-xeon-phi-salomon/) +[Read more about automatic offload](software/intel/intel-xeon-phi-salomon/) diff --git a/docs.it4i/software/numerical-libraries/fftw.md b/docs.it4i/software/numerical-libraries/fftw.md index f9470514b..0807bd4d8 100644 --- a/docs.it4i/software/numerical-libraries/fftw.md +++ b/docs.it4i/software/numerical-libraries/fftw.md @@ -68,6 +68,6 @@ $ ml fftw3-mpi $ mpicc testfftw3mpi.c -o testfftw3mpi.x -Wl,-rpath=$LIBRARY_PATH -lfftw3_mpi ``` -Run the example as [Intel MPI program](../mpi/running-mpich2/). +Run the example as an [Intel MPI program](software/mpi/running-mpich2/). Read more on FFTW usage on the [FFTW website](http://www.fftw.org/fftw3_doc/). diff --git a/docs.it4i/software/numerical-libraries/hdf5.md b/docs.it4i/software/numerical-libraries/hdf5.md index 6ebeebe60..11cd26da0 100644 --- a/docs.it4i/software/numerical-libraries/hdf5.md +++ b/docs.it4i/software/numerical-libraries/hdf5.md @@ -84,6 +84,6 @@ $ ml hdf5-parallel $ mpicc hdf5test.c -o hdf5test.x -Wl,-rpath=$LIBRARY_PATH $HDF5_INC $HDF5_SHLIB ``` -Run the example as [Intel MPI program](../mpi/running-mpich2/). +Run the example as an [Intel MPI program](software/mpi/running-mpich2/). -For further information, see the website: <http://www.hdfgroup.org/HDF5/> +For further information, see the website: [http://www.hdfgroup.org/HDF5/](http://www.hdfgroup.org/HDF5/) diff --git a/docs.it4i/software/numerical-libraries/intel-numerical-libraries.md b/docs.it4i/software/numerical-libraries/intel-numerical-libraries.md index 5fbe5086f..f25d6edd5 100644 --- a/docs.it4i/software/numerical-libraries/intel-numerical-libraries.md +++ b/docs.it4i/software/numerical-libraries/intel-numerical-libraries.md @@ -10,7 +10,7 @@ Intel Math Kernel Library (Intel MKL) is a library of math kernel subroutines, e $ ml mkl **or** ml imkl ``` -Read more at the [Intel MKL](../intel/intel-suite/intel-mkl/) page. +Read more at the [Intel MKL](software/intel/intel-suite/intel-mkl/) page. ## Intel Integrated Performance Primitives @@ -20,7 +20,7 @@ Intel Integrated Performance Primitives, version 7.1.1, compiled for AVX is avai $ ml ipp ``` -Read more at the [Intel IPP](../intel/intel-suite/intel-integrated-performance-primitives/) page. +Read more at the [Intel IPP](software/intel/intel-suite/intel-integrated-performance-primitives/) page. ## Intel Threading Building Blocks @@ -30,4 +30,4 @@ Intel Threading Building Blocks (Intel TBB) is a library that supports scalable $ ml tbb ``` -Read more at the [Intel TBB](../intel/intel-suite/intel-tbb/) page. +Read more at the [Intel TBB](software/intel/intel-suite/intel-tbb/) page. diff --git a/docs.it4i/software/numerical-libraries/magma-for-intel-xeon-phi.md b/docs.it4i/software/numerical-libraries/magma-for-intel-xeon-phi.md index 64c443796..40499eaad 100644 --- a/docs.it4i/software/numerical-libraries/magma-for-intel-xeon-phi.md +++ b/docs.it4i/software/numerical-libraries/magma-for-intel-xeon-phi.md @@ -73,4 +73,4 @@ See more details at [MAGMA home page](http://icl.cs.utk.edu/magma/). ## References -[1] MAGMA MIC: Linear Algebra Library for Intel Xeon Phi Coprocessors, Jack Dongarra et. al, <http://icl.utk.edu/projectsfiles/magma/pubs/24-MAGMA_MIC_03.pdf> +[1] MAGMA MIC: Linear Algebra Library for Intel Xeon Phi Coprocessors, Jack Dongarra et al.,
[http://icl.utk.edu/projectsfiles/magma/pubs/24-MAGMA_MIC_03.pdf](http://icl.utk.edu/projectsfiles/magma/pubs/24-MAGMA_MIC_03.pdf) diff --git a/docs.it4i/software/tools/ansys/ansys-cfx.md b/docs.it4i/software/tools/ansys/ansys-cfx.md index 45acd6f3e..48dac488a 100644 --- a/docs.it4i/software/tools/ansys/ansys-cfx.md +++ b/docs.it4i/software/tools/ansys/ansys-cfx.md @@ -47,9 +47,8 @@ echo Machines: $hl /ansys_inc/v145/CFX/bin/cfx5solve -def input.def -size 4 -size-ni 4x -part-large -start-method "Platform MPI Distributed Parallel" -par-dist $hl -P aa_r ``` -Header of the PBS file (above) is common and description can be find on [this site](../../../anselm/job-submission-and-execution/). SVS FEM recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. +The header of the PBS file (above) is common; its description can be found on [this site](anselm/job-submission-and-execution/). SVS FEM recommends specifying resources with the keywords nodes and ppn. These keywords directly address the number of nodes (computers) and cores (ppn) which will be utilized in the job. The rest of the code also assumes such a structure of allocated resources. The working directory has to be created before sending the PBS job into the queue. The input file should be in the working directory, or the full path to the input file has to be specified. The input file has to be defined by a common CFX def file, which is attached to the CFX solver via the parameter -def. The **license** should be selected by the parameter -P (capital letter **P**). Licensed products are the following: aa_r (ANSYS **Academic** Research), ane3fl (ANSYS Multiphysics)-**Commercial**. -[More about licensing here](licensing/) diff --git a/docs.it4i/software/tools/ansys/ansys-fluent.md b/docs.it4i/software/tools/ansys/ansys-fluent.md index e11b8597b..7ef64f99f 100644 --- a/docs.it4i/software/tools/ansys/ansys-fluent.md +++ b/docs.it4i/software/tools/ansys/ansys-fluent.md @@ -38,7 +38,7 @@ NCORES=`wc -l $PBS_NODEFILE |awk '{print $1}'` /ansys_inc/v145/fluent/bin/fluent 3d -t$NCORES -cnf=$PBS_NODEFILE -g -i fluent.jou ``` -Header of the pbs file (above) is common and description can be find on [this site](../../../salomon/resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. +The header of the PBS file (above) is common; its description can be found on [this site](salomon/resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends specifying resources with the keywords nodes and ppn. These keywords directly address the number of nodes (computers) and cores (ppn) which will be utilized in the job. The rest of the code also assumes such a structure of allocated resources. The working directory has to be created before sending the PBS job into the queue. The input file should be in the working directory, or the full path to the input file has to be specified. The input file has to be defined by a common Fluent journal file, which is attached to the Fluent solver via the parameter -i fluent.jou @@ -151,12 +151,12 @@ Fluent could be run in parallel only under Academic Research license.
To do so t ANSLIC_ADMIN Utility will be run - + - + - + The ANSYS Academic Research license should be moved up to the top of the list. - + diff --git a/docs.it4i/software/tools/ansys/ansys-ls-dyna.md b/docs.it4i/software/tools/ansys/ansys-ls-dyna.md index e3af0318c..cdc14b1f4 100644 --- a/docs.it4i/software/tools/ansys/ansys-ls-dyna.md +++ b/docs.it4i/software/tools/ansys/ansys-ls-dyna.md @@ -50,6 +50,6 @@ echo Machines: $hl /ansys_inc/v145/ansys/bin/ansys145 -dis -lsdynampp i=input.k -machines $hl ``` -Header of the PBS file (above) is common and description can be find on [this site](../../../anselm/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. +The header of the PBS file (above) is common; its description can be found on [this site](anselm/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends specifying resources with the keywords nodes and ppn. These keywords directly address the number of nodes (computers) and cores (ppn) which will be utilized in the job. The rest of the code also assumes such a structure of allocated resources. The working directory has to be created before sending the PBS job into the queue. The input file should be in the working directory, or the full path to the input file has to be specified. The input file has to be defined by a common LS-DYNA .**k** file, which is attached to the ANSYS solver via the parameter i= diff --git a/docs.it4i/software/tools/ansys/ansys-mechanical-apdl.md b/docs.it4i/software/tools/ansys/ansys-mechanical-apdl.md index 3db398c40..c16443778 100644 --- a/docs.it4i/software/tools/ansys/ansys-mechanical-apdl.md +++ b/docs.it4i/software/tools/ansys/ansys-mechanical-apdl.md @@ -49,9 +49,8 @@ echo Machines: $hl /ansys_inc/v145/ansys/bin/ansys145 -b -dis -p aa_r -i input.dat -o file.out -machines $hl -dir $WORK_DIR ``` -Header of the PBS file (above) is common and description can be found on [this site](../../../anselm/resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allow to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. +The header of the PBS file (above) is common; its description can be found on [this site](anselm/resources-allocation-policy/). [SVS FEM](http://www.svsfem.cz) recommends specifying resources with the keywords nodes and ppn. These keywords directly address the number of nodes (computers) and cores (ppn) which will be utilized in the job. The rest of the code also assumes such a structure of allocated resources. The working directory has to be created before sending the PBS job into the queue. The input file should be in the working directory, or the full path to the input file has to be specified. The input file has to be defined by a common APDL file, which is attached to the ANSYS solver via the parameter -i. The **license** should be selected by the parameter -p.
Licensed products are the following: aa_r (ANSYS **Academic** Research), ane3fl (ANSYS Multiphysics)-**Commercial**, aa_r_dy (ANSYS **Academic** AUTODYN) -[More about licensing here](licensing/) diff --git a/docs.it4i/software/tools/ansys/ansys.md b/docs.it4i/software/tools/ansys/ansys.md index dcd195252..66b8e6f0e 100644 --- a/docs.it4i/software/tools/ansys/ansys.md +++ b/docs.it4i/software/tools/ansys/ansys.md @@ -2,7 +2,7 @@ **[SVS FEM](http://www.svsfem.cz/)**, as the **[ANSYS Channel partner](http://www.ansys.com/)** for the Czech Republic, provided all ANSYS licenses for the ANSELM cluster and supports all ANSYS Products (Multiphysics, Mechanical, MAPDL, CFX, Fluent, Maxwell, LS-DYNA...) for IT staff and ANSYS users. If you are facing a problem with ANSYS functionality, contact [hotline@svsfem.cz](mailto:hotline@svsfem.cz?subject=Ostrava%20-%20ANSELM) -Anselm provides commercial as well as academic variants. Academic variants are distinguished by "**Academic...**" word in the name of license or by two letter preposition "**aa\_**" in the license feature name. Change of license is realized on command line respectively directly in user's PBS file (see individual products). [More about licensing here](licensing/) +Anselm provides commercial as well as academic variants. Academic variants are distinguished by the word "**Academic...**" in the license name or by the two-letter prefix "**aa\_**" in the license feature name. The license can be changed on the command line or directly in the user's PBS file (see individual products). To load the latest version of any ANSYS product (Mechanical, Fluent, CFX, MAPDL,...) load the module: diff --git a/docs.it4i/software/tools/ansys/licensing.md b/docs.it4i/software/tools/ansys/licensing.md index 3792aa386..98884d9e7 100644 --- a/docs.it4i/software/tools/ansys/licensing.md +++ b/docs.it4i/software/tools/ansys/licensing.md @@ -17,9 +17,5 @@ The licence intended to be used for science and research, publications, students ## Available Versions * 16.1 -* 17.0 * 18.0 - -## License Preferences - -Please [see this page to set license preferences](setting-license-preferences/). +* 19.1 diff --git a/docs.it4i/software/tools/ansys/ls-dyna.md b/docs.it4i/software/tools/ansys/ls-dyna.md index 8492ddbf7..43c818c2c 100644 --- a/docs.it4i/software/tools/ansys/ls-dyna.md +++ b/docs.it4i/software/tools/ansys/ls-dyna.md @@ -30,6 +30,6 @@ ml lsdyna /apps/engineering/lsdyna/lsdyna700s i=input.k ``` -Header of the PBS file (above) is common and description can be find on [this site](../../../anselm/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends to utilize sources by keywords: nodes, ppn. These keywords allows to address directly the number of nodes (computers) and cores (ppn) which will be utilized in the job. Also the rest of code assumes such structure of allocated resources. +The header of the PBS file (above) is common; its description can be found on [this site](anselm/job-submission-and-execution/). [SVS FEM](http://www.svsfem.cz) recommends specifying resources with the keywords nodes and ppn. These keywords directly address the number of nodes (computers) and cores (ppn) which will be utilized in the job. The rest of the code also assumes such a structure of allocated resources. The working directory has to be created before sending the PBS job into the queue. The input file should be in the working directory, or the full path to the input file has to be specified.
The input file has to be defined by a common LS-DYNA **.k** file, which is attached to the LS-DYNA solver via the parameter i= diff --git a/docs.it4i/software/tools/ansys/setting-license-preferences.md b/docs.it4i/software/tools/ansys/setting-license-preferences.md index e758895a4..cd7aac3f5 100644 --- a/docs.it4i/software/tools/ansys/setting-license-preferences.md +++ b/docs.it4i/software/tools/ansys/setting-license-preferences.md @@ -12,12 +12,12 @@ $ANSYSLIC_DIR/lic_admin/anslic_admin ANSLIC_ADMIN Utility will be run - + - + - + The ANSYS Academic Research license should be moved up to the top or down to the bottom of the list. - + diff --git a/docs.it4i/software/tools/ansys/workbench.md b/docs.it4i/software/tools/ansys/workbench.md index cdbc3c799..7d2e663ac 100644 --- a/docs.it4i/software/tools/ansys/workbench.md +++ b/docs.it4i/software/tools/ansys/workbench.md @@ -4,7 +4,7 @@ It is possible to run Workbench scripts in batch mode. You need to configure solvers of individual components to run in parallel mode. Open your project in Workbench. Then, for example, in Mechanical, go to Tools - Solve Process Settings ... - + Enable the Distribute Solution checkbox and enter the number of cores (e.g. 48 to run on two Salomon nodes). If you want the job to run on more than 1 node, you must also provide a so-called MPI appfile. In the Additional Command Line Arguments input field, enter: diff --git a/docs.it4i/software/tools/singularity-it4i.md b/docs.it4i/software/tools/singularity-it4i.md index 768ed8936..62523b671 100644 --- a/docs.it4i/software/tools/singularity-it4i.md +++ b/docs.it4i/software/tools/singularity-it4i.md @@ -274,4 +274,4 @@ local:$ scp container.img login@login4.salomon.it4i.cz:/home/login/.singularity/ ``` * Load module Singularity (`ml Singularity`) -* Use your image \ No newline at end of file +* Use your image diff --git a/docs.it4i/software/tools/virtualization.md b/docs.it4i/software/tools/virtualization.md index 066d14b9c..9a4e44f7e 100644 --- a/docs.it4i/software/tools/virtualization.md +++ b/docs.it4i/software/tools/virtualization.md @@ -24,7 +24,7 @@ Anselm's virtualization does not provide performance and all features of native Virtualization also has some drawbacks; it is not easy to set up an efficient solution. -Solution described in chapter [HOWTO](virtualization/#howto) is suitable for single node tasks, does not introduce virtual machine clustering. +The solution described in the chapter [HOWTO](#howto) is suitable for single-node tasks; it does not introduce virtual machine clustering. !!! note Please consider virtualization as a last resort solution for your needs. @@ -47,7 +47,7 @@ IT4Innovations does not provide any licenses for operating systems and software We propose this job workflow: - + Our recommended solution is that the job script creates a distinct shared job directory, which makes a central point for data exchange between Anselm's environment, the compute node (host) (e.g. HOME, SCRATCH, local scratch and other local or cluster file systems) and the virtual machine (guest). The job script links or copies the input data and instructions on what to do (run script) for the virtual machine to the job directory, and the virtual machine processes the input data according to the instructions in the job directory and stores the output back to the job directory. We recommend that the virtual machine runs in so-called [snapshot mode](virtualization/#snapshot-mode): the image is immutable - it does not change, so one image can be used for many concurrent jobs.
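To make the snapshot-mode recommendation in virtualization.md above concrete, a minimal sketch of launching a guest so that the base image stays immutable; QEMU/KVM as the hypervisor, the module name, the resource sizes, and the image name are all assumptions, not taken from this diff:

```console
$ ml qemu                                  # module name assumed
$ qemu-system-x86_64 -enable-kvm -cpu host -smp 16 -m 32768 \
    -drive file=win.img,media=disk,if=virtio \
    -snapshot                              # guest writes are discarded, image stays unchanged
```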
@@ -151,7 +151,7 @@ Example startup script maps shared job script as drive z: and looks for run scri Create a job script according to the recommended -[Virtual Machine Job Workflow](virtualization#virtual-machine-job-workflow). +[Virtual Machine Job Workflow](#virtual-machine-job-workflow). Example job for Windows virtual machine: @@ -203,7 +203,7 @@ Run script runs application from shared job directory (mapped as drive z:), proc ### Run Jobs -Run jobs as usual, see [Resource Allocation and Job Execution](/salomon/job-submission-and-execution/). Use only full node allocation for virtualization jobs. +Run jobs as usual; see [Resource Allocation and Job Execution](salomon/job-submission-and-execution/). Use only full-node allocation for virtualization jobs. ### Running Virtual Machines diff --git a/docs.it4i/software/viz/openfoam.md b/docs.it4i/software/viz/openfoam.md index 7765c5472..96e7213a3 100644 --- a/docs.it4i/software/viz/openfoam.md +++ b/docs.it4i/software/viz/openfoam.md @@ -6,7 +6,7 @@ a Free, Open Source CFD Software Package OpenFOAM is a free, open source CFD software package developed by [**OpenCFD Ltd**](http://www.openfoam.com/about) at [**ESI Group**](http://www.esi-group.com/) and distributed by the [**OpenFOAM Foundation**](http://www.openfoam.org/). It has a large user base across most areas of engineering and science, from both commercial and academic organisations. -Homepage: <http://www.openfoam.com/> +Homepage: [http://www.openfoam.com/](http://www.openfoam.com/) ### Installed Version @@ -45,7 +45,7 @@ In /opt/modules/modulefiles/engineering you can see installed engineering softwa lsdyna/7.x.x openfoam/2.2.1-gcc481-openmpi1.6.5-SP ``` -For information how to use modules [look here](../../environment-and-modules/). +For information on how to use modules, [look here](environment-and-modules/). ## Getting Started @@ -112,7 +112,7 @@ Job submission (example for Anselm): $ qsub -A OPEN-0-0 -q qprod -l select=1:ncpus=16,walltime=03:00:00 test.sh ``` -For information about job submission [look here](../../anselm/job-submission-and-execution/). +For information about job submission, [look here](anselm/job-submission-and-execution/). ## Running Applications in Parallel diff --git a/docs.it4i/software/viz/paraview.md b/docs.it4i/software/viz/paraview.md index 3ef96099c..f425989b2 100644 --- a/docs.it4i/software/viz/paraview.md +++ b/docs.it4i/software/viz/paraview.md @@ -8,7 +8,7 @@ Open-Source, Multi-Platform Data Analysis and Visualization Application ParaView was developed to analyze extremely large datasets using distributed memory computing resources. It can be run on supercomputers to analyze datasets of exascale size as well as on laptops for smaller data. -Homepage : <http://www.paraview.org/> +Homepage: [http://www.paraview.org/](http://www.paraview.org/) ## Installed Version @@ -16,7 +16,7 @@ Currently, version 5.1.2 compiled with intel/2017a against intel MPI library and ## Usage -On the clusters, ParaView is to be used in client-server mode. A parallel ParaView server is launched on compute nodes by the user, and client is launched on your desktop PC to control and view the visualization. Download ParaView client application for your OS here: <http://paraview.org/paraview/resources/software.php>. +On the clusters, ParaView is to be used in client-server mode. A parallel ParaView server is launched on compute nodes by the user, and the client is launched on your desktop PC to control and view the visualization.
Download the ParaView client application for your OS [here](http://paraview.org/paraview/resources/software.php). !!! warning Your version must match the version number installed on the cluster. @@ -29,7 +29,7 @@ To launch the server, you must first allocate compute nodes, for example $ qsub -I -q qprod -A OPEN-0-0 -l select=2 ``` -to launch an interactive session on 2 nodes. Refer to [Resource Allocation and Job Execution](../../salomon/job-submission-and-execution/) for details. +to launch an interactive session on 2 nodes. Refer to [Resource Allocation and Job Execution](salomon/job-submission-and-execution/) for details. After the interactive session is opened, load the ParaView module (the following examples are for Salomon; Anselm instructions are in comments): -- GitLab