Commit 0ff1fae8 authored by Lukáš Krupčík

Merge branch 'master' into 'hot_fix'

Master

See merge request sccs/docs.it4i.cz!196
parents cc0f807c a6f4ae2d
Showing 96 additions and 635 deletions
@@ -6,25 +6,26 @@ stages:
docs:
stage: test
image: davidhrbac/docker-mdcheck:latest
allow_failure: true
script:
- mdl -r ~MD013,~MD033,~MD014,~MD026,~MD037 *.md docs.it4i/
- mdl -r ~MD013,~MD033,~MD014,~MD026,~MD037,~MD036,~MD010,~MD029 *.md docs.it4i # BUGS
two spaces:
stage: test
image: davidhrbac/docker-mdcheck:latest
allow_failure: true
script:
- echo "== Files having more than one space between two characters =="
- find docs.it4i/ -name '*.md' -exec grep "[[:alpha:]] [[:alpha:]]" -l {} + || true
# not functional, unnecessary, it also picks up console output
#two spaces:
# stage: test
# image: davidhrbac/docker-mdcheck:latest
# allow_failure: true
# before_script:
# - echo "== Files having more than one space betwee two characters =="
# - find docs.it4i/ -name '*.md' ! -path "docs.it4i/software*" -exec grep -nr "[[:alpha:]] [[:alpha:]]" -l {} +
# script:
# - find docs.it4i/ -name '*.md' ! -path "docs.it4i/software*" -exec grep -nr "[[:alpha:]] [[:alpha:]]" -l {} +
capitalize:
stage: test
image: davidhrbac/docker-mkdocscheck:latest
allow_failure: true
# allow_failure: true
script:
- scripts/titlemd_test.py mkdocs.yml
- find docs.it4i/ -name '*.md' -print0 | xargs -0 -n1 scripts/titlemd_test.py
- find mkdocs.yml docs.it4i/ \( -name '*.md' -o -name '*.yml' \) -print0 | xargs -0 -n1 scripts/titlemd_test.py
spell check:
stage: test
@@ -39,7 +40,7 @@ ext_links:
image: davidhrbac/docker-mdcheck:latest
allow_failure: true
after_script:
# remove JSON results
# remove JSON results
- rm *.json
script:
#- find docs.it4i/ -name '*.md' -exec grep --color -l http {} + | xargs awesome_bot -t 10
@@ -59,42 +60,47 @@ mkdocs:
stage: build
image: davidhrbac/docker-mkdocscheck:latest
script:
- mkdocs -V
#- apt-get update
#- apt-get -y install git
# add version to footer
- bash scripts/add_version.sh
# get modules list from clusters
# get modules list from clusters
- bash scripts/get_modules.sh
# regenerate modules matrix
- python scripts/modules-matrix.py > docs.it4i/modules-matrix.md
- python scripts/modules-json.py > docs.it4i/modules-matrix.json
- curl -f0 https://scs-test.it4i.cz/devel/apidocs/master/scs_api.server_public.md -o docs.it4i/apiv1.md
# build pages
- mkdocs build
# compress search_index.json
#- bash scripts/clean_json.sh site/mkdocs/search_index.json
# replace broken links in 404.html
- sed -i 's,href="" title=",href="/" title=",g' site/404.html
- cp site/404.html site/403.html
- sed -i 's/404 - Not found/403 - Forbidden/g' site/403.html
# compress sitemap
- gzip < site/sitemap.xml > site/sitemap.xml.gz
- gzip < site/sitemap.xml > site/sitemap.xml.gz
artifacts:
paths:
- site
expire_in: 1 week
shellcheck:
stage: test
image: davidhrbac/docker-shellcheck:latest
allow_failure: true
script:
- which shellcheck || apt-get update && apt-get install -y shellcheck
- find . -name *.sh -not -path "./docs.it4i/*" -not -path "./site/*" -exec shellcheck {} +
## unnecessary in the documentation
#shellcheck:
# stage: test
# image: davidhrbac/docker-shellcheck:latest
# allow_failure: true
# script:
# - which shellcheck || apt-get update && apt-get install -y shellcheck
# - find . -name *.sh -not -path "./docs.it4i/*" -not -path "./site/*" -exec shellcheck {} +
deploy to stage:
environment: stage
environment: stage
stage: deploy
image: davidhrbac/docker-mkdocscheck:latest
before_script:
# install ssh-agent
# install ssh-agent
- 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'
- 'which rsync || ( apt-get update -y && apt-get install rsync -y )'
# run ssh-agent
@@ -117,7 +123,7 @@ deploy to production:
stage: deploy
image: davidhrbac/docker-mkdocscheck:latest
before_script:
# install ssh-agent
# install ssh-agent
- 'which ssh-agent || ( apt-get update -y && apt-get install openssh-client -y )'
- 'which rsync || ( apt-get update -y && apt-get install rsync -y )'
# run ssh-agent
@@ -127,7 +133,7 @@ deploy to production:
# disable host key checking (NOTE: makes you susceptible to man-in-the-middle attacks)
# WARNING: use only in docker container, if you use it with shell you will overwrite your user's ssh config
- mkdir -p ~/.ssh
- echo -e "Host *\n\tStrictHostKeyChecking no\n\n" > ~/.ssh/config
- echo -e "Host *\n\tStrictHostKeyChecking no\n\n" > ~/.ssh/config
- useradd -lM nginx
script:
- chown nginx:nginx site -R
......
@@ -3,6 +3,14 @@
# global dictionary is at the start, file overrides afterwards
# one word per line, to define a file override use ' - filename'
# where filename is relative to this configuration file
CAE
CUBE
GPU
GSL
LMGC90
LS-DYNA
MAPDL
GPI-2
COM
.ssh
Anselm
@@ -260,3 +268,5 @@ r37u31n1008
qsub
it4ifree
it4i.portal.clients
x86
x64
\ No newline at end of file
@@ -29,8 +29,8 @@ Mellanox
### Formulas are made with:
* https://facelessuser.github.io/pymdown-extensions/extensions/arithmatex/
* https://www.mathjax.org/
* [https://facelessuser.github.io/pymdown-extensions/extensions/arithmatex/](https://facelessuser.github.io/pymdown-extensions/extensions/arithmatex/)
* [https://www.mathjax.org/](https://www.mathjax.org/)
You can add a formula to a page like this:
......
# Capacity computing
# Capacity Computing
## Introduction
......
@@ -21,7 +21,7 @@ Anselm is cluster of x86-64 Intel based nodes built on Bull Extreme Computing bu
* two Intel Sandy Bridge E5-2470, 8-core, 2.3GHz processors per node
* 96 GB of physical memory per node
* one 500 GB SATA 2.5" 7.2 krpm HDD per node
* GPU accelerator 1x NVIDIA Tesla Kepler K20 per node
* GPU accelerator 1x NVIDIA Tesla Kepler K20m per node
* bullx B515 blade servers
* cn[181-203]
@@ -52,12 +52,12 @@ Anselm is cluster of x86-64 Intel based nodes built on Bull Extreme Computing bu
### Compute Nodes Summary
| Node type | Count | Range | Memory | Cores | [Access](resources-allocation-policy/) |
| -------------------------- | ----- | ----------- | ------ | ----------- | -------------------------------------- |
| Nodes without accelerator | 180 | cn[1-180] | 64GB | 16 @ 2.4GHz | qexp, qprod, qlong, qfree |
| Nodes with GPU accelerator | 23 | cn[181-203] | 96GB | 16 @ 2.3GHz | qgpu, qexp |
| Nodes with MIC accelerator | 4 | cn[204-207] | 96GB | 16 @ 2.3GHz | qmic, qexp |
| Fat compute nodes | 2 | cn[208-209] | 512GB | 16 @ 2.4GHz | qfat, qexp |
| Node type | Count | Range | Memory | Cores | [Access](resources-allocation-policy/) |
| -------------------------- | ----- | ----------- | ------ | ----------- | -------------------------------------- |
| Nodes without accelerator | 180 | cn[1-180] | 64GB | 16 @ 2.4GHz | qexp, qprod, qlong, qfree, qatlas, qprace |
| Nodes with GPU accelerator | 23 | cn[181-203] | 96GB | 16 @ 2.3GHz | qnvidia, qexp, qatlas |
| Nodes with MIC accelerator | 4 | cn[204-207] | 96GB | 16 @ 2.3GHz | qmic, qexp |
| Fat compute nodes | 2 | cn[208-209] | 512GB | 16 @ 2.4GHz | qfat, qexp |
## Processor Architecture
......
@@ -42,13 +42,13 @@ The modules may be loaded, unloaded and switched, according to momentary needs.
To check available modules use
```console
$ module avail **or** ml av
$ ml av
```
To load a module, for example the octave module use
```console
$ module load octave **or** ml octave
$ ml octave
```
loading the octave module will set up paths and environment variables of your active shell such that you are ready to run the octave software
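For illustration, once the module is loaded the octave binary is available on your PATH (a minimal check only; --version is a standard Octave flag):
```console
$ ml octave
$ octave --version
```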
@@ -56,19 +56,13 @@ loading the octave module will set up paths and environment variables of your ac
To check loaded modules use
```console
$ module list **or** ml
$ ml
```
To unload a module, for example the octave module use
```console
$ module unload octave **or** ml -octave
```
Learn more on modules by reading the module man page
```console
$ man module
$ ml -octave
```
Following modules set up the development environment
@@ -79,10 +73,6 @@ PrgEnv-intel sets up the INTEL development environment in conjunction with the I
## Application Modules Path Expansion
All application modules on Salomon cluster (and further) will be build using tool called [EasyBuild](http://hpcugent.github.io/easybuild/ "EasyBuild"). In case that you want to use some applications that are build by EasyBuild already, you have to modify your MODULEPATH environment variable.
```console
export MODULEPATH=$MODULEPATH:/apps/easybuild/modules/all/
```
All application modules on Anselm cluster (and further) will be build using tool called [EasyBuild](http://hpcugent.github.io/easybuild/ "EasyBuild").
This command expands your searched paths to modules. You can also add this command to the .bashrc file to expand paths permanently. After this command, you can use same commands to list/add/remove modules as is described above.
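A minimal sketch of making the expansion permanent, assuming the EasyBuild module tree at /apps/easybuild/modules/all/ from the export above:
```console
$ echo 'export MODULEPATH=$MODULEPATH:/apps/easybuild/modules/all/' >> ~/.bashrc
$ source ~/.bashrc
$ ml av
```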
# Hardware Overview
The Anselm cluster consists of 209 computational nodes named cn[1-209] of which 180 are regular compute nodes, 23 GPU Kepler K20 accelerated nodes, 4 MIC Xeon Phi 5110P accelerated nodes and 2 fat nodes. Each node is a powerful x86-64 computer, equipped with 16 cores (two eight-core Intel Sandy Bridge processors), at least 64 GB RAM, and local hard drive. The user access to the Anselm cluster is provided by two login nodes login[1,2]. The nodes are interlinked by high speed InfiniBand and Ethernet networks. All nodes share 320 TB /home disk storage to store the user files. The 146 TB shared /scratch storage is available for the scratch data.
The Anselm cluster consists of 209 computational nodes named cn[1-209] of which 180 are regular compute nodes, 23 GPU Kepler K20m accelerated nodes, 4 MIC Xeon Phi 5110P accelerated nodes and 2 fat nodes. Each node is a powerful x86-64 computer, equipped with 16 cores (two eight-core Intel Sandy Bridge processors), at least 64 GB RAM, and local hard drive. The user access to the Anselm cluster is provided by two login nodes login[1,2]. The nodes are interlinked by high speed InfiniBand and Ethernet networks. All nodes share 320 TB /home disk storage to store the user files. The 146 TB shared /scratch storage is available for the scratch data.
The Fat nodes are equipped with large amount (512 GB) of memory. Virtualization infrastructure provides resources to run long term servers and services in virtual mode. Fat nodes and virtual servers may access 45 TB of dedicated block storage. Accelerated nodes, fat nodes, and virtualization infrastructure are available [upon request](https://support.it4i.cz/rt) made by a PI.
The Fat nodes are equipped with large amount (512 GB) of memory. Fat nodes may access 45 TB of dedicated block storage. Accelerated nodes, fat nodes are available [upon request](https://support.it4i.cz/rt) made by a PI.
Schematic representation of the Anselm cluster. Each box represents a node (computer) or storage capacity:
@@ -13,7 +13,7 @@ The cluster compute nodes cn[1-207] are organized within 13 chassis.
There are four types of compute nodes:
* 180 compute nodes without the accelerator
* 23 compute nodes with GPU accelerator - equipped with NVIDIA Tesla Kepler K20
* 23 compute nodes with GPU accelerator - equipped with NVIDIA Tesla Kepler K20m
* 4 compute nodes with MIC accelerator - equipped with Intel Xeon Phi 5110P
* 2 fat nodes - equipped with 512 GB RAM and two 100 GB SSD drives
@@ -34,7 +34,7 @@ The parameters are summarized in the following tables:
| ------------------------------------------- | -------------------------------------------- |
| Primary purpose | High Performance Computing |
| Architecture of compute nodes | x86-64 |
| Operating system | Linux |
| Operating system | Linux (CentOS) |
| [**Compute nodes**](compute-nodes/) | |
| Totally | 209 |
| Processor cores | 16 (2 x 8 cores) |
@@ -53,7 +53,7 @@ The parameters are summarized in the following tables:
| Node | Processor | Memory | Accelerator |
| ---------------- | --------------------------------------- | ------ | -------------------- |
| w/o accelerator | 2 x Intel Sandy Bridge E5-2665, 2.4 GHz | 64 GB | - |
| GPU accelerated | 2 x Intel Sandy Bridge E5-2470, 2.3 GHz | 96 GB | NVIDIA Kepler K20 |
| GPU accelerated | 2 x Intel Sandy Bridge E5-2470, 2.3 GHz | 96 GB | NVIDIA Kepler K20m |
| MIC accelerated | 2 x Intel Sandy Bridge E5-2470, 2.3 GHz | 96 GB | Intel Xeon Phi 5110P |
| Fat compute node | 2 x Intel Sandy Bridge E5-2665, 2.4 GHz | 512 GB | - |
......
# Job scheduling
# Job Scheduling
## Job Execution Priority
@@ -54,7 +54,7 @@ Job execution priority (job sort formula) is calculated as:
---8<--- "job_sort_formula.md"
### Job backfilling
### Job Backfilling
Anselm cluster uses job backfilling.
......
# Job submission and execution
# Job Submission and Execution
## Job Submission
@@ -324,10 +324,10 @@ cp $PBS_O_WORKDIR/input .
cp $PBS_O_WORKDIR/mympiprog.x .
# load the mpi module
module load openmpi
ml OpenMPI
# execute the calculation
mpiexec -pernode ./mympiprog.x
mpirun -pernode ./mympiprog.x
# copy output file to home
cp output $PBS_O_WORKDIR/.
@@ -362,10 +362,10 @@ SCRDIR=/scratch/$USER/myjob
cd $SCRDIR || exit
# load the mpi module
module load openmpi
ml OpenMPI
# execute the calculation
mpiexec ./mympiprog.x
mpirun ./mympiprog.x
#exit
exit
......
@@ -210,7 +210,7 @@ All system wide installed software on the cluster is made available to the users
PRACE users can use the "prace" module to use the [PRACE Common Production Environment](http://www.prace-ri.eu/prace-common-production-environment/).
```console
$ module load prace
$ ml prace
```
### Resource Allocation and Job Execution
......
# Remote visualization service
# Remote Visualization Service
## Introduction
......
# Resource Allocation and Job Execution
To run a [job](ob-submission-and-execution/), [computational resources](resources-allocation-policy/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [official documentation here](../pbspro/), especially in the PBS Pro User's Guide.
To run a [job](job-submission-and-execution/), [computational resources](resources-allocation-policy/) for this particular job must be allocated. This is done via the PBS Pro job workload manager software, which efficiently distributes workloads across the supercomputer. Extensive information about PBS Pro can be found in the [official documentation here](../pbspro/), especially in the PBS Pro User's Guide.
## Resources Allocation Policy
......
# Resources Allocation Policy
## Introduction
## Job Queue Policies
The resources are allocated to the job in a fair-share fashion, subject to constraints set by the queue and resources available to the Project. The Fair-share at Anselm ensures that individual users may consume approximately equal amount of resources per week. Detailed information in the [Job scheduling](job-priority/) section. The resources are accessible via several queues for queueing the jobs. The queues provide prioritized and exclusive access to the computational resources. Following table provides the queue partitioning overview:
@@ -9,7 +9,7 @@ The resources are allocated to the job in a fair-share fashion, subject to const
| queue | active project | project resources | nodes | min ncpus | priority | authorization | walltime |
| ------------------- | -------------- | ----------------- | ---------------------------------------------------- | --------- | -------- | ------------- | -------- |
| qexp | no | none required | 2 reserved, 31 totalincluding MIC, GPU and FAT nodes | 1 | 150 | no | 1 h |
| qexp | no | none required | 2 reserved, 31 total including MIC, GPU | 1 | 150 | no | 1 h |
| qprod | yes | 0 | 178 nodes w/o accelerator | 16 | 0 | no | 24/48 h |
| qlong | yes | 0 | 60 nodes w/o accelerator | 16 | 0 | no | 72/144 h |
| qnvidia, qmic | yes | 0 | 23 nvidia nodes, 4 mic nodes | 16 | 200 | yes | 24/48 h |
@@ -21,13 +21,13 @@ The resources are allocated to the job in a fair-share fashion, subject to const
**The qexp queue is equipped with nodes that do not all have the very same CPU clock speed.** Should you need the very same CPU speed, you have to select the proper nodes during the PBS job submission.
* **qexp**, the Express queue: This queue is dedicated for testing and running very small jobs. It is not required to specify a project to enter the qexp. There are 2 nodes always reserved for this queue (w/o accelerator), maximum 8 nodes are available via the qexp for a particular user, from a pool of nodes containing Nvidia accelerated nodes (cn181-203), MIC accelerated nodes (cn204-207) and Fat nodes with 512GB RAM (cn208-209). This enables to test and tune also accelerated code or code with higher RAM requirements. The nodes may be allocated on per core basis. No special authorization is required to use it. The maximum runtime in qexp is 1 hour.
* **qexp**, the Express queue: This queue is dedicated for testing and running very small jobs. It is not required to specify a project to enter the qexp. There are 2 nodes always reserved for this queue (w/o accelerator), maximum 8 nodes are available via the qexp for a particular user, from a pool of nodes containing Nvidia accelerated nodes (cn181-203), MIC accelerated nodes (cn204-207). This enables to test and tune also accelerated code. The nodes may be allocated on per core basis. No special authorization is required to use it. The maximum runtime in qexp is 1 hour.
* **qprod**, the Production queue: This queue is intended for normal production runs. It is required that active project with nonzero remaining resources is specified to enter the qprod. All nodes may be accessed via the qprod queue, except the reserved ones. 178 nodes without accelerator are included. Full nodes, 16 cores per node are allocated. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qprod is 48 hours.
* **qlong**, the Long queue: This queue is intended for long production runs. It is required that active project with nonzero remaining resources is specified to enter the qlong. Only 60 nodes without acceleration may be accessed via the qlong queue. Full nodes, 16 cores per node are allocated. The queue runs with medium priority and no special authorization is required to use it. The maximum runtime in qlong is 144 hours (three times of the standard qprod time - 3 x 48 h).
* **qnvidia**, qmic, qfat, the Dedicated queues: The queue qnvidia is dedicated to access the Nvidia accelerated nodes, the qmic to access MIC nodes and qfat the Fat nodes. It is required that active project with nonzero remaining resources is specified to enter these queues. 23 nvidia, 4 mic and 2 fat nodes are included. Full nodes, 16 cores per node are allocated. The queues run with very high priority, the jobs will be scheduled before the jobs coming from the qexp queue. An PI needs explicitly ask [support](https://support.it4i.cz/rt/) for authorization to enter the dedicated queues for all users associated to her/his Project.
* **qnvidia**, **qmic**, **qfat**, the Dedicated queues: The queue qnvidia is dedicated to access the Nvidia accelerated nodes, the qmic to access MIC nodes and qfat the Fat nodes. It is required that an active project with nonzero remaining resources is specified to enter these queues. 23 nvidia, 4 mic and 2 fat nodes are included. Full nodes, 16 cores per node are allocated. The queues run with very high priority, the jobs will be scheduled before the jobs coming from the qexp queue. A PI needs to explicitly ask [support](https://support.it4i.cz/rt/) for authorization to enter the dedicated queues for all users associated to her/his Project.
* **qfree**, The Free resource queue: The queue qfree is intended for utilization of free resources, after a Project has exhausted all its allocated computational resources (Does not apply to DD projects by default. DD projects have to request permission on qfree after exhaustion of computational resources.). It is required that an active project is specified to enter the queue, however no remaining resources are required. Consumed resources will be accounted to the Project. Only 178 nodes without accelerator may be accessed from this queue. Full nodes, 16 cores per node are allocated. The queue runs with very low priority and no special authorization is required to use it. The maximum runtime in qfree is 12 hours.
### Notes
## Queue Notes
The job wall clock time defaults to **half the maximum time**, see table above. Longer wall time limits can be [set manually, see examples](job-submission-and-execution/).
@@ -35,7 +35,7 @@ Jobs that exceed the reserved wall clock time (Req'd Time) get killed automatica
Anselm users may check current queue configuration at <https://extranet.it4i.cz/anselm/queues>.
### Queue Status
## Queue Status
!!! tip
Check the status of jobs, queues and compute nodes at <https://extranet.it4i.cz/anselm/>
@@ -106,24 +106,6 @@ Options:
--incl-finished Include finished jobs
```
## Resources Accounting Policy
---8<--- "resource_accounting.md"
### Core-Hours
The resources that are currently subject to accounting are the core-hours. The core-hours are accounted on the wall clock basis. The accounting runs whenever the computational cores are allocated or blocked via the PBS Pro workload manager (the qsub command), regardless of whether the cores are actually used for any calculation. 1 core-hour is defined as 1 processor core allocated for 1 hour of wall clock time. Allocating a full node (16 cores) for 1 hour accounts to 16 core-hours. See example in the [Job submission and execution](job-submission-and-execution/) section.
### Check Consumed Resources
!!! note
The **it4ifree** command is a part of it4i.portal.clients package, located here: <https://pypi.python.org/pypi/it4i.portal.clients>
User may check at any time, how many core-hours have been consumed by himself/herself and his/her projects. The command is available on clusters' login nodes.
```console
$ it4ifree
Password:
PID Total Used ...by me Free
-------- ------- ------ -------- -------
OPEN-0-0 1500000 400644 225265 1099356
DD-13-1 10000 2606 2606 7394
```
---8<--- "mathjax.md"
@@ -15,21 +15,28 @@ The authentication is by the [private key](../general/accessing-the-clusters/she
!!! note
Please verify SSH fingerprints during the first logon. They are identical on all login nodes:
md5:
29:b3:f4:64:b0:73:f5:6f:a7:85:0f:e0:0d:be:76:bf (DSA)
d4:6f:5c:18:f4:3f:70:ef:bc:fc:cc:2b:fd:13:36:b7 (RSA)
sha256:
LX2034TYy6Lf0Q7Zf3zOIZuFlG09DaSGROGBz6LBUy4 (DSA)
+DcED3GDoA9piuyvQOho+ltNvwB9SJSYXbB639hbejY (RSA)
Private key authentication:
On **Linux** or **Mac**, use
```console
local $ ssh -i /path/to/id_rsa username@anselm.it4i.cz
$ ssh -i /path/to/id_rsa username@anselm.it4i.cz
```
If you see the warning message "UNPROTECTED PRIVATE KEY FILE!", use this command to set lower permissions on the private key file.
```console
local $ chmod 600 /path/to/id_rsa
$ chmod 600 /path/to/id_rsa
```
On **Windows**, use [PuTTY ssh client](../general/accessing-the-clusters/shell-access-and-data-transfer/putty.md).
@@ -82,23 +89,23 @@ To achieve 160MB/s transfer rates, the end user must be connected by 10G line al
On Linux or Mac, use the scp or sftp client to transfer the data to Anselm:
```console
local $ scp -i /path/to/id_rsa my-local-file username@anselm.it4i.cz:directory/file
$ scp -i /path/to/id_rsa my-local-file username@anselm.it4i.cz:directory/file
```
```console
local $ scp -i /path/to/id_rsa -r my-local-dir username@anselm.it4i.cz:directory
$ scp -i /path/to/id_rsa -r my-local-dir username@anselm.it4i.cz:directory
```
or
```console
local $ sftp -o IdentityFile=/path/to/id_rsa username@anselm.it4i.cz
$ sftp -o IdentityFile=/path/to/id_rsa username@anselm.it4i.cz
```
A very convenient way to transfer files in and out of the Anselm computer is via the fuse filesystem [sshfs](http://linux.die.net/man/1/sshfs)
```console
local $ sshfs -o IdentityFile=/path/to/id_rsa username@anselm.it4i.cz:. mountpoint
$ sshfs -o IdentityFile=/path/to/id_rsa username@anselm.it4i.cz:. mountpoint
```
Using sshfs, the user's Anselm home directory will be mounted on your local computer, just like an external disk.
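When the mount is no longer needed, release it like any other FUSE filesystem (a minimal sketch, using the mountpoint directory from the example above):
```console
$ fusermount -u mountpoint
```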
@@ -143,7 +150,7 @@ It works by tunneling the connection from Anselm back to users workstation and f
Pick some unused port on Anselm login node (for example 6000) and establish the port forwarding:
```console
local $ ssh -R 6000:remote.host.com:1234 anselm.it4i.cz
$ ssh -R 6000:remote.host.com:1234 anselm.it4i.cz
```
In this example, we establish port forwarding between port 6000 on Anselm and port 1234 on the remote.host.com. By accessing localhost:6000 on Anselm, an application will see response of remote.host.com:1234. The traffic will run via users local workstation.
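For illustration only (assuming the service on remote.host.com:1234 speaks HTTP; curl is just an example client), an application on Anselm could reach the remote service through the forwarded port like this:
```console
$ curl http://localhost:6000/
```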
@@ -183,7 +190,7 @@ Port forwarding is static, each single port is mapped to a particular port on re
To establish a local proxy server on your workstation, install and run SOCKS proxy server software. On Linux, the sshd daemon provides the functionality. To establish a SOCKS proxy server listening on port 1080, run:
```console
local $ ssh -D 1080 localhost
$ ssh -D 1080 localhost
```
On Windows, install and run the free, open source [Sock Puppet](http://sockspuppet.com/) server.
@@ -191,7 +198,7 @@ Once the proxy server is running, establish ssh port forwarding from Anselm to t
Once the proxy server is running, establish ssh port forwarding from Anselm to the proxy server, port 1080, exactly as [described above](#port-forwarding-from-login-nodes).
```console
local $ ssh -R 6000:localhost:1080 anselm.it4i.cz
$ ssh -R 6000:localhost:1080 anselm.it4i.cz
```
Now, configure the application's proxy settings to **localhost:6000**. Use port forwarding to access the [proxy server from compute nodes](#port-forwarding-from-compute-nodes) as well.
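As an illustration of an application using the forwarded SOCKS proxy (curl is only an example client and the target URL is a placeholder):
```console
$ curl --socks5-hostname localhost:6000 http://example.com/
```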
......
# Molpro
Molpro is a complete system of ab initio programs for molecular electronic structure calculations.
## About Molpro
Molpro is a software package used for accurate ab-initio quantum chemistry calculations. More information can be found at the [official webpage](http://www.molpro.net/).
## License
The Molpro software package is available only to users that have a valid license. Please contact support to enable access to Molpro if you have a valid license appropriate for running on our cluster (e.g. an academic research group license with parallel execution).
To run Molpro, you need to have a valid license token present in $HOME/.molpro/token. You can download the token from the [Molpro website](https://www.molpro.net/licensee/?portal=licensee).
## Installed Version
Version 2010.1, patch level 45, is currently installed on Anselm; this parallel version is compiled with Intel compilers and Intel MPI.
Compilation parameters are default:
| Parameter | Value |
| ---------------------------------- | ------------ |
| max number of atoms | 200 |
| max number of valence orbitals | 300 |
| max number of basis functions | 4095 |
| max number of states per symmetry | 20 |
| max number of state symmetries | 16 |
| max number of records | 200 |
| max number of primitives | maxbfn x [2] |
## Running
Molpro is compiled for parallel execution using MPI and OpenMP. By default, Molpro reads the number of allocated nodes from PBS and launches a data server on one node. On the remaining allocated nodes, compute processes are launched, one process per node, each with 16 threads. You can modify this behavior by using -n, -t and helper-server options. Please refer to the [Molpro documentation](http://www.molpro.net/info/2010.1/doc/manual/node9.html) for more details.
!!! note
The OpenMP parallelization in Molpro is limited and has been observed to produce limited scaling. We therefore recommend to use MPI parallelization only. This can be achieved by passing option mpiprocs=16:ompthreads=1 to PBS.
You are advised to use the -d option to point to a directory in [SCRATCH file system](../../storage/storage/). Molpro can produce a large amount of temporary data during its run, and it is important that these are placed in the fast scratch file system.
### Example Jobscript
```bash
#PBS -A IT4I-0-0
#PBS -q qprod
#PBS -l select=1:ncpus=16:mpiprocs=16:ompthreads=1
cd $PBS_O_WORKDIR
# load Molpro module
module add molpro
# create a directory in the SCRATCH filesystem
mkdir -p /scratch/$USER/$PBS_JOBID
# copy an example input
cp /apps/chem/molpro/2010.1/molprop_2010_1_Linux_x86_64_i8/examples/caffeine_opt_diis.com .
# run Molpro with default options
molpro -d /scratch/$USER/$PBS_JOBID caffeine_opt_diis.com
# delete scratch directory
rm -rf /scratch/$USER/$PBS_JOBID
```
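The jobscript is then submitted in the usual way; a sketch assuming it was saved as molpro.pbs (the filename is only an example):
```console
$ qsub molpro.pbs
```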
# NWChem
## Introduction
NWChem aims to provide its users with computational chemistry tools that are scalable both in their ability to treat large scientific computational chemistry problems efficiently, and in their use of available parallel computing resources from high-performance parallel supercomputers to conventional workstation clusters.
[Homepage](http://www.nwchem-sw.org/index.php/Main_Page)
## Installed Versions
The following versions are currently installed:
* 6.1.1, not recommended, problems have been observed with this version
* 6.3-rev2-patch1, current release with QMD patch applied. Compiled with Intel compilers, MKL and Intel MPI
* 6.3-rev2-patch1-openmpi, same as above, but compiled with OpenMPI and NWChem provided BLAS instead of MKL. This version is expected to be slower
* 6.3-rev2-patch1-venus, this version contains only libraries for VENUS interface linking. Does not provide standalone NWChem executable
For a current list of installed versions, execute:
```console
$ ml av nwchem
```
## Running
NWChem is compiled for parallel MPI execution. Normal procedure for MPI jobs applies. Sample jobscript:
```bash
#PBS -A IT4I-0-0
#PBS -q qprod
#PBS -l select=1:ncpus=16
module add nwchem/6.3-rev2-patch1
mpirun -np 16 nwchem h2o.nw
```
## Options
Please refer to [the documentation](http://www.nwchem-sw.org/index.php/Release62:Top-level) and set the following directives in the input file:
* MEMORY : controls the amount of memory NWChem will use
* SCRATCH_DIR : set this to a directory in the [SCRATCH file system](../../storage/storage/#scratch) (or run the calculation completely in a scratch directory); a sketch of setting these directives follows this list. For certain calculations, it might be advisable to reduce I/O by forcing "direct" mode, e.g. "scf direct"
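A minimal sketch of setting these directives from a jobscript before the run (the values are placeholders and h2o.nw is the example input from above; memory and scratch_dir are the corresponding NWChem directive keywords):
```bash
# a sketch only: create a job-specific scratch directory and point NWChem to it
mkdir -p /scratch/$USER/$PBS_JOBID
cat >> h2o.nw <<EOF
memory total 2000 mb
scratch_dir /scratch/$USER/$PBS_JOBID
EOF
```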
# Compilers
## Available Compilers, Including GNU, INTEL, and UPC Compilers
Currently there are several compilers for different programming languages available on the Anselm cluster:
* C/C++
* Fortran 77/90/95
* Unified Parallel C
* Java
* NVIDIA CUDA
The C/C++ and Fortran compilers are divided into two main groups, GNU and Intel.
## Intel Compilers
For information about the usage of Intel Compilers and other Intel products, please read the [Intel Parallel Studio](intel-suite/) page.
## GNU C/C++ and Fortran Compilers
For compatibility reasons, the original (old 4.4.6-4) versions of the GNU compilers are still available as part of the OS. These are accessible in the search path by default.
It is strongly recommended to use the up-to-date version (4.8.1), which comes with the module gcc:
```console
$ ml gcc
$ gcc -v
$ g++ -v
$ gfortran -v
```
With the module loaded, two environment variables are predefined: one for maximum optimizations on the Anselm cluster architecture, and the other for debugging purposes:
```console
$ echo $OPTFLAGS
-O3 -march=corei7-avx
$ echo $DEBUGFLAGS
-O0 -g
```
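For illustration, the variables can be passed straight to the compiler (a minimal sketch; myprog.c is a placeholder source file):
```console
$ gcc $OPTFLAGS -o myprog myprog.c
$ gcc $DEBUGFLAGS -o myprog_dbg myprog.c
```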
For more information about the possibilities of the compilers, please see the man pages.
## Unified Parallel C
UPC is supported by two compiler/runtime implementations:
* GNU - SMP/multi-threading support only
* Berkeley - multi-node support as well as SMP/multi-threading support
### GNU UPC Compiler
To use the GNU UPC compiler and run the compiled binaries use the module gupc
```console
$ module add gupc
$ gupc -v
$ g++ -v
```
Simple program to test the compiler
```console
$ cat count.upc
/* hello.upc - a simple UPC example */
#include <upc.h>
#include <stdio.h>
int main() {
if (MYTHREAD == 0) {
printf("Welcome to GNU UPC!!!n");
}
upc_barrier;
printf(" - Hello from thread %in", MYTHREAD);
return 0;
}
```
To compile the example use
```console
$ gupc -o count.upc.x count.upc
```
To run the example with 5 threads issue
```console
$ ./count.upc.x -fupc-threads-5
```
For more information see the man pages.
### Berkeley UPC Compiler
To use the Berkeley UPC compiler and runtime environment to run the binaries, use the module bupc
```console
$ module add bupc
$ upcc -version
```
The "smp" UPC network is used by default. This is a very quick and easy way for testing/debugging, but it is limited to one node only.
For production runs, it is recommended to use the native InfiniBand implementation of the UPC network, "ibv". For testing/debugging using multiple nodes, the "mpi" UPC network is recommended.
!!! warning
Selection of the network is done at compile time, not at runtime (as one might expect)!
Example UPC code:
```console
$ cat hello.upc
/* hello.upc - a simple UPC example */
#include <upc.h>
#include <stdio.h>
int main() {
if (MYTHREAD == 0) {
printf("Welcome to Berkeley UPC!!!n");
}
upc_barrier;
printf(" - Hello from thread %in", MYTHREAD);
return 0;
}
```
To compile the example with the "ibv" UPC network use
```console
$ upcc -network=ibv -o hello.upc.x hello.upc
```
To run the example with 5 threads issue
```console
$ upcrun -n 5 ./hello.upc.x
```
To run the example on two compute nodes using all 32 cores, with 32 threads, issue
```console
$ qsub -I -q qprod -A PROJECT_ID -l select=2:ncpus=16
$ module add bupc
$ upcrun -n 32 ./hello.upc.x
```
For more information see the man pages.
## Java
For information how to use Java (runtime and/or compiler), please read the [Java page](java/).
## NVIDIA CUDA
For information on how to work with NVIDIA CUDA, please read the [NVIDIA CUDA page](nvidia-cuda/).
# COMSOL Multiphysics
## Introduction
[COMSOL](http://www.comsol.com) is a powerful environment for modelling and solving various engineering and scientific problems based on partial differential equations. COMSOL is designed to solve coupled or multiphysics phenomena. For many standard engineering problems, COMSOL provides add-on products such as electrical, mechanical, fluid flow, and chemical applications.
* [Structural Mechanics Module](http://www.comsol.com/structural-mechanics-module),
* [Heat Transfer Module](http://www.comsol.com/heat-transfer-module),
* [CFD Module](http://www.comsol.com/cfd-module),
* [Acoustics Module](http://www.comsol.com/acoustics-module),
* and [many others](http://www.comsol.com/products)
COMSOL also provides interface support for equation-based modelling of partial differential equations.
## Execution
On the Anselm cluster COMSOL is available in the latest stable version. There are two variants of the release:
* **Non-commercial**, or so-called **EDU variant**, which can be used for research and educational purposes.
* **Commercial**, or so-called **COM variant**, which can also be used for commercial activities. The **COM variant** has only a subset of the features available in the **EDU variant**. More about licensing will be posted here soon.
To load the default version of COMSOL, load the module
```console
$ ml comsol
```
By default, the **EDU variant** will be loaded. If the user needs another version or variant, load that particular version. To obtain the list of available versions, use
```console
$ ml av comsol
```
If the user needs to prepare COMSOL jobs in interactive mode, it is recommended to use COMSOL on the compute nodes via the PBS Pro scheduler. To run the COMSOL Desktop GUI on Windows, it is recommended to use Virtual Network Computing (VNC).
```console
$ xhost +
$ qsub -I -X -A PROJECT_ID -q qprod -l select=1:ncpus=16
$ ml comsol
$ comsol
```
To run COMSOL in batch mode, without the COMSOL Desktop GUI environment, the user can utilize the default (comsol.pbs) job script and execute it via the qsub command.
```bash
#!/bin/bash
#PBS -l select=3:ncpus=16
#PBS -q qprod
#PBS -N JOB_NAME
#PBS -A PROJECT_ID
cd /scratch/$USER/ || exit
echo Time is `date`
echo Directory is `pwd`
echo '**PBS_NODEFILE***START*******'
cat $PBS_NODEFILE
echo '**PBS_NODEFILE***END*********'
text_nodes < cat $PBS_NODEFILE
module load comsol
# module load comsol/43b-COM
ntask=$(wc -l < $PBS_NODEFILE)
comsol -nn ${ntask} batch -configuration /tmp -mpiarg -rmk -mpiarg pbs -tmpdir /scratch/$USER/ -inputfile name_input_f.mph -outputfile name_output_f.mph -batchlog name_log_f.log
```
The working directory has to be created before submitting the (comsol.pbs) job script into the queue. The input file (name_input_f.mph) has to be in the working directory, or the full path to the input file has to be specified. The appropriate path to the temp directory of the job has to be set by the command option (-tmpdir).
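A minimal sketch of preparing the working directory and submitting the job (assuming the jobscript above was saved as comsol.pbs and uses the example input file name):
```console
$ mkdir -p /scratch/$USER
$ cp name_input_f.mph /scratch/$USER/
$ qsub comsol.pbs
```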
## LiveLink for MATLAB
COMSOL is a software package for the numerical solution of partial differential equations. LiveLink for MATLAB allows connection to the COMSOL API (Application Programming Interface) with the benefits of the programming language and computing environment of MATLAB.
LiveLink for MATLAB is available in both the **EDU** and **COM** variants of the COMSOL release. On Anselm, 1 commercial (**COM**) license and 5 educational (**EDU**) licenses of LiveLink for MATLAB are available (please see the [ISV Licenses](isv_licenses/)).
The following example shows how to start a COMSOL model from MATLAB via LiveLink in interactive mode.
```console
$ xhost +
$ qsub -I -X -A PROJECT_ID -q qexp -l select=1:ncpus=16
$ ml matlab
$ ml comsol
$ comsol server matlab
```
The first time you launch LiveLink for MATLAB (client-MATLAB/server-COMSOL connection), a login and password are requested; this information is not requested again.
To run LiveLink for MATLAB in batch mode with the (comsol_matlab.pbs) job script, you can utilize/modify the following script and execute it via the qsub command.
```bash
#!/bin/bash
#PBS -l select=3:ncpus=16
#PBS -q qprod
#PBS -N JOB_NAME
#PBS -A PROJECT_ID
cd /scratch/$USER || exit
echo Time is `date`
echo Directory is `pwd`
echo '**PBS_NODEFILE***START*******'
cat $PBS_NODEFILE
echo '**PBS_NODEFILE***END*********'
text_nodes < cat $PBS_NODEFILE
module load matlab
module load comsol/43b-EDU
ntask=$(wc -l < $PBS_NODEFILE)
comsol -nn ${ntask} server -configuration /tmp -mpiarg -rmk -mpiarg pbs -tmpdir /scratch/$USER &
cd /apps/engineering/comsol/comsol43b/mli
matlab -nodesktop -nosplash -r "mphstart; addpath /scratch/$USER; test_job"
```
This example shows how to run LiveLink for MATLAB with the following configuration: 3 nodes and 16 cores per node. The working directory has to be created before submitting the (comsol_matlab.pbs) job script into the queue. The input file (test_job.m) has to be in the working directory, or the full path to the input file has to be specified. The MATLAB command option (-r "mphstart") creates a connection with a COMSOL server using the default port number.
# Allinea Forge (DDT,MAP)
Allinea Forge consists of two tools: the DDT debugger and the MAP profiler.
Allinea DDT is a commercial debugger primarily for debugging parallel MPI or OpenMP programs. It also has support for GPU (CUDA) and Intel Xeon Phi accelerators. DDT provides all the standard debugging features (stack trace, breakpoints, watches, view variables, threads, etc.) for every thread running as part of your program, or for every process - even if these processes are distributed across a cluster using an MPI implementation.
Allinea MAP is a profiler for C/C++/Fortran HPC codes. It is designed for profiling parallel code, which uses pthreads, OpenMP or MPI.
## License and Limitations for Anselm Users
On Anselm, users can debug OpenMP or MPI code that runs up to 64 parallel processes. In case of debugging GPU or Xeon Phi accelerated codes, the limit is 8 accelerators. These limitations mean that:
* 1 user can debug up 64 processes, or
* 32 users can debug 2 processes, etc.
In case of debugging on accelerators:
* 1 user can debug on up to 8 accelerators, or
* 8 users can debug on a single accelerator.
## Compiling Code to Run With DDT
### Modules
Load all necessary modules to compile the code. For example:
```console
$ ml intel
$ ml impi ... or ... module load openmpi/X.X.X-icc
```
Load the Allinea DDT module:
```console
$ ml Forge
```
Compile the code:
```console
$ mpicc -g -O0 -o test_debug test.c
$ mpif90 -g -O0 -o test_debug test.f
```
### Compiler Flags
Before debugging, you need to compile your code with these flags:
!!! note
\* **-g** : Generates extra debugging information usable by GDB. -g3 includes even more debugging information. This option is available for GNU and INTEL C/C++ and Fortran compilers.
\* **-O0** : Suppresses all optimizations.
## Starting a Job With DDT
Be sure to log in with X window forwarding enabled. This could mean using the -X option with ssh:
```console
$ ssh -X username@anselm.it4i.cz
```
Another option is to access the login node using VNC. Please see the detailed information on how to [use the graphical user interface on Anselm](/general/accessing-the-clusters/graphical-user-interface/x-window-system/).
From the login node, an interactive session **with X windows forwarding** (-X option) can be started by the following command:
```console
$ qsub -I -X -A NONE-0-0 -q qexp -lselect=1:ncpus=16:mpiprocs=16,walltime=01:00:00
```
Then launch the debugger with the ddt command followed by the name of the executable to debug:
```console
$ ddt test_debug
```
The submission window that appears has a prefilled path to the executable to debug. You can select the number of MPI processes and/or OpenMP threads on which to run and press Run. Command line arguments to a program can be entered in the "Arguments" box.
![](../../../img/ddt1.png)
To start the debugging directly without the submission window, the user can specify the debugging and execution parameters from the command line. For example, the number of MPI processes is set by the option "-np 4". Skipping the dialog is done by the "-start" option. To see the list of "ddt" command line parameters, run "ddt --help".
```console
ddt -start -np 4 ./hello_debug_impi
```
## Documentation
Users can find the original User Guide after loading the DDT module:
```console
$DDTPATH/doc/userguide.pdf
```
[1] Discipline, Magic, Inspiration and Science: Best Practice Debugging with Allinea DDT, Workshop conducted at LLNL by Allinea on May 10, 2013, [link](https://computing.llnl.gov/tutorials/allineaDDT/index.html)
# Allinea Performance Reports
## Introduction
Allinea Performance Reports characterize the performance of HPC application runs. After executing your application through the tool, a synthetic HTML report is generated automatically, containing information about several metrics along with clear behavior statements and hints to help you improve the efficiency of your runs.
Allinea Performance Reports is most useful for profiling MPI programs.
Our license is limited to 64 MPI processes.
## Modules
Allinea Performance Reports version 6.0 is available
```console
$ ml PerformanceReports/6.0
```
The module sets up environment variables, required for using the Allinea Performance Reports. This particular command loads the default module, which is performance reports version 4.2.
## Usage
!!! note
Use the perf-report wrapper on your (MPI) program.
Instead of [running your MPI program the usual way](../mpi/), use the perf-report wrapper:
```console
$ perf-report mpirun ./mympiprog.x
```
The MPI program will run as usual. The perf-report wrapper creates two additional files, in \*.txt and \*.html format, containing the performance report. Note that [demanding MPI codes should be run within the queue system](../../job-submission-and-execution/); a minimal jobscript sketch follows.
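A minimal jobscript sketch for running perf-report inside the queue system (the project ID, node count, module versions and program name are placeholders taken from the examples on this page):
```bash
#!/bin/bash
#PBS -A PROJECT_ID
#PBS -q qprod
#PBS -l select=2:ncpus=16:mpiprocs=16:ompthreads=1
cd $PBS_O_WORKDIR
# load the toolchain and the Performance Reports module
ml intel impi PerformanceReports/6.0
# wrap the usual mpirun invocation with perf-report
perf-report mpirun ./mympiprog.x
```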
## Example
In this example, we will be profiling the mympiprog.x MPI program, using Allinea performance reports. Assume that the code is compiled with Intel compilers and linked against Intel MPI library:
First, we allocate some nodes via the express queue:
```console
$ qsub -q qexp -l select=2:ncpus=16:mpiprocs=16:ompthreads=1 -I
qsub: waiting for job 262197.dm2 to start
qsub: job 262197.dm2 ready
```
Then we load the modules and run the program the usual way:
```console
$ ml intel impi allinea-perf-report/4.2
$ mpirun ./mympiprog.x
```
Now let's profile the code:
```console
$ perf-report mpirun ./mympiprog.x
```
Performance report files [mympiprog_32p\*.txt](../../../src/mympiprog_32p_2014-10-15_16-56.txt) and [mympiprog_32p\*.html](../../../src/mympiprog_32p_2014-10-15_16-56.html) were created. We can see that the code is very efficient on MPI and is CPU bound.