Commits on Source (546)
Showing with 2292 additions and 82 deletions
...@@ -12,7 +12,7 @@ docs: ...@@ -12,7 +12,7 @@ docs:
image: it4innovations/docker-mdcheck:latest image: it4innovations/docker-mdcheck:latest
allow_failure: true allow_failure: true
script: script:
- mdl -r ~MD013,~MD010,~MD014,~MD024,~MD026,~MD029,~MD033,~MD036,~MD037,~MD046 *.md docs.it4i # BUGS - find content/docs -name "*.mdx" | xargs mdl -r ~MD002,~MD007,~MD013,~MD010,~MD014,~MD024,~MD026,~MD029,~MD033,~MD036,~MD037,~MD046
pylint: pylint:
stage: test stage: test
...@@ -22,20 +22,16 @@ pylint: ...@@ -22,20 +22,16 @@ pylint:
script: script:
- pylint $(find . -name "*.py" -not -name "feslicescript.py") - pylint $(find . -name "*.py" -not -name "feslicescript.py")
pysafety: capitalize:
stage: test stage: test
image: it4innovations/docker-pycheck:latest image: it4innovations/docker-mkdocscheck:latest
allow_failure: true allow_failure: true
before_script: before_script:
- source /opt/.venv3/bin/activate - source /opt/.venv3/bin/activate
- python -V # debug
- pip list | grep titlecase
script: script:
- cat requirements.txt | safety check --stdin --full-report - find content/docs/ \( -name '*.mdx' -o -name '*.yml' \) ! -path '*einfracz*' -print0 | xargs -0 -n1 scripts/titlemd.py --test
capitalize:
stage: test
image: it4innovations/docker-mkdocscheck:latest
script:
- find mkdocs.yml docs.it4i/ \( -name '*.md' -o -name '*.yml' \) -print0 | xargs -0 -n1 scripts/titlemd.py --test
ext_links: ext_links:
stage: after_test stage: after_test
...@@ -45,7 +41,7 @@ ext_links: ...@@ -45,7 +41,7 @@ ext_links:
# remove JSON results # remove JSON results
- rm *.json - rm *.json
script: script:
- find docs.it4i/ -name '*.md' -exec grep --color -l http {} + | xargs awesome_bot -t 10 --allow-dupe --allow-redirect - find content/docs -name '*.mdx' -exec grep --color -l http {} + | xargs awesome_bot -t 10 --allow-dupe --allow-redirect
only: only:
- master - master
...@@ -55,8 +51,8 @@ ext_links: ...@@ -55,8 +51,8 @@ ext_links:
before_script: before_script:
- echo "192.168.101.10 docs.it4i.cz" >> /etc/hosts - echo "192.168.101.10 docs.it4i.cz" >> /etc/hosts
- wget -V - wget -V
- echo https://docs.it4i.cz/devel/$CI_BUILD_REF_NAME/ - echo https://docs.it4i.cz/devel/$CI_COMMIT_REF_NAME/
- wget --spider -e robots=off -o wget.log -r -p https://docs.it4i.cz/devel/$CI_BUILD_REF_NAME/ || true - wget --spider -e robots=off -o wget.log -r -p https://docs.it4i.cz/devel/$CI_COMMIT_REF_NAME/ || true
script: script:
- cat wget.log | awk '/^Found [0-9]+ broken link[s]?.$/,/FINISHED/ { rc=-1; print $0 }; END { exit rc }' - cat wget.log | awk '/^Found [0-9]+ broken link[s]?.$/,/FINISHED/ { rc=-1; print $0 }; END { exit rc }'
...@@ -75,7 +71,7 @@ mkdocs: ...@@ -75,7 +71,7 @@ mkdocs:
# get modules list from clusters # get modules list from clusters
- bash scripts/get_modules.sh - bash scripts/get_modules.sh
# generate site_url # generate site_url
- (if [ "${CI_BUILD_REF_NAME}" != 'master' ]; then sed -i "s/\(site_url.*$\)/\1devel\/$CI_BUILD_REF_NAME\//" mkdocs.yml;fi); - (if [ "${CI_COMMIT_REF_NAME}" != 'master' ]; then sed -i "s/\(site_url.*$\)/\1devel\/$CI_COMMIT_REF_NAME\//" mkdocs.yml;fi);
# generate ULT for code link # generate ULT for code link
# - sed -i "s/master/$CI_BUILD_REF_NAME/g" material/partials/toc.html # - sed -i "s/master/$CI_BUILD_REF_NAME/g" material/partials/toc.html
# regenerate modules matrix # regenerate modules matrix
...@@ -113,7 +109,7 @@ deploy to stage: ...@@ -113,7 +109,7 @@ deploy to stage:
- echo -e "Host *\n\tStrictHostKeyChecking no\n\n" > ~/.ssh/config - echo -e "Host *\n\tStrictHostKeyChecking no\n\n" > ~/.ssh/config
script: script:
- chown nginx:nginx site -R - chown nginx:nginx site -R
- rsync -a --delete site/ root@"$SSH_HOST_STAGE":/srv/docs.it4i.cz/devel/$CI_BUILD_REF_NAME/ - rsync -a --delete site/ root@"$SSH_HOST_STAGE":/srv/docs.it4i.cz/devel/$CI_COMMIT_REF_NAME/
only: only:
- branches@sccs/docs.it4i.cz - branches@sccs/docs.it4i.cz
......
Quantum Scalar I6
JAN
LUMI
AI
CI/CD
AWS
CLI
FAQ
s3cmd
GUI
EESSI
hipBlas hipBlas
hipSolver hipSolver
LUMI LUMI
...@@ -822,3 +833,19 @@ e-INFRA CZ ...@@ -822,3 +833,19 @@ e-INFRA CZ
DICE DICE
qgpu qgpu
qcpu qcpu
it4i-portal-clients
it4icheckaccess
it4idedicatedtime
it4ifree
it4ifsusage
it4iuserfsusage
it4iprojectfsusage
it4imotd
e-INFRA
it4i-portal-clients
s3cmd
s5cmd
title:
e-INFRA CZ Cloud Ostrava
e-INFRA CZ Account
# User Documentation # IT4Innovations Documentation
This project contains IT4Innovations user documentation source. This project contains IT4Innovations user documentation source.
## Development ## Migration
### Install * [fumadocs](https://fumadocs.vercel.app/)
\ No newline at end of file
```console
$ sudo apt install libpython-dev
$ virtualenv venv
$ source venv/bin/activate
$ pip install -r requirements.txt
```
### Package Upgrade With pip
```console
$ pip list -o
$ pip install --upgrade package
$ pip freeze | sed '/pkg-resources==/d' > requirements.txt
```
## Environments
* [https://docs.it4i.cz](https://docs.it4i.cz) - master branch
* [https://docs.it4i.cz/devel/$BRANCH_NAME](https://docs.it4i.cz/devel/$BRANCH_NAME) - maps the branches, available only with VPN access
## URLs
* [http://facelessuser.github.io/pymdown-extensions/](http://facelessuser.github.io/pymdown-extensions/)
* [http://squidfunk.github.io/mkdocs-material/](http://squidfunk.github.io/mkdocs-material/)
```
fair-share
InfiniBand
RedHat
CentOS
Mellanox
```
## Mathematical Formulae
### Formulas Are Made With:
* [https://facelessuser.github.io/pymdown-extensions/extensions/arithmatex/](https://facelessuser.github.io/pymdown-extensions/extensions/arithmatex/)
* [https://www.mathjax.org/](https://www.mathjax.org/)
You can add a formula to a page like this:
```
$$
MAX\_FAIRSHARE * ( 1 - \frac{usage_{Project}}{usage_{Total}} )
$$
```
To enable MathJax on a page, add the line ```---8<--- "mathjax.md"``` at the end of the file.
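For illustration, a page that uses a formula might end like this (a minimal sketch; the heading is a placeholder and the formula reuses the example above):
```
# Fair-share Priority

$$
MAX\_FAIRSHARE * ( 1 - \frac{usage_{Project}}{usage_{Total}} )
$$

---8<--- "mathjax.md"
```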
# SCS API v2
## Info
- **OpenAPI:** 3.1.0
- **Title:** scs-api-2
- **Version:** 0.1.0
- **Server URL:** `https://scs.it4i.cz/api/v2`
## Paths
### `/dedicated-time`
**GET**
- **Summary:** Get dedicated times
- **Description:** Retrieves dedicated time entries, optionally filtered by cluster name or period preset
- **OperationId:** `dedicated_time_handler`
**Parameters:**
- `cluster` (query): Filter by cluster name; Available values: karolina, barbora, dgx *(optional)*
- `period` (query): Filter by time period preset; Available values: planned, active *(optional)*
**Responses:**
- `200`: List of dedicated time entries
- `400`: Failed to deserialize query, Invalid cluster, Invalid period
Example:
```json
{
"message": "Invalid cluster: el_gordo"
}
```
- `500`: Failed to retrieve dedicated time due to a server error
Example:
```json
{
"message": "Failed to retreive dedicated time"
}
```
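For example, the endpoint can be queried with any HTTP client; a minimal `curl` sketch using the optional filters described above (assuming the endpoint requires no authentication):
```console
$ curl "https://scs.it4i.cz/api/v2/dedicated-time?cluster=karolina&period=active"
```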
### `/dedicated-time-calendar`
**GET**
- **Summary:** Get dedicated times
- **Description:** Retrieves dedicated time entries and generates a VCalendar response.
- **OperationId:** `dedicated_time_calendar`
**Responses:**
- `200`: Dedicated time VCalendar
Example:
```
BEGIN:VCALENDAR
VERSION:2.0
PRODID:-//SUTD Timetable Calendar//randName//EN
CALSCALE:GREGORIAN
BEGIN:VEVENT
UID:1234@example.com
DTSTAMP:20230101T000000Z
DTSTART:20230101T000000Z
DTEND:20230102T000000Z
SUMMARY:Sample Dedicated Time - Cluster Outage
DESCRIPTION:Sample Dedicated Time - Cluster Outage
END:VEVENT
END:VCALENDAR
```
- `500`: Failed to retrieve dedicated time calendar
Example:
```json
{
"message": "Failed to retreive dedicated time calendar"
}
```
### `/motd`
**GET**
- **Summary:** Get messages of the day
- **Description:** Retrieves messages of the day, optionally filtered by category
- **OperationId:** `motd`
**Parameters:**
- `category` (query): *(optional)*
**Responses:**
- `200`: List of motd entries
- `400`: Failed to deserialize query, Invalid motd category
- `500`: Failed to retrieve motd entries due to a server error
Example:
```json
{
"message": "Failed to retrieve motd"
}
```
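A hedged example of querying this endpoint with `curl`; the category value is borrowed from the Motd schema example below and may differ in practice:
```console
$ curl "https://scs.it4i.cz/api/v2/motd?category=public-service-announcement"
```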
## Components
### Schemas
#### DedicatedTime
```yaml
type: object
required:
- updated_at
properties:
cluster_type:
type: [string, 'null']
date_efficiency:
type: [string, 'null']
format: date-time
date_expiration:
type: [string, 'null']
format: date-time
updated_at:
type: string
format: date-time
```
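For orientation, a single entry consistent with this schema might look as follows (illustrative values only):
```json
{
  "cluster_type": "karolina",
  "date_efficiency": "2025-03-01T08:00:00Z",
  "date_expiration": "2025-03-01T16:00:00Z",
  "updated_at": "2025-02-20T12:00:00Z"
}
```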
#### Motd
```yaml
type: object
required:
- id
- author
- category
- created_at
- updated_at
- date_modification
- title
- message_body
- systems
properties:
id:
type: integer
format: int32
examples: [1]
author:
type: string
examples: [Admin]
category:
type: string
examples: [public-service-announcement]
created_at:
type: string
format: date-time
updated_at:
type: string
format: date-time
date_modification:
type: string
format: date-time
date_efficiency:
type: [string, 'null']
format: date-time
date_expiration:
type: [string, 'null']
format: date-time
date_outage_efficiency:
type: [string, 'null']
format: date-time
date_outage_expiration:
type: [string, 'null']
format: date-time
title:
type: string
examples: [Important Update]
message_body:
type: string
examples: [We are experiencing some service disruptions.]
systems:
type: array
items:
type: string
examples: [Karolina]
```
#### MsgResponse
```yaml
type: object
description: |
Common struct for DTO-less responses
eg. ```200 {"message":"Operation succeeded"}```
required:
- message
properties:
message:
type: string
examples: [API response]
```
# Hardware Overview
!!!important Work in progress
Barbora NG documentation is a WIP.
The documentation is still being developed (reflecting changes in technical specifications) and may be updated frequently.
The launch of Barbora NG is planned for October/November.
In the meantime, the first computational resources have already been allocated in the latest Open Access Grant Competition.
Barbora NG consists of 141 non-accelerated compute nodes named **cn[?-???]**.
Each node is a powerful x86-64 computer equipped with 192 cores
(2x Intel Xeon 6952P, 96 cores each) and 768 GB RAM.
User access to the Barbora NG cluster is provided by two login nodes **login[1-2]**.
The nodes are interlinked through high speed InfiniBand NDR and Ethernet networks.
The parameters are summarized in the following tables:
| **In general** | |
| ------------------------------------ | --------------------- |
| Architecture of compute nodes | x86-64 |
| Operating system | Linux |
| [**Compute nodes**][1] | |
| Total | 141 |
| Processor Type | [Intel Xeon 6952P][b] |
| Architecture | Granite Rapids |
| Processor cores | 96 |
| Processors per node | 2 |
| RAM | 768 GB |
| Local disk drive | no |
| Compute network | InfiniBand HDR |
| non-accelerated | 141, cn[001-141] |
| **In total** | |
| Theoretical peak performance (Rpeak) | ??? TFLOP/s |
| Cores | 27072 |
| RAM | 108.288 TB |
[1]: compute-nodes.md
[2]: ../general/resources-allocation-policy.md
[3]: network.md
[4]: storage.md
[5]: ../general/shell-and-data-access.md
[6]: visualization.md
[a]: https://support.it4i.cz/rt
[b]: https://www.intel.com/content/www/us/en/products/sku/241643/intel-xeon-6952p-processor-480m-cache-2-10-ghz/specifications.html
\ No newline at end of file
# Introduction
!!!important Work in progress
Barbora NG documentation is a WIP.
The documentation is still being developed (reflecting changes in technical specifications) and may be updated frequently.
The launch of Barbora NG is planned for October/November.
In the meantime, the first computational resources have already been allocated in the latest Open Access Grant Competition.
Welcome to the Barbora Next Gen (NG) supercomputer cluster.
Barbora NG is our latest supercomputer, which consists of 141 compute nodes,
totaling 27072 compute cores with 108288 GB RAM and giving over ??? TFLOP/s theoretical peak performance.
Nodes are interconnected through a fully non-blocking fat-tree InfiniBand NDR network
and are equipped with Intel Granite Rapids processors.
Read more in [Hardware Overview][1].
The cluster runs with an operating system compatible with the Red Hat [Linux family][a]. We have installed a wide range of software packages targeted at different scientific domains.
These packages are accessible via the [modules environment][2].
The user data shared file system and job data shared file system are available to users.
The [Slurm][b] workload manager provides [computing resources allocations and job execution][3].
Read more on how to [apply for resources][4], [obtain login credentials][5] and [access the cluster][6].
[1]: hardware-overview.md
[2]: ../environment-and-modules.md
[3]: ../general/resources-allocation-policy.md
[4]: ../general/applying-for-resources.md
[5]: ../general/obtaining-login-credentials/obtaining-login-credentials.md
[6]: ../general/shell-and-data-access.md
[a]: http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg
[b]: https://slurm.schedmd.com/
...@@ -8,7 +8,7 @@ The cluster runs with an operating system compatible with the Red Hat [Linux fam ...@@ -8,7 +8,7 @@ The cluster runs with an operating system compatible with the Red Hat [Linux fam
The user data shared file system and job data shared file system are available to users. The user data shared file system and job data shared file system are available to users.
The [PBS Professional Open Source Project][b] workload manager provides [computing resources allocations and job execution][3]. The [Slurm][b] workload manager provides [computing resources allocations and job execution][3].
Read more on how to [apply for resources][4], [obtain login credentials][5] and [access the cluster][6]. Read more on how to [apply for resources][4], [obtain login credentials][5] and [access the cluster][6].
...@@ -22,4 +22,4 @@ Read more on how to [apply for resources][4], [obtain login credentials][5] and ...@@ -22,4 +22,4 @@ Read more on how to [apply for resources][4], [obtain login credentials][5] and
[6]: ../general/shell-and-data-access.md [6]: ../general/shell-and-data-access.md
[a]: http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg [a]: http://upload.wikimedia.org/wikipedia/commons/1/1b/Linux_Distribution_Timeline.svg
[b]: https://www.pbspro.org/ [b]: https://slurm.schedmd.com/
...@@ -120,7 +120,7 @@ The filesystem is backed up, so that it can be restored in case of a catastrophi ...@@ -120,7 +120,7 @@ The filesystem is backed up, so that it can be restored in case of a catastrophi
The SCRATCH is realized as Lustre parallel file system and is available from all login and computational nodes. There are 5 OSTs dedicated for the SCRATCH file system. The SCRATCH is realized as Lustre parallel file system and is available from all login and computational nodes. There are 5 OSTs dedicated for the SCRATCH file system.
The SCRATCH filesystem is mounted in directory /scratch. Users may freely create subdirectories and files on the filesystem. Accessible capacity is 310TB, shared among all users. Individual users are restricted by filesystem usage quotas, set to 10TB per user. The purpose of this quota is to prevent runaway programs from filling the entire filesystem and deny service to other users. Should 10TB prove insufficient, contact [support][d], the quota may be lifted upon request. The SCRATCH filesystem is mounted in the `/scratch/project/PROJECT_ID` directory created automatically with the `PROJECT_ID` project. Accessible capacity is 310TB, shared among all users. Individual users are restricted by filesystem usage quotas, set to 10TB per user. The purpose of this quota is to prevent runaway programs from filling the entire filesystem and deny service to other users. Should 10TB prove insufficient, contact [support][d], the quota may be lifted upon request.
!!! note !!! note
The Scratch filesystem is intended for temporary scratch data generated during the calculation as well as for high-performance access to input and output files. All I/O intensive jobs must use the SCRATCH filesystem as their working directory. The Scratch filesystem is intended for temporary scratch data generated during the calculation as well as for high-performance access to input and output files. All I/O intensive jobs must use the SCRATCH filesystem as their working directory.
......
# e-INFRA CZ Cloud Ostrava # e-INFRA CZ Cloud Ostrava
Ostrava cloud consists of 28 nodes from [Karolina][a] supercomputer. Ostrava cloud consists of 22 nodes from the [Karolina][a] supercomputer.
The cloud site is built on top of OpenStack, The cloud site is built on top of OpenStack,
which is a free open standard cloud computing platform. which is a free open standard cloud computing platform.
...@@ -61,15 +61,15 @@ For the list of deployed OpenStack services, see the [list of components][1]. ...@@ -61,15 +61,15 @@ For the list of deployed OpenStack services, see the [list of components][1].
More information can be found on the [e-INFRA CZ website][2]. More information can be found on the [e-INFRA CZ website][2].
[1]: https://docs.e-infra.cz/compute/openstack/technical-reference/ostrava-site/openstack-components/ [1]: https://docs.platforms.cloud.e-infra.cz/en/docs/technical-reference/ostrava-g2-site/openstack-components
[2]: https://docs.e-infra.cz/compute/openstack/technical-reference/ostrava-site/ [2]: https://docs.platforms.cloud.e-infra.cz/en/docs/technical-reference/ostrava-g2-site
[3]: https://docs.e-infra.cz/account/ [3]: https://docs.account.e-infra.cz/en/docs/access/account#how-to-apply-for-the-first-time
[4]: https://docs.e-infra.cz/compute/openstack/getting-started/creating-first-infrastructure/ [4]: https://docs.platforms.cloud.e-infra.cz/en/docs/getting-started/creating-first-infrastructure
[5]: https://docs.e-infra.cz/compute/openstack/technical-reference/ostrava-site/quota-limits/ [5]: https://docs.platforms.cloud.e-infra.cz/en/docs/technical-reference/ostrava-g2-site/quota-limits
[6]: https://ostrava.openstack.cloud.e-infra.cz/ [6]: https://ostrava.openstack.cloud.e-infra.cz/
[7]: https://docs.fuga.cloud/how-to-use-the-openstack-cli-tools-on-linux [7]: https://cyso.cloud/docs/cloud/extra/how-to-use-the-openstack-cli-tools-on-linux/
[8]: https://code.it4i.cz/dvo0012/infrastructure-by-script/-/tree/main/openstack-infrastructure-as-code-automation/clouds/g2/ostrava/general/terraform [8]: https://code.it4i.cz/dvo0012/infrastructure-by-script/-/tree/main/openstack-infrastructure-as-code-automation/clouds/g2/ostrava/general/terraform
[9]: https://docs.e-infra.cz/compute/openstack/how-to-guides/obtaining-api-key/ [9]: https://docs.platforms.cloud.e-infra.cz/en/docs/how-to-guides/obtaining-api-key
[10]: https://code.it4i.cz/dvo0012/infrastructure-by-script/-/tree/main/openstack-infrastructure-as-code-automation/clouds/g2/ostrava/general/commandline [10]: https://code.it4i.cz/dvo0012/infrastructure-by-script/-/tree/main/openstack-infrastructure-as-code-automation/clouds/g2/ostrava/general/commandline
[a]: ../karolina/introduction.md [a]: ../karolina/introduction.md
......
# IT4I Cloud
IT4I cloud consists of 14 nodes from the [Karolina][a] supercomputer.
The cloud site is built on top of OpenStack,
which is a free open standard cloud computing platform.
!!! Note
The guide describes steps for personal projects.<br>
Some steps may differ for large projects.<br>
For large projects, apply for resources to the [Allocation Committee][11].
## Access
To access the cloud you must be a member of an active EUROHPC project,
or fall into the **Access Category B**, i.e. [Access For Thematic HPC Resource Utilisation][11].
A personal OpenStack project is required. Request one by contacting [IT4I Support][12].
The dashboard is available at [https://cloud.it4i.cz][6].
You can see quotas set for the IT4I Cloud in the [Quota Limits][f] section.
## Creating First Instance
To create your first VM instance, follow the steps below:
### Log In
Go to [https://cloud.it4i.cz][6], enter your LDAP username and password and choose the `IT4I_LDAP` domain. After you sign in, you will be redirected to the dashboard.
![](../img/login.png)
### Create Key Pair
An SSH key is required for remote access to your instance.
1. Go to **Project > Compute > Key Pairs** and click the **Create Key Pair** button.
![](../img/keypairs.png)
1. In the Create Key Pair window, name your key pair, select `SSH Key` for key type and confirm by clicking Create Key Pair.
![](../img/keypairs1.png)
1. Download and manage the private key according to your operating system.
### Update Security Group
To be able to remotely access your VM instance, you have to allow access in the security group.
1. Go to **Project > Network > Security Groups** and click on **Manage Rules** for the default security group.
![](../img/securityg.png)
1. Click on **Add Rule**, choose **SSH**, and leave the remaining fields unchanged.
![](../img/securityg1.png)
### Create VM Instance
1. In **Compute > Instances**, click **Launch Instance**.
![](../img/instance.png)
1. Choose Instance Name, Description, and number of instances. Click **Next**.
![](../img/instance1.png)
1. Choose an image from which to boot the instance. Choose to delete the volume after instance delete. Click **Next**.
![](../img/instance2.png)
1. Choose the hardware resources of the instance by selecting a flavor. Additional volumes for data can be attached later on. Click **Next**.
![](../img/instance3.png)
1. Select the network and continue to **Security Groups**.
![](../img/instance4.png)
1. Allocate the security group with the SSH rule that you added in the [Update Security Group](it4i-cloud.md#update-security-group) step. Then click **Next** to go to the **Key Pair**.
![](../img/securityg2.png)
1. Select the key that you created in the [Create Key Pair][g] section and launch the instance.
![](../img/instance5.png)
### Associate Floating IP
1. Click on the **Associate** button next to the floating IP.
![](../img/floatingip.png)
1. Select Port to be associated with the instance, then click the **Associate** button.
Now you can connect to the VM using your preferred SSH client.
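For example (a sketch only; the key path and address are placeholders, and the login username depends on the image you selected):
```console
ssh -i ~/.ssh/my-cloud-key ubuntu@<FLOATING_IP>
```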
## Process Automation
You can automate the process using the OpenStack CLI.
### OpenStack
Prerequisites:
* a Linux/Mac/WSL terminal with a BASH shell
* an installed [OpenStack client][7]
Follow the guide: [https://code.it4i.cz/commandline][10]
Run commands:
```console
source project_openrc.sh.inc
```
```console
./cmdline-demo.sh basic-infrastructure-1
```
[1]: https://docs.e-infra.cz/compute/openstack/technical-reference/ostrava-site/openstack-components/
[2]: https://docs.e-infra.cz/compute/openstack/technical-reference/ostrava-site/
[3]: https://docs.e-infra.cz/account/
[4]: https://docs.e-infra.cz/compute/openstack/getting-started/creating-first-infrastructure/
[5]: https://docs.e-infra.cz/compute/openstack/technical-reference/ostrava-g2-site/quota-limits/
[6]: https://cloud.it4i.cz
[7]: https://docs.fuga.cloud/how-to-use-the-openstack-cli-tools-on-linux
[8]: https://code.it4i.cz/dvo0012/infrastructure-by-script/-/tree/main/openstack-infrastructure-as-code-automation/clouds/g2/ostrava/general/terraform
[9]: https://docs.e-infra.cz/compute/openstack/how-to-guides/obtaining-api-key/
[10]: https://code.it4i.cz/dvo0012/infrastructure-by-script/-/tree/main/openstack-infrastructure-as-code-automation/clouds/g2/ostrava/general/commandline
[11]: https://www.it4i.cz/en/for-users/computing-resources-allocation
[12]: mailto:support@it4i.cz
[a]: ../karolina/introduction.md
[b]: ../general/access/project-access.md
[c]: einfracz-cloud.md
[d]: ../general/accessing-the-clusters/vpn-access.md
[e]: ../general/obtaining-login-credentials/obtaining-login-credentials.md
[f]: it4i-quotas.md
[g]: it4i-cloud.md#create-key-pair
# IT4I Cloud Quotas
| Resource | Quota |
|---------------------------------------|-------|
| Instances | 10 |
| VCPUs | 20 |
| RAM | 32GB |
| Volumes | 20 |
| Volume Snapshots | 12 |
| Volume Storage | 500 GB |
| Floating-IPs | 1 |
| Security Groups | 10 |
| Security Group Rules | 100 |
| Networks | 1 |
| Ports | 10 |
| Routers | 1 |
| Backups | 12 |
| Groups | 10 |
| rbac_policies | 10 |
| Subnets | 1 |
| Subnet_pools | -1 |
| Fixed-ips | -1 |
| Injected-file-size | 10240 |
| Injected-path-size | 255 |
| Injected-files | 5 |
| Key-pairs | 100 |
| Properties | 128 |
| Server-groups | 10 |
| Server-group-members | 10 |
| Backup-gigabytes | 1002 |
| Per-volume-gigabytes | -1 |
# Using NVIDIA Grace Partition
For testing your application on the NVIDIA Grace Partition,
you need to prepare a job script for that partition or use the interactive job:
```console
salloc -N 1 -c 144 -A PROJECT-ID -p p11-grace --time=08:00:00
```
where:
- `-N 1` allocates a single node,
- `-c 144` allocates 144 cores,
- `-p p11-grace` selects the NVIDIA Grace partition,
- `--time=08:00:00` allocates the resources for 8 hours.
## Available Toolchains
The platform offers three toolchains:
- Standard GCC (as a module `ml GCC`)
- [NVHPC](https://developer.nvidia.com/hpc-sdk) (as a module `ml NVHPC`)
- [Clang for NVIDIA Grace](https://developer.nvidia.com/grace/clang) (installed in `/opt/nvidia/clang`)
!!! note
In our initial evaluation, the NVHPC toolchain showed strong results with a minimal amount of tuning.
### GCC Toolchain
The GCC compiler seems to struggle with vectorization of short (constant-length) loops, which tend to get completely unrolled/eliminated instead of being vectorized. For example, a simple nested loop such as
```cpp
for(int i = 0; i < 1000000; ++i) {
// Iterations dependent in "i"
// ...
for(int j = 0; j < 8; ++j) {
// but independent in "j"
// ...
}
}
```
may emit scalar code for the inner loop, leading to no vectorization being used at all.
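One way to verify what GCC did is to request its vectorization report (a sketch; `loop.cpp` stands for the code above):
```console
g++ -O3 -march=native -fopt-info-vec-all loop.cpp -o loop
```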
### Clang (For Grace) Toolchain
Clang/LLVM tends to behave similarly, but it can be guided to properly vectorize the inner loop with either the flags `-O3 -ffast-math -march=native -fno-unroll-loops -mllvm -force-vector-width=8` or pragmas such as `#pragma clang loop vectorize_width(8)` and `#pragma clang loop unroll(disable)`.
```cpp
for(int i = 0; i < 1000000; ++i) {
// Iterations dependent in "i"
// ...
#pragma clang loop unroll(disable) vectorize_width(8)
for(int j = 0; j < 8; ++j) {
// but independent in "j"
// ...
}
}
```
!!! note
Our basic experiments show that fixed-width vectorization (NEON) tends to perform better than SVE in the case of short (register-length) loops. In cases (like the above), where the specified `vectorize_width` is larger than the available vector unit width, Clang will emit multiple NEON instructions (e.g., 4 instructions will be emitted to process 8 64-bit operations in the 128-bit units of Grace).
### NVHPC Toolchain
The NVHPC toolchain handled the aforementioned case without any additional tuning, so a simple `-O3 -march=native -fast` should be sufficient.
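For example, compiling the nested-loop example above might look like this (the source file name is a placeholder):
```console
ml NVHPC
nvc++ -O3 -march=native -fast loop.cpp -o loop
```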
## Basic Math Libraries
The basic libraries (BLAS and LAPACK) are included in the NVHPC toolchain and can be linked simply with `-lblas` and `-llapack` for BLAS and LAPACK, respectively (`lp64` and `ilp64` versions are also included).
!!! note
The Grace platform doesn't include a CUDA-capable GPU, therefore `nvcc` will fail with an error. This means that `nvc`, `nvc++`, and `nvfortran` should be used instead.
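A minimal linking sketch with the NVHPC compilers (`myprog.c` is a placeholder for code calling BLAS/LAPACK routines):
```console
ml NVHPC
nvc -O3 -march=native myprog.c -o myprog -llapack -lblas
```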
### NVIDIA Performance Libraries
The [NVPL](https://developer.nvidia.com/nvpl) package includes a more extensive set of libraries in both sequential and multi-threaded versions:
- BLACS: `-lnvpl_blacs_{lp64,ilp64}_{mpich,openmpi3,openmpi4,openmpi5}`
- BLAS: `-lnvpl_blas_{lp64,ilp64}_{seq,gomp}`
- FFTW: `-lnvpl_fftw`
- LAPACK: `-lnvpl_lapack_{lp64,ilp64}_{seq,gomp}`
- ScaLAPACK: `-lnvpl_scalapack_{lp64,ilp64}`
- RAND: `-lnvpl_rand` or `-lnvpl_rand_mt`
- SPARSE: `-lnvpl_sparse`
This package should be compatible with all available toolchains and includes CMake module files for easy integration into CMake-based projects. For further documentation, see [NVPL](https://docs.nvidia.com/nvpl).
### Recommended BLAS Library
We recommend using the multi-threaded BLAS library from the NVPL package.
!!! note
It is important to pin the threads using **OMP_PROC_BIND=spread**.
Example:
```console
$ ml NVHPC
$ nvc -O3 -march=native myprog.c -o myprog -lnvpl_blas_lp64_gomp
$ OMP_PROC_BIND=spread ./myprog
```
## Basic Communication Libraries
The OpenMPI 4 implementation is included with the NVHPC toolchain and is exposed as a module (`ml OpenMPI`). The following example
```cpp
#include <mpi.h>
#include <sched.h>
#include <omp.h>
int main(int argc, char **argv)
{
int rank;
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &rank);
#pragma omp parallel
{
printf("Hello on rank %d, thread %d on CPU %d\n", rank, omp_get_thread_num(), sched_getcpu());
}
MPI_Finalize();
}
```
can be compiled and run as follows
```console
ml OpenMPI
mpic++ -fast -fopenmp hello.cpp -o hello
OMP_PROC_BIND=close OMP_NUM_THREADS=4 mpirun -np 4 --map-by slot:pe=36 ./hello
```
In this configuration, we run 4 ranks, each bound to one quarter of the cores, with 4 OpenMP threads per rank.
## Simple BLAS Application
The `hello world` example application (written in `C++` and `Fortran`) uses simple stationary probability vector estimation to illustrate the use of GEMM (a BLAS 3 routine).
Stationary probability vector estimation in `C++`:
```cpp
#include <iostream>
#include <vector>
#include <chrono>
#include "cblas.h"
const size_t ITERATIONS = 32;
const size_t MATRIX_SIZE = 1024;
int main(int argc, char *argv[])
{
const size_t matrixElements = MATRIX_SIZE*MATRIX_SIZE;
std::vector<float> a(matrixElements, 1.0f / float(MATRIX_SIZE));
for(size_t i = 0; i < MATRIX_SIZE; ++i)
a[i] = 0.5f / (float(MATRIX_SIZE) - 1.0f);
a[0] = 0.5f;
std::vector<float> w1(matrixElements, 0.0f);
std::vector<float> w2(matrixElements, 0.0f);
std::copy(a.begin(), a.end(), w1.begin());
std::vector<float> *t1, *t2;
t1 = &w1;
t2 = &w2;
auto c1 = std::chrono::steady_clock::now();
for(size_t i = 0; i < ITERATIONS; ++i)
{
std::fill(t2->begin(), t2->end(), 0.0f);
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, MATRIX_SIZE, MATRIX_SIZE, MATRIX_SIZE,
1.0f, t1->data(), MATRIX_SIZE,
a.data(), MATRIX_SIZE,
1.0f, t2->data(), MATRIX_SIZE);
std::swap(t1, t2);
}
auto c2 = std::chrono::steady_clock::now();
for(size_t i = 0; i < MATRIX_SIZE; ++i)
{
std::cout << (*t1)[i*MATRIX_SIZE + i] << " ";
}
std::cout << std::endl;
std::cout << "Elapsed Time: " << std::chrono::duration<double>(c2 - c1).count() << std::endl;
return 0;
}
```
Stationary probability vector estimation in `Fortran`:
```fortran
program main
implicit none
integer :: matrix_size, iterations
integer :: i
real, allocatable, target :: a(:,:), w1(:,:), w2(:,:)
real, dimension(:,:), contiguous, pointer :: t1, t2, tmp
real, pointer :: out_data(:), out_diag(:)
integer :: cr, cm, c1, c2
iterations = 32
matrix_size = 1024
call system_clock(count_rate=cr)
call system_clock(count_max=cm)
allocate(a(matrix_size, matrix_size))
allocate(w1(matrix_size, matrix_size))
allocate(w2(matrix_size, matrix_size))
a(:,:) = 1.0 / real(matrix_size)
a(:,1) = 0.5 / real(matrix_size - 1)
a(1,1) = 0.5
w1 = a
w2(:,:) = 0.0
t1 => w1
t2 => w2
call system_clock(c1)
do i = 0, iterations
t2(:,:) = 0.0
call sgemm('N', 'N', matrix_size, matrix_size, matrix_size, 1.0, t1, matrix_size, a, matrix_size, 1.0, t2, matrix_size)
tmp => t1
t1 => t2
t2 => tmp
end do
call system_clock(c2)
out_data(1:size(t1)) => t1
out_diag => out_data(1::matrix_size+1)
print *, out_diag
print *, "Elapsed Time: ", (c2 - c1) / real(cr)
deallocate(a)
deallocate(w1)
deallocate(w2)
end program main
```
### Using NVHPC Toolchain
The C++ version of the example can be compiled with NVHPC and run as follows
```console
ml NVHPC
nvc++ -O3 -march=native -fast -I$NVHPC/Linux_aarch64/$EBVERSIONNVHPC/compilers/include/lp64 -lblas main.cpp -o main
OMP_NUM_THREADS=144 OMP_PROC_BIND=spread ./main
```
The Fortran version is just as simple:
```console
ml NVHPC
nvfortran -O3 -march=native -fast -lblas main.f90 -o main.x
OMP_NUM_THREADS=144 OMP_PROC_BIND=spread ./main.x
```
!!! note
It may be advantageous to use the NVPL libraries instead of the NVHPC ones. For example, the DGEMM BLAS 3 routine from NVPL is almost 30% faster than the NVHPC one.
### Using Clang (For Grace) Toolchain
Similarly, the Clang for Grace toolchain with NVPL BLAS can be used to compile the C++ version of the example.
```console
ml NVHPC
/opt/nvidia/clang/17.23.11/bin/clang++ -O3 -march=native -ffast-math -I$NVHPC/Linux_aarch64/$EBVERSIONNVHPC/compilers/include/lp64 -lnvpl_blas_lp64_gomp main.cpp -o main
```
!!! note
The NVHPC module is used just for the `cblas.h` include in this case. This can be avoided by changing the code to use `nvpl_blas.h` instead.
## Additional Resources
- [https://www.nvidia.com/en-us/data-center/grace-cpu-superchip/][1]
- [https://developer.nvidia.com/hpc-sdk][2]
- [https://developer.nvidia.com/grace/clang][3]
- [https://docs.nvidia.com/nvpl][4]
[1]: https://www.nvidia.com/en-us/data-center/grace-cpu-superchip/
[2]: https://developer.nvidia.com/hpc-sdk
[3]: https://developer.nvidia.com/grace/clang
[4]: https://docs.nvidia.com/nvpl
# Heterogeneous Memory Management on Intel Platforms
Partition `p10-intel` offers heterogeneous memory directly exposed to the user. This allows manually picking the appropriate kind of memory to be used at process or even single-allocation granularity. Both kinds of memory are exposed as memory-only NUMA nodes, which allows both coarse-grained (process-level) and fine-grained (allocation-level) control over the memory type used.
## Overview
At the process level, the `numactl` facilities can be utilized, while the Intel-provided `memkind` library allows for finer control. Both the `memkind` library and `numactl` can be accessed by loading the `memkind` module or the `OpenMPI` module (the latter provides only `numactl`).
```bash
ml memkind
```
### Process Level (NUMACTL)
The `numactl` utility allows you to either restrict the memory pool of the process to a specific set of NUMA memory nodes
```bash
numactl --membind <node_ids_set>
```
or select a single preferred node
```bash
numactl --preferred <node_id>
```
where `<node_ids_set>` is a comma-separated list (e.g., `0,2,5,...`), possibly in combination with ranges (such as `0-5`). The `membind` option kills the process if it requests more memory than can be satisfied from the specified nodes. The `preferred` option instead reverts to using other nodes according to their NUMA distance in the same situation.
A convenient way to check the `numactl` configuration is
```bash
numactl -s
```
which prints the configuration in its execution environment, e.g.
```bash
numactl --membind 8-15 numactl -s
policy: bind
preferred node: 0
physcpubind: 0 1 2 ... 189 190 191
cpubind: 0 1 2 3 4 5 6 7
nodebind: 0 1 2 3 4 5 6 7
membind: 8 9 10 11 12 13 14 15
```
The last row shows that memory allocations are restricted to NUMA nodes `8-15`.
### Allocation Level (MEMKIND)
The `memkind` library (in its simplest use case) offers a new variant of the `malloc/free` function pair, which allows specifying the kind of memory to be used for a given allocation. Moving a specific allocation from the default to the HBM memory pool can then be achieved by replacing:
```cpp
void *pData = malloc(<SIZE>);
/* ... */
free(pData);
```
with
```cpp
#include <memkind.h>
void *pData = memkind_malloc(MEMKIND_HBW, <SIZE>);
/* ... */
memkind_free(NULL, pData); // "kind" parameter is deduced from the address
```
Other memory types can be chosen similarly.
!!! note
The allocation will return a `NULL` pointer when memory of the specified kind is not available.
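A minimal sketch of guarding against this case, falling back to the default (DDR) kind when HBM is not available:
```cpp
#include <memkind.h>
#include <stddef.h>

void *alloc_prefer_hbw(size_t size)
{
    // Try the high-bandwidth memory kind first...
    void *p = memkind_malloc(MEMKIND_HBW, size);
    // ...and fall back to the default kind if HBM cannot satisfy the request.
    if (p == NULL)
        p = memkind_malloc(MEMKIND_DEFAULT, size);
    return p;
}
```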
## High Bandwidth Memory (HBM)
Intel Sapphire Rapids (partition `p10-intel`) consists of two sockets, each with `128GB` of DDR and `64GB` of on-package HBM memory. The machine is configured in FLAT mode and therefore exposes HBM memory as memory-only NUMA nodes (`16GB` per 12-core tile). The configuration can be verified by running
```bash
numactl -H
```
which should show 16 NUMA nodes (nodes `0-7` should each contain 12 cores and `32GB` of DDR DRAM, while nodes `8-15` should each have no cores and `16GB` of HBM).
![](../../img/cs/guides/p10_numa_sc4_flat.png)
### Process Level
With this, we can easily restrict an application to DDR DRAM or HBM memory:
```bash
# Only DDR DRAM
numactl --membind 0-7 ./stream
# ...
Function Best Rate MB/s Avg time Min time Max time
Copy: 369745.8 0.043355 0.043273 0.043588
Scale: 366989.8 0.043869 0.043598 0.045355
Add: 378054.0 0.063652 0.063483 0.063899
Triad: 377852.5 0.063621 0.063517 0.063884
# Only HBM
numactl --membind 8-15 ./stream
# ...
Function Best Rate MB/s Avg time Min time Max time
Copy: 1128430.1 0.015214 0.014179 0.015615
Scale: 1045065.2 0.015814 0.015310 0.016309
Add: 1096992.2 0.022619 0.021878 0.024182
Triad: 1065152.4 0.023449 0.022532 0.024559
```
The DDR DRAM achieves a bandwidth of around 400GB/s, while the HBM clears the 1TB/s bar.
Some further improvements can be achieved by entirely isolating a process to a single tile. This can be useful for MPI jobs, where `$OMPI_COMM_WORLD_RANK` can be used to bind each process individually. A simple wrapper script to do this may look like
```bash
#!/bin/bash
numactl --membind $((8 + $OMPI_COMM_WORLD_RANK)) $@
```
and can be used as
```bash
mpirun -np 8 --map-by slot:pe=12 membind_wrapper.sh ./stream_mpi
```
(8 tiles with 12 cores each). However, this approach assumes that the `16GB` of HBM memory local to the tile is sufficient for each process (memory cannot spill between tiles). This approach may be significantly more useful in combination with `--preferred` instead of `--membind`, to prefer local HBM while allowing spills to DDR DRAM. Otherwise
```bash
mpirun -n 8 --map-by slot:pe=12 numactl --membind 8-15 ./stream_mpi
```
is most likely preferable even for MPI workloads. Applying the above approach to MPI Stream with 8 ranks and 1-24 threads per rank, we can expect these results:
![](../../img/cs/guides/p10_stream_dram.png)
![](../../img/cs/guides/p10_stream_hbm.png)
### Allocation Level
Allocation-level memory kind selection using the `memkind` library can be illustrated with a modified stream benchmark. The stream benchmark uses three working arrays (A, B, and C), whose allocation can be changed to `memkind_malloc` as follows
```cpp
#include <memkind.h>
// ...
STREAM_TYPE *a = (STREAM_TYPE *)memkind_malloc(MEMKIND_HBW_ALL, STREAM_ARRAY_SIZE * sizeof(STREAM_TYPE));
STREAM_TYPE *b = (STREAM_TYPE *)memkind_malloc(MEMKIND_REGULAR, STREAM_ARRAY_SIZE * sizeof(STREAM_TYPE));
STREAM_TYPE *c = (STREAM_TYPE *)memkind_malloc(MEMKIND_HBW_ALL, STREAM_ARRAY_SIZE * sizeof(STREAM_TYPE));
// ...
memkind_free(NULL, a);
memkind_free(NULL, b);
memkind_free(NULL, c);
```
Arrays A and C are allocated from HBM (`MEMKIND_HBW_ALL`), while DDR DRAM (`MEMKIND_REGULAR`) is used for B.
The code then has to be linked with the `memkind` library
```bash
gcc -march=native -O3 -fopenmp -lmemkind memkind_stream.c -o memkind_stream
```
and can be run as
```bash
export MEMKIND_HBW_NODES=8,9,10,11,12,13,14,15
OMP_NUM_THREADS=$((N*12)) OMP_PROC_BIND=spread ./memkind_stream
```
While the `memkind` library should be able to detect HBM memory on its own (through `HMAT` and `hwloc`), this is not supported on `p10-intel`. This means that the NUMA nodes representing HBM have to be specified manually using the `MEMKIND_HBW_NODES` environment variable.
![](../../img/cs/guides/p10_stream_memkind.png)
With this setup, we can see that the simple copy operation (C[i] = A[i]) achieves bandwidth comparable to the application bound entirely to HBM memory. On the other hand, the scale operation (B[i] = s*C[i]) is mostly limited by DDR DRAM bandwidth. It is also worth noting that operations combining all three arrays perform close to the HBM-only configuration.
## Simple Application
One application that can greatly benefit from the availability of a large, slower memory alongside a smaller, faster one is computing a histogram with many bins over a large dataset.
```cpp
#include <iostream>
#include <vector>
#include <chrono>
#include <cmath>
#include <cstring>
#include <omp.h>
#include <memkind.h>
const size_t N_DATA_SIZE = 2 * 1024 * 1024 * 1024ull;
const size_t N_BINS_COUNT = 1 * 1024 * 1024ull;
const size_t N_ITERS = 10;
#if defined(HBM)
#define DATA_MEMKIND MEMKIND_REGULAR
#define BINS_MEMKIND MEMKIND_HBW_ALL
#else
#define DATA_MEMKIND MEMKIND_REGULAR
#define BINS_MEMKIND MEMKIND_REGULAR
#endif
int main(int argc, char *argv[])
{
const double binWidth = 1.0 / double(N_BINS_COUNT + 1);
double *pData = (double *)memkind_malloc(DATA_MEMKIND, N_DATA_SIZE * sizeof(double));
size_t *pBins = (size_t *)memkind_malloc(BINS_MEMKIND, N_BINS_COUNT * omp_get_max_threads() * sizeof(size_t));
#pragma omp parallel
{
drand48_data state;
srand48_r(omp_get_thread_num(), &state);
#pragma omp for
for(size_t i = 0; i < N_DATA_SIZE; ++i)
drand48_r(&state, &pData[i]);
}
auto c1 = std::chrono::steady_clock::now();
for(size_t it = 0; it < N_ITERS; ++it)
{
#pragma omp parallel
{
for(size_t i = 0; i < N_BINS_COUNT; ++i)
pBins[omp_get_thread_num()*N_BINS_COUNT + i] = size_t(0);
#pragma omp for
for(size_t i = 0; i < N_DATA_SIZE; ++i)
{
const size_t idx = size_t(pData[i] / binWidth) % N_BINS_COUNT;
pBins[omp_get_thread_num()*N_BINS_COUNT + idx]++;
}
}
}
auto c2 = std::chrono::steady_clock::now();
#pragma omp parallel for
for(size_t i = 0; i < N_BINS_COUNT; ++i)
{
for(size_t j = 1; j < omp_get_max_threads(); ++j)
pBins[i] += pBins[j*N_BINS_COUNT + i];
}
std::cout << "Elapsed Time [s]: " << std::chrono::duration<double>(c2 - c1).count() << std::endl;
size_t total = 0;
#pragma omp parallel for reduction(+:total)
for(size_t i = 0; i < N_BINS_COUNT; ++i)
total += pBins[i];
std::cout << "Total Items: " << total << std::endl;
memkind_free(NULL, pData);
memkind_free(NULL, pBins);
return 0;
}
```
### Using HBM Memory (P10-Intel)
The following commands can be used to compile and run the example application above
```bash
ml GCC memkind
export MEMKIND_HBW_NODES=8,9,10,11,12,13,14,15
g++ -O3 -fopenmp -lmemkind histogram.cpp -o histogram_dram
g++ -O3 -fopenmp -lmemkind -DHBM histogram.cpp -o histogram_hbm
OMP_PROC_BIND=spread GOMP_CPU_AFFINITY=0-95 OMP_NUM_THREADS=96 ./histogram_dram
OMP_PROC_BIND=spread GOMP_CPU_AFFINITY=0-95 OMP_NUM_THREADS=96 ./histogram_hbm
```
Moving the histogram bin data into HBM memory should speed up the algorithm by more than a factor of two. It should be noted that also moving the `pData` array into HBM memory worsens this result (presumably because keeping the data in DDR allows the algorithm to saturate both memory interfaces).
## Additional Resources
- [https://linux.die.net/man/8/numactl][1]
- [http://memkind.github.io/memkind/man_pages/memkind.html][2]
- [https://lenovopress.lenovo.com/lp1738-implementing-intel-high-bandwidth-memory][3]
[1]: https://linux.die.net/man/8/numactl
[2]: http://memkind.github.io/memkind/man_pages/memkind.html
[3]: https://lenovopress.lenovo.com/lp1738-implementing-intel-high-bandwidth-memory
\ No newline at end of file
# Using VMware Horizon
VMware Horizon is a virtual desktop infrastructure (VDI) solution
that enables users to access virtual desktops and applications from any device and any location.
It provides a comprehensive end-to-end solution for managing and delivering virtual desktops and applications,
including features such as session management, user authentication, and virtual desktop provisioning.
![](../../img/horizon.png)
## How to Access VMware Horizon
!!! important
Access to VMware Horizon requires IT4I VPN.
1. Contact [IT4I support][a] with a request for an access and VM allocation.
1. [Download][1] and install the VMware Horizon Client for Windows.
1. Add a new server `https://vdi-cs01.msad.it4i.cz/` in the Horizon client.
1. Connect to the server using your IT4I username and password.
Username is in the `domain\username` format and the domain is `msad.it4i.cz`.
For example: `msad.it4i.cz\user123`
## Example
Below is an example of how to mount a remote folder and check the connection on Windows OS:
### Prerequisites
3D applications
* [Blender][3]
SSHFS for remote access
* [sshfs-win][4]
* [winfsp][5]
* [sshfs-win-manager][6]
* ssh keys for access to clusters
### Steps
1. Start the VPN and connect to the server via VMware Horizon Client.
![](../../img/vmware.png)
1. Mount a remote folder.
* Run sshfs-win-manager.
![](../../img/sshfs.png)
* Add a new connection.
![](../../img/sshfs1.png)
* Click on **Connect**.
![](../../img/sshfs2.png)
1. Check that the folder is mounted.
![](../../img/mount.png)
1. Check the GPU resources.
![](../../img/gpu.png)
### Blender
Now if you run, for example, Blender, you can check the available GPU resources in Blender Preferences.
![](../../img/blender.png)
[a]: mailto:support@it4i.cz
[1]: https://vdi-cs01.msad.it4i.cz/
[2]: https://www.paraview.org/download/
[3]: https://www.blender.org/download/
[4]: https://github.com/winfsp/sshfs-win/releases
[5]: https://github.com/winfsp/winfsp/releases/
[6]: https://github.com/evsar3/sshfs-win-manager/releases
# Using IBM Power Partition
For testing your application on the IBM Power partition,
you need to prepare a job script for that partition or use the interactive job:
```console
salloc -N 1 -c 192 -A PROJECT-ID -p p07-power --time=08:00:00
```
where:
- `-N 1` allocates a single node,
- `-c 192` allocates 192 cores (threads),
- `-p p07-power` selects the IBM Power partition,
- `--time=08:00:00` allocates the resources for 8 hours.
On the partition, you should reload the list of modules:
```
ml architecture/ppc64le
```
The platform offers both GNU-based and proprietary IBM toolchains for building applications. IBM also provides an optimized BLAS routines library ([ESSL](https://www.ibm.com/docs/en/essl/6.1)), which can be used with both toolchains.
## Building Applications
Our sample application depends on `BLAS`, therefore we start by loading the following modules (regardless of which toolchain we want to use):
```
ml GCC OpenBLAS
```
### GCC Toolchain
In the case of the GCC toolchain, we can go ahead and compile the application using either `g++`
```
g++ -lopenblas hello.cpp -o hello
```
or `gfortran`
```
gfortran -lopenblas hello.f90 -o hello
```
as usual.
### IBM Toolchain
The IBM toolchain requires additional environment setup, as it is installed in `/opt/ibm` and is not exposed as a module
```
IBM_ROOT=/opt/ibm
OPENXLC_ROOT=$IBM_ROOT/openxlC/17.1.1
OPENXLF_ROOT=$IBM_ROOT/openxlf/17.1.1
export PATH=$OPENXLC_ROOT/bin:$PATH
export LD_LIBRARY_PATH=$OPENXLC_ROOT/lib:$LD_LIBRARY_PATH
export PATH=$OPENXLF_ROOT/bin:$PATH
export LD_LIBRARY_PATH=$OPENXLF_ROOT/lib:$LD_LIBRARY_PATH
```
From there, we can use either `ibm-clang++`
```
ibm-clang++ -lopenblas hello.cpp -o hello
```
or `xlf`
```
xlf -lopenblas hello.f90 -o hello
```
to build the application as usual.
!!! note
The combination of `xlf` and `openblas` seems to cause severe performance degradation. Therefore, the `ESSL` library should be preferred (see below).
### Using ESSL Library
The [ESSL](https://www.ibm.com/docs/en/essl/6.1) library is installed in `/opt/ibm/math/essl/7.1`, so we define additional environment variables
```
IBM_ROOT=/opt/ibm
ESSL_ROOT=${IBM_ROOT}/math/essl/7.1
export LD_LIBRARY_PATH=$ESSL_ROOT/lib64:$LD_LIBRARY_PATH
```
The simplest way to utilize `ESSL` in an application that already uses `BLAS` or `CBLAS` routines is to link with the provided `libessl.so`. This can be done by replacing `-lopenblas` with `-lessl`, or with `-lessl -lopenblas` (in case `ESSL` does not provide all required `BLAS` routines).
In practice this can look like
```
g++ -L${ESSL_ROOT}/lib64 -lessl -lopenblas hello.cpp -o hello
```
or
```
gfortran -L${ESSL_ROOT}/lib64 -lessl -lopenblas hello.f90 -o hello
```
and similarly for IBM compilers (`ibm-clang++` and `xlf`).
## Hello World Applications
The `hello world` example application (written in `C++` and `Fortran`) uses simple stationary probability vector estimation to illustrate the use of GEMM (a BLAS 3 routine).
Stationary probability vector estimation in `C++`:
```c++
#include <iostream>
#include <vector>
#include <chrono>
#include "cblas.h"
const size_t ITERATIONS = 32;
const size_t MATRIX_SIZE = 1024;
int main(int argc, char *argv[])
{
const size_t matrixElements = MATRIX_SIZE*MATRIX_SIZE;
std::vector<float> a(matrixElements, 1.0f / float(MATRIX_SIZE));
for(size_t i = 0; i < MATRIX_SIZE; ++i)
a[i] = 0.5f / (float(MATRIX_SIZE) - 1.0f);
a[0] = 0.5f;
std::vector<float> w1(matrixElements, 0.0f);
std::vector<float> w2(matrixElements, 0.0f);
std::copy(a.begin(), a.end(), w1.begin());
std::vector<float> *t1, *t2;
t1 = &w1;
t2 = &w2;
auto c1 = std::chrono::steady_clock::now();
for(size_t i = 0; i < ITERATIONS; ++i)
{
std::fill(t2->begin(), t2->end(), 0.0f);
cblas_sgemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, MATRIX_SIZE, MATRIX_SIZE, MATRIX_SIZE,
1.0f, t1->data(), MATRIX_SIZE,
a.data(), MATRIX_SIZE,
1.0f, t2->data(), MATRIX_SIZE);
std::swap(t1, t2);
}
auto c2 = std::chrono::steady_clock::now();
for(size_t i = 0; i < MATRIX_SIZE; ++i)
{
std::cout << (*t1)[i*MATRIX_SIZE + i] << " ";
}
std::cout << std::endl;
std::cout << "Elapsed Time: " << std::chrono::duration<double>(c2 - c1).count() << std::endl;
return 0;
}
```
Stationary probability vector estimation in `Fortran`:
```fortran
program main
implicit none
integer :: matrix_size, iterations
integer :: i
real, allocatable, target :: a(:,:), w1(:,:), w2(:,:)
real, dimension(:,:), contiguous, pointer :: t1, t2, tmp
real, pointer :: out_data(:), out_diag(:)
integer :: cr, cm, c1, c2
iterations = 32
matrix_size = 1024
call system_clock(count_rate=cr)
call system_clock(count_max=cm)
allocate(a(matrix_size, matrix_size))
allocate(w1(matrix_size, matrix_size))
allocate(w2(matrix_size, matrix_size))
a(:,:) = 1.0 / real(matrix_size)
a(:,1) = 0.5 / real(matrix_size - 1)
a(1,1) = 0.5
w1 = a
w2(:,:) = 0.0
t1 => w1
t2 => w2
call system_clock(c1)
do i = 0, iterations
t2(:,:) = 0.0
call sgemm('N', 'N', matrix_size, matrix_size, matrix_size, 1.0, t1, matrix_size, a, matrix_size, 1.0, t2, matrix_size)
tmp => t1
t1 => t2
t2 => tmp
end do
call system_clock(c2)
out_data(1:size(t1)) => t1
out_diag => out_data(1::matrix_size+1)
print *, out_diag
print *, "Elapsed Time: ", (c2 - c1) / real(cr)
deallocate(a)
deallocate(w1)
deallocate(w2)
end program main
```
# Complementary Systems # Introduction
Complementary systems offer development environment for users Complementary systems offer development environment for users
that need to port and optimize their code and applications that need to port and optimize their code and applications
...@@ -26,6 +26,8 @@ Second stage of complementary systems implementation comprises of these partitio ...@@ -26,6 +26,8 @@ Second stage of complementary systems implementation comprises of these partitio
- compute partition 7 - based on IBM Power10 architecture - compute partition 7 - based on IBM Power10 architecture
- compute partition 8 - modern CPU with a very high L3 cache capacity (over 750MB) - compute partition 8 - modern CPU with a very high L3 cache capacity (over 750MB)
- compute partition 9 - virtual GPU accelerated workstations - compute partition 9 - virtual GPU accelerated workstations
- compute partition 10 - Sapphire Rapids-HBM server
- compute partition 11 - NVIDIA Grace CPU Superchip
![](../img/cs2_2.png) ![](../img/cs2_2.png)
......
...@@ -20,6 +20,7 @@ p05-synt up 1-00:00:00 0/1/0/1 p05-synt01 ...@@ -20,6 +20,7 @@ p05-synt up 1-00:00:00 0/1/0/1 p05-synt01
p06-arm up 1-00:00:00 0/2/0/2 p06-arm[01-02] p06-arm up 1-00:00:00 0/2/0/2 p06-arm[01-02]
p07-power up 1-00:00:00 0/1/0/1 p07-power01 p07-power up 1-00:00:00 0/1/0/1 p07-power01
p08-amd up 1-00:00:00 0/1/0/1 p08-amd01 p08-amd up 1-00:00:00 0/1/0/1 p08-amd01
p10-intel up 1-00:00:00 0/1/0/1 p10-intel01
``` ```
## Getting Job Information ## Getting Job Information
...@@ -89,7 +90,7 @@ set | grep ^SLURM ...@@ -89,7 +90,7 @@ set | grep ^SLURM
| variable name | description | example | | variable name | description | example |
| ------ | ------ | ------ | | ------ | ------ | ------ |
| SLURM_JOBID | job id of the executing job| 593 | | SLURM_JOB_ID | job id of the executing job| 593 |
| SLURM_JOB_NODELIST | nodes allocated to the job | p03-amd[01-02] | | SLURM_JOB_NODELIST | nodes allocated to the job | p03-amd[01-02] |
| SLURM_JOB_NUM_NODES | number of nodes allocated to the job | 2 | | SLURM_JOB_NUM_NODES | number of nodes allocated to the job | 2 |
| SLURM_STEP_NODELIST | nodes allocated to the job step | p03-amd01 | | SLURM_STEP_NODELIST | nodes allocated to the job step | p03-amd01 |
...@@ -145,6 +146,7 @@ $ scancel JOBID ...@@ -145,6 +146,7 @@ $ scancel JOBID
| p06-arm | 2 | yes | 80 | aarch64,ib | | p06-arm | 2 | yes | 80 | aarch64,ib |
| p07-power | 1 | yes | 192 | ppc64le,ib | | p07-power | 1 | yes | 192 | ppc64le,ib |
| p08-amd | 1 | yes | 128 | x86_64,amd,milan-x,ib,ht | | p08-amd | 1 | yes | 128 | x86_64,amd,milan-x,ib,ht |
| p10-intel | 1 | yes | 96 | x86_64,intel,sapphire_rapids,ht|
Use `-t`, `--time` option to specify job run time limit. Default job time limit is 2 hours, maximum job time limit is 24 hours. Use `-t`, `--time` option to specify job run time limit. Default job time limit is 2 hours, maximum job time limit is 24 hours.
...@@ -312,6 +314,14 @@ Whole node allocation: ...@@ -312,6 +314,14 @@ Whole node allocation:
salloc -A PROJECT-ID -p p08-amd salloc -A PROJECT-ID -p p08-amd
``` ```
## Partition 10 - Intel Sapphire Rapids
Whole node allocation:
```console
salloc -A PROJECT-ID -p p10-intel
```
## Features ## Features
Nodes have feature tags assigned to them. Nodes have feature tags assigned to them.
...@@ -326,6 +336,7 @@ Users can select nodes based on the feature tags using --constraint option. ...@@ -326,6 +336,7 @@ Users can select nodes based on the feature tags using --constraint option.
| intel | manufacturer | | intel | manufacturer |
| icelake | processor family | | icelake | processor family |
| broadwell | processor family | | broadwell | processor family |
| sapphire_rapids | processor family |
| milan | processor family | | milan | processor family |
| milan-x | processor family | | milan-x | processor family |
| ib | Infiniband | | ib | Infiniband |
...@@ -342,10 +353,14 @@ p00-arm01 aarch64,cortex-a72 ...@@ -342,10 +353,14 @@ p00-arm01 aarch64,cortex-a72
p01-arm[01-08] aarch64,a64fx,ib p01-arm[01-08] aarch64,a64fx,ib
p02-intel01 x86_64,intel,icelake,ib,fpga,bitware,nvdimm,ht p02-intel01 x86_64,intel,icelake,ib,fpga,bitware,nvdimm,ht
p02-intel02 x86_64,intel,icelake,ib,fpga,bitware,nvdimm,noht p02-intel02 x86_64,intel,icelake,ib,fpga,bitware,nvdimm,noht
p03-amd01 x86_64,amd,milan,ib,gpu,mi100,fpga,xilinx,ht
p03-amd02 x86_64,amd,milan,ib,gpu,mi100,fpga,xilinx,noht p03-amd02 x86_64,amd,milan,ib,gpu,mi100,fpga,xilinx,noht
p03-amd01 x86_64,amd,milan,ib,gpu,mi100,fpga,xilinx,ht
p04-edge01 x86_64,intel,broadwell,ib,ht p04-edge01 x86_64,intel,broadwell,ib,ht
p05-synt01 x86_64,amd,milan,ib,ht p05-synt01 x86_64,amd,milan,ib,ht
p06-arm[01-02] aarch64,ib
p07-power01 ppc64le,ib
p08-amd01 x86_64,amd,milan-x,ib,ht
p10-intel01 x86_64,intel,sapphire_rapids,ht
``` ```
``` ```
......