From 09e33d9b112319178e51ebf314433c8ceb269f96 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Roman=20Sl=C3=ADva?= <roman.sliva@vsb.cz>
Date: Mon, 4 Sep 2023 15:23:48 +0200
Subject: [PATCH] Update karolina-slurm.md

---
 docs.it4i/general/karolina-slurm.md | 49 ++++++++++++++++++++++++-----
 1 file changed, 42 insertions(+), 7 deletions(-)

diff --git a/docs.it4i/general/karolina-slurm.md b/docs.it4i/general/karolina-slurm.md
index 60fd828ec..ca15d616f 100644
--- a/docs.it4i/general/karolina-slurm.md
+++ b/docs.it4i/general/karolina-slurm.md
@@ -54,9 +54,10 @@ There is no need to specify the number of cores and memory size.
 ## Using GPU Queues
 
 Access [GPU accelerated nodes][5].
-Every GPU accelerated node is divided into 8 fractions, one fraction per GPU.
-By default only one fraction i.e. 1/8 of the node - one GPU and corresponding CPU cores and memory is allocated.
+Every GPU accelerated node is divided into eight parts, each part containing one GPU.
+By default, only one part, i.e. 1/8 of the node – one GPU with the corresponding CPU cores and memory – is allocated.
 There is no need to specify the number of cores and memory size, on the contrary, it is undesirable.
+Some restrictions are employed to provide fair division and efficient use of resources.
 
 ```console
 #!/usr/bin/bash
@@ -68,12 +69,46 @@ There is no need to specify the number of cores and memory size, on the contrary
 ```
 
 To allocate more GPUs use `--gpus` option.
+The default behavior is to allocate enough nodes to satisfy the resources requested by the `--gpus` option, without delaying the initiation of the job.
+
+The following code requests four GPUs; the scheduler can allocate one to four nodes, depending on the cluster state, to fulfill the request.
+
+```console
+#SBATCH --gpus 4
+```
+
+The following code requests 16 GPUs; the scheduler can allocate one to sixteen nodes, depending on the cluster state, to fulfill the request.
+
+```console
+#SBATCH --gpus 16
+```
+
+To allocate GPUs within one node, you have to specify the `--nodes` option.
+
+The following code requests four GPUs on exactly one node.
 
 ```console
 #SBATCH --gpus 4
+#SBATCH --nodes 1
+```
+
+The following code requests 16 GPUs on exactly two nodes.
+
+```console
+#SBATCH --gpus 16
+#SBATCH --nodes 2
+```
+
+Alternatively, you can use the `--gpus-per-node` option. Only the value 8 is allowed for multi-node allocations, to prevent node fragmentation.
+
+The following code requests 16 GPUs on exactly two nodes.
+
+```console
+#SBATCH --gpus-per-node 8
+#SBATCH --nodes 2
 ```
 
-To allocate whole GPU accelerated node use `--exclusive` option
+To allocate a whole GPU accelerated node, you can also use the `--exclusive` option
 
 ```console
 #SBATCH --exclusive
@@ -82,8 +117,8 @@ To allocate whole GPU accelerated node use `--exclusive` option
 ## Using Fat Queue
 
 Access [data analytics aka fat node][6].
-Fat node is divided into 32 fractions, one fraction per CPU.
-By default only one fraction i.e. 1/32 of the node - one CPU and corresponding memory is allocated.
+The fat node is divided into 32 parts, one part per CPU.
+By default, only one part, i.e. 1/32 of the node – one CPU and the corresponding memory – is allocated.
 
 To allocate requested memory use `--mem` option.
 Corresponding CPUs wil be allocated. Fat node has about 23TB of memory available for jobs.
@@ -107,8 +142,8 @@ To allocate whole fat node use `--exclusive` option
 ## Using Viz Queue
 
 Access [visualisation node][7].
-Every visualisation node is divided into eight fractions.
-By default only one fraction i.e. 1/8 of the node is allocated.
+Every visualisation node is divided into eight parts.
+By default, only one part, i.e. 1/8 of the node, is allocated.
 
 ```console
 $ salloc -A PROJECT-ID -p qviz
-- 
GitLab