diff --git a/docs.it4i/software/numerical-languages/r.md b/docs.it4i/software/numerical-languages/r.md
index a562fa5ae4d47a24d1b46afa0497612c413bd9bb..c6caf43db2a9db44226c44cabc630846e5a68350 100644
--- a/docs.it4i/software/numerical-languages/r.md
+++ b/docs.it4i/software/numerical-languages/r.md
@@ -1,6 +1,3 @@
-!!!warning
-    This page has not been updated yet. The page does not reflect the transition from PBS to Slurm.
-
 # R
 
 ## Introduction
@@ -42,7 +39,7 @@ $ rstudio
 
 ### Batch Execution
 
-To run R in batch mode, write an R script, then write a bash jobscript and execute via the `qsub` command. By default, R will use 24 threads on Salomon when running MKL kernels.
+To run R in batch mode, write an R script, then write a bash jobscript and execute via the `sbatch` command. By default, R will use 24 threads on Salomon when running MKL kernels.
 
 Example jobscript:
 
@@ -50,12 +47,12 @@ Example jobscript:
 #!/bin/bash
 
 # change to local scratch directory
-DIR=/scratch/project/PROJECT_ID/$PBS_JOBID
+DIR=/scratch/project/PROJECT_ID/$SLURM_JOBID
 mkdir -p "$DIR"
 cd "$DIR" || exit
 
 # copy input file to scratch
-cp $PBS_O_WORKDIR/rscript.R .
+cp $SLURM_SUBMIT_DIR/rscript.R .
 
 # load R module
 ml R
@@ -64,13 +61,14 @@ ml R
 R CMD BATCH rscript.R routput.out
 
 # copy output file to home
-cp routput.out $PBS_O_WORKDIR/.
+cp routput.out $SLURM_SUBMIT_DIR/.
 
 #exit
 exit
 ```
 
-This script may be submitted directly to the PBS workload manager via the `qsub` command. The inputs are in the rscript.R file, the outputs in the routput.out file. See the single node jobscript example in the [Job execution section][1].
+The inputs are in the `rscript.R` file, the outputs in the `routput.out` file.
+See the single node jobscript example in the [Job execution section][1].
 
 ## Parallel R
 
@@ -363,23 +361,23 @@ Currently, the two packages cannot be combined for hybrid calculations.
 
 ## Parallel Execution
 
-R parallel jobs are executed via the PBS queue system exactly as any other parallel jobs. The user must create an appropriate jobscript and submit it via `qsub`
+R parallel jobs are executed via the Slurm workload manager exactly as any other parallel jobs. The user must create an appropriate jobscript and submit it via `sbatch`.
 
 An example jobscript for [static Rmpi][4] parallel R execution, running 1 process per core:
 
 ```bash
 #!/bin/bash
-#PBS -q qprod
-#PBS -N Rjob
-#PBS -l select=100:ncpus=24:mpiprocs=24:ompthreads=1
+#SBATCH -p qprod
+#SBATCH -J Rjob
+#SBATCH --nodes=100 --ntasks-per-node=24 --cpus-per-task=1
 
 # change to scratch directory
-DIR=/scratch/project/PROJECT_ID/$PBS_JOBID
+DIR=/scratch/project/PROJECT_ID/$SLURM_JOBID
 mkdir -p "$DIR"
 cd "$DIR" || exit
 
 # copy input file to scratch
-cp $PBS_O_WORKDIR/rscript.R .
+cp $SLURM_SUBMIT_DIR/rscript.R .
 
 # load R and openmpi module
 ml R OpenMPI
@@ -388,7 +386,7 @@ ml R OpenMPI
 mpirun -bycore -bind-to-core R --slave --no-save --no-restore -f rscript.R
 
 # copy output file to home
-cp routput.out $PBS_O_WORKDIR/.
+cp routput.out $SLURM_SUBMIT_DIR/.
 
 #exit
 exit