diff --git a/docs.it4i/software/chemistry/orca.md b/docs.it4i/software/chemistry/orca.md
index e6fc8bcf312c779ac39bcedb2ac54309cf5399dc..83386c4d63dd8e55fff1a2e28d5d9c4146a071f2 100644
--- a/docs.it4i/software/chemistry/orca.md
+++ b/docs.it4i/software/chemistry/orca.md
@@ -1,6 +1,3 @@
-!!!warning
-    This page has not been updated yet. The page does not reflect the transition from PBS to Slurm.
-
 # ORCA
 
 ## Introduction
@@ -17,7 +14,8 @@ $ ml av orca
 
 ## Serial Computation With ORCA
 
-You can test a serial computation with this simple input file. Create a file called orca_serial.inp and fill it with the following ORCA commands:
+You can test a serial computation with this simple input file.
+Create a file called `orca_serial.inp` and paste the following ORCA commands into it:
 
 ```bash
     ! HF SVP
@@ -27,25 +25,26 @@ You can test a serial computation with this simple input file. Create a file cal
     *
 ```
 
-Next, create a PBS submission file for Karolina cluster (interactive job can be used too):
+Next, create a Slurm submission file for the Karolina cluster (an interactive job can be used too):
 
 ```bash
 #!/bin/bash
-#PBS -S /bin/bash
-#PBS -N ORCA_SERIAL
-#PBS -l select=1:ncpus=128
-#PBS -q qexp
-#PBS -A OPEN-0-0
+#SBATCH --job-name=ORCA_SERIAL
+#SBATCH --nodes=1
+#SBATCH --ntasks-per-node=128
+#SBATCH --partition=qcpu_exp
+#SBATCH --account=OPEN-0-0
+#SBATCH --output=%x.o%j
+#SBATCH --error=%x.e%j
 
 ml ORCA/5.0.1-OpenMPI-4.1.1
-orca orca_serial.inp
+orca orca_serial.inp
 ```
 
-Submit the job to the queue and wait before it ends. Then you can find an output log in your working directory:
+Submit the job to the queue.
+After the job ends, you can find an output log in your working directory:
 
 ```console
-$ qsub submit_serial.pbs
-1417552.infra-pbs
+$ sbatch submit_serial.slurm
+Submitted batch job 1417552
 
 $ ll ORCA_SERIAL.*
 -rw------- 1 user user     0 Aug 21 12:24 ORCA_SERIAL.e1417552
@@ -82,7 +81,9 @@ TOTAL RUN TIME: 0 days 0 hours 0 minutes 1 seconds 47 msec
 
 ## Running ORCA in Parallel
 
-Your serial computation can be easily converted to parallel. Simply specify the number of parallel processes by the `%pal` directive. In this example, 4 nodes, 128 cores each are used.
+Your serial computation can be easily converted to a parallel one.
+Simply specify the number of parallel processes with the `%pal` directive.
+In this example, 4 nodes with 128 cores each are used.
 
 !!! warning
     Do not use the `! PAL` directive as only PAL2 to PAL8 is recognized.
@@ -98,28 +99,31 @@ Your serial computation can be easily converted to parallel. Simply specify the
     *
 ```
 
-You also need to edit the previously used PBS submission file. You have to specify number of nodes, cores and MPI-processes to run:
+You also need to edit the previously used Slurm submission file.
+You have to specify the number of nodes, cores, and MPI processes to run:
 
 ```bash
 #!/bin/bash
-#PBS -S /bin/bash
-#PBS -N ORCA_PARALLEL
-#PBS -l select=4:ncpus=128:mpiprocs=128
-#PBS -q qexp
-#PBS -A OPEN-0-0
+#SBATCH --job-name=ORCA_PARALLEL
+#SBATCH --nodes=4
+#SBATCH --ntasks-per-node=128
+#SBATCH --partition=qcpu_exp
+#SBATCH --account=OPEN-0-0
+#SBATCH --output=%x.o%j
+#SBATCH --error=%x.e%j
 
 ml ORCA/5.0.1-OpenMPI-4.1.1
-orca orca_parallel.inp > output.out
+$(which orca) orca_parallel.inp > output.out
 ```
 
 !!! note
-    When running ORCA in parallel, ORCA should **NOT** be started with `mpirun` (e.g. `mpirun -np 4 orca`, etc.) like many MPI programs and **has to be called with a full pathname**.
+    Unlike many other MPI programs, ORCA should **NOT** be started with `mpirun` (e.g. `mpirun -np 4 orca`, etc.)
+    when running in parallel; instead, it **has to be called with a full pathname**.
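+
+To illustrate, the following sketch contrasts the invocation used in the submission script above with the pattern the note warns against:
+
+```bash
+# correct: call the ORCA binary by its full path and let ORCA spawn its own MPI processes
+$(which orca) orca_parallel.inp > output.out
+
+# incorrect: do not wrap ORCA in an MPI launcher
+# mpirun -np 4 orca orca_parallel.inp
+```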
 
 Submit this job to the queue and see the output file.
 
 ```console
-$ qsub submit_parallel.pbs
-1417598.infra-pbs
+$ sbatch submit_parallel.slurm
+Submitted batch job 1417598
 
 $ ll ORCA_PARALLEL.*
 -rw-------  1 user user     0 Aug 21 13:12 ORCA_PARALLEL.e1417598
@@ -159,7 +163,8 @@ $ cat ORCA_PARALLEL.o1417598
 TOTAL RUN TIME: 0 days 0 hours 0 minutes 11 seconds 859 msec
 ```
 
-You can see, that the program was running with 512 parallel MPI-processes. In version 5.0.1, only the following modules are parallelized:
+You can see that the program was running with 512 parallel MPI processes.
+In version 5.0.1, only the following modules are parallelized:
 
 * ANOINT
 * CASSCF / NEVPT2
@@ -181,36 +186,38 @@ You can see, that the program was running with 512 parallel MPI-processes. In ve
 
 ## Example Submission Script
 
-The following script contains all of the necessary instructions to run an ORCA job, including copying of the files to and from /scratch to utilize the InfiniBand network:
+The following script contains all of the necessary instructions to run an ORCA job,
+including copying the files to and from `/scratch` to utilize the InfiniBand network:
 
 ```bash
 #!/bin/bash
-#PBS -S /bin/bash
-#PBS -A OPEN-00-00
-#PBS -N example-CO
-#PBS -q qexp
-#PBS -l select=2:ncpus=128:mpiprocs=128:ompthreads=1
-#PBS -l walltime=00:05:00
+#SBATCH --account=OPEN-00-00
+#SBATCH --job-name=example-CO
+#SBATCH --partition=qcpu_exp
+#SBATCH --nodes=2
+#SBATCH --ntasks-per-node=128
+#SBATCH --cpus-per-task=1
+#SBATCH --time=00:05:00
 
 ml purge
 ml ORCA/5.0.1-OpenMPI-4.1.1
 
-echo $PBS_O_WORKDIR
-cd $PBS_O_WORKDIR
+echo $SLURM_SUBMIT_DIR
+cd $SLURM_SUBMIT_DIR
 
 # create /scratch dir
-b=$(basename $PBS_O_WORKDIR)
-SCRDIR=/scratch/project/OPEN-00-00/$USER/${b}_${PBS_JOBID}/
+b=$(basename $SLURM_SUBMIT_DIR)
+SCRDIR=/scratch/project/OPEN-00-00/$USER/${b}_${SLURM_JOB_ID}/
 echo $SCRDIR
 mkdir -p $SCRDIR
 cd $SCRDIR || exit
 
 # get number of cores used for our job
-ncpus=$(qstat -f $PBS_JOBID | grep resources_used.ncpus | awk '{print $3}')
+ncpus=$(sacct -j $SLURM_JOB_ID --format=AllocCPUS --noheader | head -1)
 
 
 ### create ORCA input file
-cat > ${PBS_JOBNAME}.inp <<EOF
+cat > ${SLURM_JOB_NAME}.inp <<EOF
 ! HF def2-TZVP
 %pal
   nprocs $ncpus
@@ -225,20 +232,21 @@ EOF
 ###
 
 # copy input files to /scratch
-cp -r $PBS_O_WORKDIR/* .
+cp -r $SLURM_SUBMIT_DIR/* .
 
 # run calculations
-$(which orca) ${PBS_JOBNAME}.inp > $PBS_O_WORKDIR/${PBS_JOBNAME}.out
+$(which orca) ${SLURM_JOB_NAME}.inp > $SLURM_SUBMIT_DIR/${SLURM_JOB_NAME}.out
 
 # copy output files to home, delete the rest
-cp * $PBS_O_WORKDIR/ && cd $PBS_O_WORKDIR
+cp * $SLURM_SUBMIT_DIR/ && cd $SLURM_SUBMIT_DIR
 rm -rf $SCRDIR
 exit
 ```
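+
+The script can be submitted the same way as the previous examples; the filename here is only illustrative:
+
+```console
+$ sbatch submit_example.slurm
+```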
 
 ## Register as a User
 
-You are encouraged to register as a user of ORCA [here][a] in order to take advantage of updates, announcements, and the users forum.
+You are encouraged to register as a user of ORCA [here][a]
+in order to take advantage of updates, announcements, and the users forum.
 
 ## Documentation