#!/bin/sh
# SELECT MACHINE ####################################################
MERIC_LIBS_COMMON="-L$HOME/OpenFOAM/meric/lib/ -lmericmpi "
MERIC_LIBS_TAURUS="-lhdeem -lfreeipmi -lcpufreq -lx86_adapt -DWITHREADEX -DUSE_MERIC -DENDKILL "
MERIC_LIBS_SALOMON="-L$HOME/readex/MERIC/x86adaptGCC/x86_adapt/build/ -lx86_adapt -DWITHREADEX -DUSE_MERIC "
export READEX_LIBS=$MERIC_LIBS_COMMON$MERIC_LIBS_TAURUS
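# assumed usage (not shown in these notes): READEX_LIBS is presumably referenced
# from the instrumented solver's Make/options, e.g.
#   EXE_INC  = ... -I$(HOME)/OpenFOAM/meric/include
#   EXE_LIBS = ... $(READEX_LIBS)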
#####################################################################
# OpenFoam modules
#module load intel/2016.2.181
#module load intel/2016.1.150
module load boost/1.61.0-intel2017.0.020-intelmpi2017
#module load mpirt/5.1.2.150
module load zlib
module load flex
module load cmake
source ~/OpenFOAM/OpenFOAM-v1612+/etc/bashrc
export WM_NCOMPPROCS=24
export BOOST_ARCH_PATH=$BOOST_ROOT
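# presumably set so that OpenFOAM's ThirdParty/CGAL rules use the boost module
# loaded above instead of a ThirdParty boost build (see the BOOST_ARCH_PATH notes below)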
# Meric modules
module load hdeem
module load papi/5.4.1
export LD_LIBRARY_PATH+=:$HOME/OpenFOAM/meric/lib # MERIC built without OpenMP
export LD_LIBRARY_PATH+=:/usr/local/lib # x86_adapt
export MERIC_NUM_THREADS=0
export MERIC_FREQUENCY=25
export MERIC_UNCORE_FREQUENCY=30
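# note (assumption, not stated in these notes): MERIC_FREQUENCY=25 and
# MERIC_UNCORE_FREQUENCY=30 presumably select 2.5 GHz core and 3.0 GHz uncore
# frequency; with the variables above exported, the instrumented solver is
# launched as usual, e.g.
#   srun -N 1 -n 24 simpleFoam -parallel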
return # <<<< stop here when sourcing; everything below is reference notes
# vim /ThirdParty-v1612+/scotch_6.0.3/src/Makefile.inc
# "-shared-intel" to LDFLAGS
BOOST_INC=/sw/taurus/libraries/boost/1.61.0-intel2017.0.020-intelmpi2017/include
BOOST_ARCH_PATH=/home/h6/vysocky/OpenFOAM/ThirdParty-v1612+/platforms/linux64Icc/boost_1_62_0
BOOST_LIB=/sw/taurus/libraries/boost/1.61.0-intel2017.0.020-intelmpi2017/lib
BOOST_ROOT=/sw/taurus/libraries/boost/1.61.0-intel2017.0.020-intelmpi2017
bash-4.1$ export BOOST_ARCH_PATH=/sw/taurus/libraries/boost/1.61.0-intel2017.0.020-intelmpi2017
################################################################################
# OpenFOAM + Intel
# 1
module load intel/2016.2.181
module load mpirt/5.1.2.150
module load zlib
module load flex
module load cmake
module load boost/1.62.0-intel2016.2.181
# 2
# module load intel/2015.3.187
# module load zlib
# module load flex
# module load cmake
# module load boost/1.61.0-intel2015.3.187-python2.7
# 3
# module load intel/2017.0.020
# module load zlib
# module load flex
# module load cmake
# module load boost/1.61.0-intel2017.0.020-intelmpi2017
#source ~/OpenFOAM/OpenFOAM-4.1/etc/bashrc
export MPI_ARCH_FLAGS="-DOMPI_SKIP_MPICXX"
export MPI_ARCH_INC="-isystem $MPI_ROOT/include"
export MPI_ARCH_LIBS="-L$MPI_ROOT/lib -lmpi"
source ~/OpenFOAM/OpenFOAM-v1612+/etc/bashrc
export WM_CXX=mpiicpc
exit 0
#################################################################### installation #
srun -N 1 --tasks-per-node=1 --cpus-per-task=24 --exclusive -p haswell --x11 --pty bash
source ~/OpenFOAM/OpenFOAM-v1612+/etc/bashrc
export WM_NCOMPPROCS=22
printenv | grep icpc # if unsure, set mpiicpc everywhere
export WM_CXX=mpiicpc
# export MPICXX_CXX=mpiicpc
# export MPI_ROOT=$I_MPI_ROOT
# vim $WM_PROJECT_DIR/etc/config/settings.sh
foam
vim etc/bashrc
# export WM_COMPILER=Icc
# export WM_MPLIB=SYSTEMMPI
vim wmake/rules/linux64Icc/c++
# CC = mpiicpc -std=c++11 -fp-trap=common -fp-model precise
./Allwmake
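# optionally capture the build output for later inspection (plain shell):
#   ./Allwmake 2>&1 | tee log.Allwmake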
################################################################################
# post-compilation test
foamInstallationTest
mkdir -p $FOAM_RUN
run
cp -r $FOAM_TUTORIALS/incompressible/simpleFoam/pitzDaily ./
cd pitzDaily
blockMesh
simpleFoam
# paraFoam
################################################################################
# run example:
/home/vysocky/OpenFOAM/OpenFOAM-v1612+/tutorials/incompressible/icoFoam/cavity/cavity
mpirun -n 4 icoFoam -parallel
system/blockMeshDict - problem size
system/decomposeParDict - problem decomposition
mpirun -n 4 --bind-to-none icoFoam -parallel
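# minimal decomposition sketch for the 4-rank run above (illustrative values,
# not taken from the actual case): in system/decomposeParDict set
#   numberOfSubdomains 4;
#   method scotch;
# then run
#   decomposePar
#   mpirun -n 4 icoFoam -parallel
#   reconstructPar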
################################################################################
# source files
# icoFoam
vim applications/solvers/incompressible/icoFoam/icoFoam.C
vim tutorials/incompressible/icoFoam/cavity/cavity/system/fvSolution
# solvers
vim src/OpenFOAM/matrices/lduMatrix/solvers/PCG/PCG.C
vim src/OpenFOAM/matrices/lduMatrix/solvers/PBiCG/PBiCG.C
# matrix
vim src/OpenFOAM/matrices/lduMatrix/lduMatrix/lduMatrix.C
vim src/OpenFOAM/matrices/lduMatrix/lduMatrix/lduMatrixATmul.C
# simpleFoam
vim applications/solvers/incompressible/simpleFoam/UEqn.H
vim applications/solvers/incompressible/simpleFoam/pEqn.H
vim applications/solvers/incompressible/simpleFoam/simpleFoam.C
vim applications/solvers/incompressible/simpleFoam/Make/options # compilation settings
# pitzDaily
vim tutorials/incompressible/simpleFoam/pitzDaily/system/fvSolution
vim tutorials/incompressible/simpleFoam/pitzDaily/system/controlDict
vim tutorials/incompressible/simpleFoam/pitzDaily/system/blockMeshDict
# motorBike
vim tutorials/incompressible/simpleFoam/motorBike/system/fvSolution
################################## motorBike
# solvers used in the motorBike fvSolution (illustrative entries below):
# - GAMG
# - PBiCG + DILU
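# illustrative fvSolution entries for the two solver setups listed above
# (field names and tolerances are assumptions, not copied from the case):
#   p { solver GAMG;  smoother GaussSeidel; tolerance 1e-06; relTol 0.01; }
#   U { solver PBiCG; preconditioner DILU;  tolerance 1e-08; relTol 0; }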
mpiicpc -std=c++11 -fp-trap=common -fp-model precise -DOPENFOAM_PLUS=1612 -Dlinux64 -DWM_ARCH_OPTION=64 -DWM_DP -DWM_LABEL_SIZE=32 -Wall -Wextra -Wnon-virtual-dtor -Wno-unused-parameter -Wno-invalid-offsetof -diag-disable 327,654,1125,2289,2304,11062,11074,11076 -O3 -DNoRepository -I/home/h6/vysocky/OpenFOAM/OpenFOAM-v1612+/src/TurbulenceModels/turbulenceModels/lnInclude -I/home/h6/vysocky/OpenFOAM/OpenFOAM-v1612+/src/TurbulenceModels/incompressible/lnInclude -I/home/h6/vysocky/OpenFOAM/OpenFOAM-v1612+/src/transportModels -I/home/h6/vysocky/OpenFOAM/OpenFOAM-v1612+/src/transportModels/incompressible/singlePhaseTransportModel -I/home/h6/vysocky/OpenFOAM/OpenFOAM-v1612+/src/finiteVolume/lnInclude -I/home/h6/vysocky/OpenFOAM/OpenFOAM-v1612+/src/meshTools/lnInclude -I/home/h6/vysocky/OpenFOAM/OpenFOAM-v1612+/src/sampling/lnInclude -I/home/vysocky/meric/include -IlnInclude -I. -I/home/h6/vysocky/OpenFOAM/OpenFOAM-v1612+/src/OpenFOAM/lnInclude -I/home/h6/vysocky/OpenFOAM/OpenFOAM-v1612+/src/OSspecific/POSIX/lnInclude -fPIC -c simpleFoam.C -o /home/h6/vysocky/OpenFOAM/OpenFOAM-v1612+/platforms/linux64IccDPInt32Opt/applications/solvers/incompressible/simpleFoam/simpleFoam.o
################################################################################
# OpenFOAM + gcc
module load gcc/6.2.0
module load zlib
module load flex
module load cmake
module load boost/1.62.0-gnu6.2
export WM_NCOMPPROCS=22
source ~/OpenFOAM/OpenFOAM-v1612+/etc/bashrc
module load boost/1.62.0-gnu6.2
# gcc/6.2.0
# boost/1.62.0-gnu6.2
# bullxmpi/1.2.8.4
module load zlib
module load flex
module load cmake
module load hdeem
################################################################################
# openfoam modules available on Taurus and the module chains they load:
openfoam/2.2.2
1) intel/2016.1.150 2) bullxmpi/1.2.8.4 3) gcc/5.3.0 4) openfoam/2.2.2
openfoam/2.3.0(default)
1) intel/2013 2) bullxmpi/1.2.8.4 3) openfoam/2.3.0
openfoam/2.4.0
1) intel/2013 2) bullxmpi/1.2.8.4 3) openfoam/2.4.0
openfoam/4.0
1) intel/2017.0.020 2) intelmpi/2017.0.098 3) openfoam/4.0
# MPI ERROR ####################################################################
bash-4.1$ mpirun -n 4 icoFoam -parallel
--------------------------------------------------------------------------
Not enough processors were found on the local host to meet the requested
binding action:
Local host: taurusi5217
Action requested: bind-to-core
Application name: /home/h6/vysocky/OpenFOAM/OpenFOAM-v1612+/platforms/linux64GccDPInt32Opt/bin/icoFoam
Please revise the request and try again.
--------------------------------------------------------------------------
--------------------------------------------------------------------------
mpirun was unable to start the specified application as it encountered an error
on node taurusi5217. More information may be available above.
--------------------------------------------------------------------------
4 total processes failed to start
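# the bind-to-core failure above is presumably the reason the run example earlier
# uses "--bind-to-none", which disables the default core binding of this Open MPI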
################################################################################
bash-4.1$ srun icoFoam -parallel
--------------------------------------------------------------------------
It looks like orte_init failed for some reason; your parallel process is
likely to abort. There are many reasons that a parallel process can
fail during orte_init; some of which are due to configuration or
environment problems. This failure appears to be an internal failure;
here's some additional information (which may only be relevant to an
Open MPI developer):
PMI2_Job_GetId failed failed
--> Returned value (null) (14) instead of ORTE_SUCCESS
--------------------------------------------------------------------------
[... the same orte_init / PMI2_Job_GetId block is printed by each of the 4 tasks,
 followed by an analogous block per task reporting "orte_ess_init failed" ...]
--------------------------------------------------------------------------
It looks like MPI_INIT failed for some reason; your parallel process is
likely to abort. There are many reasons that a parallel process can
fail during MPI_INIT; some of which are due to configuration or environment
problems. This failure appears to be an internal failure; here's some
additional information (which may only be relevant to an Open MPI
developer):
ompi_mpi_init: ompi_rte_init failed
--> Returned "(null)" (14) instead of "Success" (0)
--------------------------------------------------------------------------
*** An error occurred in MPI_Init
*** on a NULL communicator
*** MPI_ERRORS_ARE_FATAL (processes in this communicator will now abort,
*** and potentially your MPI job)
[taurusi4160:10295] Local abort before MPI_INIT completed successfully; not able to aggregate error messages, and not able to guarantee that all other processes were killed!
[... the same MPI_INIT / MPI_Init abort block is repeated for the remaining tasks (PIDs 10296-10298) ...]
srun: error: taurusi4160: tasks 0-3: Exited with exit code 1
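# assumption (not verified in these notes): the PMI2_Job_GetId failure suggests
# this Open MPI build cannot obtain job information from Slurm when launched with
# plain srun; usual workarounds are starting the job with mpirun inside the
# allocation (as above), or srun --mpi=pmi2 if the MPI library was built with PMI2 support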
################################################################################
# available boost modules on Taurus:
boost/1.49
boost/1.54.0-gnu4.6(default)
boost/1.54.0-gnu4.6.2
boost/1.54.0-intel12.1
boost/1.54.0-pgi13.6
boost/1.55.0-gnu4.8
boost/1.55.0-pgi14.1
boost/1.56.0-gnu4.9.1
boost/1.56.0-intel2015.3.187-python2.7
boost/1.57.0-intel2013-sp1
boost/1.58.0-gnu5.1
boost/1.59.0-intel2015.3.187
boost/1.60.0
boost/1.60.0-gnu5.3
boost/1.60.0-gnu5.3-intelmpi5.1
boost/1.60.0-intel2016.1.150
boost/1.60.0-intel2016.2.181-intelmpi5.1-knc
boost/1.61.0-intel2015.3.187-python2.7
boost/1.61.0-intel2017.0.020-intelmpi2017
boost/1.62.0-gnu6.2
boost/1.62.0-intel2016.2.181