From 36d02f60a40190d711df0e4818aecc2f40f1b827 Mon Sep 17 00:00:00 2001 From: easybuild <you@example.com> Date: Thu, 8 Jun 2023 14:52:19 +0200 Subject: [PATCH] fix --- easyblocks/a/ansysem.py | 111 ++++ easyblocks/a/aomp.py | 195 +++++++ easyblocks/c/cp2k.py | 970 ++++++++++++++++++++++++++++++++ easyblocks/i/imod.py | 95 ++++ easyblocks/n/nvhpc.py | 312 ++++++++++ easyblocks/o/openfoam.py | 556 ++++++++++++++++++ easyblocks/q/quantumespresso.py | 526 +++++++++++++++++ easyblocks/r/relion.py | 125 ++++ easyblocks/w/wien2k.py | 249 +++++--- 9 files changed, 3048 insertions(+), 91 deletions(-) create mode 100644 easyblocks/a/ansysem.py create mode 100644 easyblocks/a/aomp.py create mode 100644 easyblocks/c/cp2k.py create mode 100644 easyblocks/i/imod.py create mode 100644 easyblocks/n/nvhpc.py create mode 100644 easyblocks/o/openfoam.py create mode 100644 easyblocks/q/quantumespresso.py create mode 100644 easyblocks/r/relion.py diff --git a/easyblocks/a/ansysem.py b/easyblocks/a/ansysem.py new file mode 100644 index 0000000..59e7a48 --- /dev/null +++ b/easyblocks/a/ansysem.py @@ -0,0 +1,111 @@ +## +# Copyright 2009-2020 Ghent University +# +# This file is part of EasyBuild, +# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), +# with support of Ghent University (http://ugent.be/hpc), +# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), +# Flemish Research Foundation (FWO) (http://www.fwo.be/en) +# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). +# +# https://github.com/easybuilders/easybuild +# +# EasyBuild is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation v2. +# +# EasyBuild is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 
+##
+"""
+EasyBuild support for installing ANSYS Electromagnetics
+
+@author: Alexi Rivera (Chalmers University of Technology)
+@author: Mikael OEhman (Chalmers University of Technology)
+"""
+import os
+import glob
+
+from easybuild.easyblocks.generic.packedbinary import PackedBinary
+from easybuild.tools.build_log import EasyBuildError
+from easybuild.tools.run import run_cmd
+
+
+class EB_ANSYSEM(PackedBinary):
+    """Support for installing Ansys Electromagnetics."""
+
+    def __init__(self, *args, **kwargs):
+        """Initialize Ansys Electromagnetics specific variables."""
+        super(EB_ANSYSEM, self).__init__(*args, **kwargs)
+        self.replayfile = None
+
+    def configure_step(self):
+        """Configure Ansys Electromagnetics installation."""
+        licserv = os.getenv('EB_ANSYS_EM_LICENSE_SERVER')
+        licport = os.getenv('EB_ANSYS_EM_LICENSE_SERVER_PORT')
+        licservers = ['', '', '']
+        licservs = licserv.split(',')
+        servercount = len(licservs)
+        licservers[:servercount] = licservs
+        try:
+            self.replayfile = os.path.join(self.builddir, "installer.properties")
+            txt = '\n'.join([
+                "-W Agree.selection=1",
+                "-P installLocation=\"%s\"" % self.installdir,
+                "-W TempDirectory.tempPath=\"/tmp\"",
+                "-W TempDirectory.ChangeTempPermission=\"0\"",
+                "-W LibraryOption.libraryOption=0",
+                "-W LibraryOption.libraryPath=\"\"",
+                "-W LicenseOption.licenseOption=2",
+                "-W LicenseOption.licenseFileName=\"\"",
+                "-W LicenseOption.serverCount=%s" % servercount,
+                "-W LicenseOption.serverName1=\"%s\"" % licservers[0],
+                "-W LicenseOption.serverName2=\"%s\"" % licservers[1],
+                "-W LicenseOption.serverName3=\"%s\"" % licservers[2],
+                "-W LicenseOption.tcpPort=%s" % licport,
+            ])
+            with open(self.replayfile, "w") as f:
+                f.write(txt)
+        except IOError as err:
+            raise EasyBuildError("Failed to create install properties file used for replaying installation: %s", err)
+
+    def install_step(self):
+        """Install Ansys Electromagnetics using its setup tool."""
+        cmd = "./Linux/AnsysEM/disk1/setup.exe -options \"%s\" -silent" % (self.replayfile)
+        run_cmd(cmd, log_all=True, simple=True)
+
+    def make_module_extra(self):
+        """Extra module entries for Ansys Electromagnetics."""
+        idirs = glob.glob(os.path.join(self.installdir, 'AnsysEM*/Linux*/'))
+        if len(idirs) == 1:
+            subdir = os.path.relpath(idirs[0], self.installdir)
+        else:
+            raise EasyBuildError("Failed to locate single install subdirectory AnsysEM*/Linux*/")
+
+        txt = super(EB_ANSYSEM, self).make_module_extra()
+        txt += self.module_generator.prepend_paths('PATH', subdir)
+        # Not sure if these are needed;
+        # txt += self.module_generator.prepend_paths('LD_LIBRARY_PATH',
+        #                                            [os.path.join(ansysdir, 'mainwin540', 'Linux64', 'mw', 'lib-amd64_linux_optimized')])
+        # txt += self.module_generator.prepend_paths('LIBRARY_PATH',
+        #                                            [os.path.join('ansysdir', 'mainwin540', 'Linux64', 'mw', 'lib-amd64_linux_optimized')])
+        return txt
+
+    def sanity_check_step(self):
+        """Custom sanity check for Ansys Electromagnetics."""
+        idirs = glob.glob(os.path.join(self.installdir, 'AnsysEM*/Linux*/'))
+        if len(idirs) == 1:
+            subdir = os.path.relpath(idirs[0], self.installdir)
+        else:
+            raise EasyBuildError("Failed to locate single install subdirectory AnsysEM*/Linux*/")
+
+        custom_paths = {
+            'files': [os.path.join(subdir, 'ansysedt')],
+            'dirs': [subdir],
+        }
+        super(EB_ANSYSEM, self).sanity_check_step(custom_paths=custom_paths)
diff --git a/easyblocks/a/aomp.py b/easyblocks/a/aomp.py
new file mode 100644
index 0000000..7dd557c
--- /dev/null
+++ b/easyblocks/a/aomp.py
@@ -0,0 +1,195 @@
+##
+# Copyright 
2021-2023 Ghent University +# +# This file is part of EasyBuild, +# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), +# with support of Ghent University (http://ugent.be/hpc), +# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), +# Flemish Research Foundation (FWO) (http://www.fwo.be/en) +# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). +# +# https://github.com/easybuilders/easybuild +# +# EasyBuild is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation v2. +# +# EasyBuild is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. +## +""" +Support for building and installing AOMP - AMD OpenMP compiler, implemented as +an EasyBlock + +@author: Jorgen Nordmoen (University Center for Information Technology - UiO) +""" +import os + +from easybuild.easyblocks.generic.binary import Binary +from easybuild.framework.easyconfig import CUSTOM +from easybuild.tools.build_log import EasyBuildError, print_warning +from easybuild.tools.config import build_option +from easybuild.tools.filetools import move_file, remove_file +from easybuild.tools.modules import get_software_root +from easybuild.tools.systemtools import AARCH64, POWER, X86_64 +from easybuild.tools.systemtools import get_cpu_architecture, get_shared_lib_ext + +AOMP_ALL_COMPONENTS = ['roct', 'rocr', 'project', 'libdevice', 'openmp', + 'extras', 'pgmath', 'flang', 'flang_runtime', 'comgr', + 'rocminfo', 'vdi', 'hipvdi', 'ocl', 'rocdbgapi', + 'rocgdb', 'roctracer', 'rocprofiler'] +AOMP_DEFAULT_COMPONENTS = ['roct', 'rocr', 'project', 'libdevice', 'openmp', + 'extras', 'pgmath', 'flang', 'flang_runtime', + 'comgr', 'rocminfo'] +AOMP_X86_COMPONENTS = ['vdi', 'hipvdi', 'ocl'] +AOMP_DBG_COMPONENTS = ['rocdbgapi', 'rocgdb'] +AOMP_PROF_COMPONENTS = ['roctracer', 'rocprofiler'] + + +class EB_AOMP(Binary): + """Support for installing AOMP""" + + @staticmethod + def extra_options(): + extra_vars = Binary.extra_options() + extra_vars.update({ + 'components': [None, "AOMP components to build. 
Possible components: " + + ', '.join(AOMP_ALL_COMPONENTS), CUSTOM], + }) + return extra_vars + + def __init__(self, *args, **kwargs): + """Initialize custom class variables for Clang.""" + super(EB_AOMP, self).__init__(*args, **kwargs) + self.cfg['extract_sources'] = True + self.cfg['dontcreateinstalldir'] = True + + def configure_step(self): + """Configure AOMP build and let 'Binary' install""" + # Setup install command + self.cfg['install_cmd'] = './aomp/bin/build_aomp.sh' + # Setup 'preinstallopts' + version_major = self.version.split('-')[0] + install_options = [ + 'AOMP={!s}'.format(self.installdir), + 'AOMP_REPOS="{!s}/aomp{!s}"'.format(self.builddir, version_major), + 'AOMP_CMAKE={!s}/bin/cmake'.format(get_software_root('CMake')), + 'AOMP_CHECK_GIT_BRANCH=0', + 'AOMP_APPLY_ROCM_PATCHES=0', + 'AOMP_STANDALONE_BUILD=1', + ] + if self.cfg['parallel']: + install_options.append( + 'NUM_THREADS={!s}'.format(self.cfg['parallel'])) + else: + install_options.append('NUM_THREADS=1') + # Check if CUDA is loaded and alternatively build CUDA backend + if get_software_root('CUDA') or get_software_root('CUDAcore'): + cuda_root = get_software_root('CUDA') or get_software_root('CUDAcore') + install_options.append('AOMP_BUILD_CUDA=1') + install_options.append('CUDA="{!s}"'.format(cuda_root)) + # list of CUDA compute capabilities to use can be specifed in two ways (where (2) overrules (1)): + # (1) in the easyconfig file, via the custom cuda_compute_capabilities; + # (2) in the EasyBuild configuration, via --cuda-compute-capabilities configuration option; + ec_cuda_cc = self.cfg['cuda_compute_capabilities'] + cfg_cuda_cc = build_option('cuda_compute_capabilities') + cuda_cc = cfg_cuda_cc or ec_cuda_cc or [] + if cfg_cuda_cc and ec_cuda_cc: + warning_msg = "cuda_compute_capabilities specified in easyconfig (%s) are overruled by " % ec_cuda_cc + warning_msg += "--cuda-compute-capabilities configuration option (%s)" % cfg_cuda_cc + print_warning(warning_msg) + if not cuda_cc: + raise EasyBuildError("CUDA module was loaded, " + "indicating a build with CUDA, " + "but no CUDA compute capability " + "was specified!") + # Convert '7.0' to '70' format + cuda_cc = [cc.replace('.', '') for cc in cuda_cc] + cuda_str = ",".join(cuda_cc) + install_options.append('NVPTXGPUS="{!s}"'.format(cuda_str)) + else: + # Explicitly disable CUDA + install_options.append('AOMP_BUILD_CUDA=0') + # Combine install instructions above into 'preinstallopts' + self.cfg['preinstallopts'] = ' '.join(install_options) + # Setup components for install + components = self.cfg.get('components', None) + # If no components were defined we use the default + if not components: + components = AOMP_DEFAULT_COMPONENTS + # NOTE: The following has not been tested properly and is therefore + # removed + # + # Add X86 components if correct architecture + # if get_cpu_architecture() == X86_64: + # components.extend(AOMP_X86_COMPONENTS) + # Only build selected components + self.cfg['installopts'] = 'select ' + ' '.join(components) + + def post_install_step(self): + super(EB_AOMP, self).post_install_step() + # The install script will create a symbolic link as the install + # directory, this creates problems for EB as it won't remove the + # symlink. To remedy this we remove the link here and rename the actual + # install directory created by the AOMP install script + if os.path.islink(self.installdir): + remove_file(self.installdir) + else: + err_str = "Expected '{!s}' to be a symbolic link" \ + " that needed to be removed, but it wasn't!" 
+ raise EasyBuildError(err_str.format(self.installdir)) + # Move the actual directory containing the install + install_name = '{!s}_{!s}'.format(os.path.basename(self.installdir), + self.version) + actual_install = os.path.join(os.path.dirname(self.installdir), + install_name) + if os.path.exists(actual_install) and os.path.isdir(actual_install): + move_file(actual_install, self.installdir) + else: + err_str = "Tried to move '{!s}' to '{!s}', " \ + " but it either doesn't exist" \ + " or isn't a directory!" + raise EasyBuildError(err_str.format(actual_install, + self.installdir)) + + def sanity_check_step(self): + """Custom sanity check for AOMP""" + shlib_ext = get_shared_lib_ext() + arch = get_cpu_architecture() + # Check architecture explicitly since Clang uses potentially + # different names + arch_map = { + X86_64: 'x86_64', + POWER: 'ppc64', + AARCH64: 'aarch64', + } + + if arch in arch_map: + arch = arch_map[arch] + else: + print_warning("Unknown CPU architecture (%s) for OpenMP offloading!" % arch) + custom_paths = { + 'files': [ + "amdgcn/bitcode/hip.bc", "amdgcn/bitcode/opencl.bc", "bin/aompcc", + "bin/aompversion", "bin/clang", "bin/flang", "bin/ld.lld", "bin/llvm-config", + "bin/mygpu", "bin/opt", "bin/rocminfo", "include/amd_comgr.h", + "include/hsa/amd_hsa_common.h", "include/hsa/hsa.h", "include/omp.h", + "include/omp_lib.h", "lib/libclang.%s" % shlib_ext, "lib/libflang.%s" % shlib_ext, + "lib/libomp.%s" % shlib_ext, "lib/libomptarget.rtl.amdgpu.%s" % shlib_ext, + "lib/libomptarget.rtl.%s.%s" % (arch, shlib_ext), "lib/libomptarget.%s" % shlib_ext, + ], + 'dirs': ["amdgcn", "include/clang", "include/hsa", "include/llvm"], + } + # If we are building with CUDA support we need to check if it was built properly + if get_software_root('CUDA') or get_software_root('CUDAcore'): + custom_paths['files'].append("lib/libomptarget.rtl.cuda.%s" % shlib_ext) + custom_commands = [ + 'aompcc --help', 'clang --help', 'clang++ --help', 'flang --help', + 'llvm-config --cxxflags', + ] + super(EB_AOMP, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands) diff --git a/easyblocks/c/cp2k.py b/easyblocks/c/cp2k.py new file mode 100644 index 0000000..1adfd2f --- /dev/null +++ b/easyblocks/c/cp2k.py @@ -0,0 +1,970 @@ +# IT4Innovations 2022 +# JK +# EasyBlock for CP2K CUDA support +## +# Copyright 2009-2021 Ghent University +# +# This file is part of EasyBuild, +# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), +# with support of Ghent University (http://ugent.be/hpc), +# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), +# Flemish Research Foundation (FWO) (http://www.fwo.be/en) +# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). +# +# https://github.com/easybuilders/easybuild +# +# EasyBuild is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation v2. +# +# EasyBuild is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 
+## +""" +EasyBuild support for building and installing CP2K, implemented as an easyblock + +@author: Stijn De Weirdt (Ghent University) +@author: Dries Verdegem (Ghent University) +@author: Kenneth Hoste (Ghent University) +@author: Pieter De Baets (Ghent University) +@author: Jens Timmerman (Ghent University) +@author: Ward Poelmans (Ghent University) +@author: Luca Marsella (CSCS) +@author: Damian Alvarez (Forschungszentrum Juelich GmbH) +@author: Alan O'Cais (Forschungszentrum Juelich GmbH) +@author: Balazs Hajgato (Free University Brussels (VUB)) +""" + +import fileinput +import glob +import re +import os +import sys +from distutils.version import LooseVersion + +import easybuild.tools.toolchain as toolchain +from easybuild.framework.easyblock import EasyBlock +from easybuild.framework.easyconfig import CUSTOM +from easybuild.tools.build_log import EasyBuildError +from easybuild.tools.environment import setvar +from easybuild.tools.filetools import change_dir, copy_dir, copy_file, mkdir, write_file +from easybuild.tools.config import build_option +from easybuild.tools.modules import get_software_root, get_software_version +from easybuild.tools.run import run_cmd +from easybuild.tools.systemtools import get_avail_core_count + + +class EB_CP2K(EasyBlock): + """ + Support for building CP2K + - prepare module include files if required + - generate custom config file in 'arch' directory + - build CP2K + - run regression test if desired + - install by copying binary executables + """ + + def __init__(self, *args, **kwargs): + super(EB_CP2K, self).__init__(*args, **kwargs) + + self.typearch = None + + # this should be set to False for old versions of GCC (e.g. v4.1) + self.compilerISO_C_BINDING = True + + # compiler options that need to be set in Makefile + self.debug = '' + self.fpic = '' + + # used for both libsmm and libxsmm + self.libsmm = '' + self.modincpath = '' + self.openmp = '' + + self.make_instructions = '' + + @staticmethod + def extra_options(): + extra_vars = { + 'extracflags': ['', "Extra CFLAGS to be added", CUSTOM], + 'extradflags': ['', "Extra DFLAGS to be added", CUSTOM], + 'gpuver': [None, "Value for GPUVER configuration setting, specifies type of GPU to build for", CUSTOM], + 'ignore_regtest_fails': [False, ("Ignore failures in regression test " + "(should be used with care)"), CUSTOM], + 'library': [False, "Also build CP2K as a library", CUSTOM], + 'maxtasks': [4, ("Maximum number of CP2K instances run at " + "the same time during testing"), CUSTOM], + 'modinc': [[], ("List of modinc's to use (*.f90], or 'True' to use " + "all found at given prefix"), CUSTOM], + 'modincprefix': ['', "Intel MKL prefix for modinc include dir", CUSTOM], + 'omp_num_threads': [None, "Value to set $OMP_NUM_THREADS to during testing", CUSTOM], + 'plumed': [None, "Enable PLUMED support", CUSTOM], + 'runtest': [True, "Build and run CP2K tests", CUSTOM], + 'type': ['popt', "Type of build ('popt' or 'psmp')", CUSTOM], + 'typeopt': [True, "Enable optimization", CUSTOM], + } + return EasyBlock.extra_options(extra_vars) + + def _generate_makefile(self, options): + """Generate Makefile based on options dictionary and optional make instructions""" + + text = "# Makefile generated by CP2K easyblock in EasyBuild\n" + for key, value in sorted(options.items()): + text += "%s = %s\n" % (key, value) + return text + self.make_instructions + + def configure_step(self): + """Configure build + - build Libint wrapper + - generate Makefile + """ + + known_types = ['popt', 'psmp'] + if self.cfg['type'] not in 
known_types: + raise EasyBuildError("Unknown build type specified: '%s', known types are %s", + self.cfg['type'], known_types) + + # correct start dir, if needed + # recent CP2K versions have a 'cp2k' dir in the unpacked 'cp2k' dir + cp2k_path = os.path.join(self.cfg['start_dir'], 'cp2k') + if os.path.exists(cp2k_path): + self.cfg['start_dir'] = cp2k_path + self.log.info("Corrected start_dir to %s" % self.cfg['start_dir']) + + # set compilers options according to toolchain config + # full debug: -g -traceback -check all -fp-stack-check + # -g links to mpi debug libs + if self.toolchain.options['debug']: + self.debug = '-g' + self.log.info("Debug build") + if self.toolchain.options['pic']: + self.fpic = "-fPIC" + self.log.info("Using fPIC") + + # report on extra flags being used + if self.cfg['extracflags']: + self.log.info("Using extra CFLAGS: %s" % self.cfg['extracflags']) + if self.cfg['extradflags']: + self.log.info("Using extra DFLAGS: %s" % self.cfg['extradflags']) + + # lib(x)smm support + libsmm = get_software_root('libsmm') + libxsmm = get_software_root('libxsmm') + if libxsmm: + self.cfg.update('extradflags', '-D__LIBXSMM') + self.libsmm = '-lxsmm -lxsmmf' + self.log.debug('Using libxsmm %s' % libxsmm) + elif libsmm: + libsmms = glob.glob(os.path.join(libsmm, 'lib', 'libsmm_*nn.a')) + dfs = [os.path.basename(os.path.splitext(x)[0]).replace('lib', '-D__HAS_') for x in libsmms] + moredflags = ' ' + ' '.join(dfs) + self.cfg.update('extradflags', moredflags) + self.libsmm = ' '.join(libsmms) + self.log.debug('Using libsmm %s (extradflags %s)' % (self.libsmm, moredflags)) + + # obtain list of modinc's to use + if self.cfg["modinc"]: + self.modincpath = self.prepmodinc() + + # set typearch + self.typearch = "Linux-x86-64-%s" % self.toolchain.name + + # extra make instructions + self.make_instructions = '' # "graphcon.o: graphcon.F\n\t$(FC) -c $(FCFLAGS2) $<\n" + + # compiler toolchain specific configuration + comp_fam = self.toolchain.comp_family() + if comp_fam == toolchain.INTELCOMP: + options = self.configure_intel_based() + elif comp_fam == toolchain.GCC: + options = self.configure_GCC_based() + else: + raise EasyBuildError("Don't know how to tweak configuration for compiler family %s" % comp_fam) + + # BLAS/LAPACK/FFTW + if get_software_root('imkl'): + options = self.configure_MKL(options) + else: + # BLAS + if get_software_root('ACML'): + options = self.configure_ACML(options) + else: + options = self.configure_BLAS_lib(options) + + # FFTW (no MKL involved) + if 'fftw3' in os.getenv('LIBFFT', ''): + options = self.configure_FFTW3(options) + + # LAPACK + if os.getenv('LIBLAPACK_MT', None) is not None: + options = self.configure_LAPACK(options) + + if os.getenv('LIBSCALAPACK', None) is not None: + options = self.configure_ScaLAPACK(options) + + # PLUMED + plumed = get_software_root('PLUMED') + if self.cfg['plumed'] and not plumed: + raise EasyBuildError("The PLUMED module needs to be loaded to build CP2K with PLUMED support") + + # enable PLUMED support if PLUMED is listed as a dependency + # and PLUMED support is either explicitly enabled (plumed = True) or unspecified ('plumed' not defined) + if plumed and (self.cfg['plumed'] or self.cfg['plumed'] is None): + options['LIBS'] += ' -lplumed' + options['DFLAGS'] += ' -D__PLUMED2' + + # ELPA + elpa = get_software_root('ELPA') + if elpa: + options['LIBS'] += ' -lelpa' + elpa_inc_dir = os.path.join(elpa, 'include', 'elpa-%s' % get_software_version('ELPA'), 'modules') + options['FCFLAGSOPT'] += ' -I%s ' % elpa_inc_dir + if 
LooseVersion(self.version) >= LooseVersion('6.1'): + elpa_ver = ''.join(get_software_version('ELPA').split('.')[:2]) + options['DFLAGS'] += ' -D__ELPA=%s' % elpa_ver + elpa_inc_dir = os.path.join(elpa, 'include', 'elpa-%s' % get_software_version('ELPA'), 'elpa') + options['FCFLAGSOPT'] += ' -I%s ' % elpa_inc_dir + else: + options['DFLAGS'] += ' -D__ELPA3' + + # CUDA support + # see https://github.com/cp2k/cp2k/blob/master/INSTALL.md#2j-cuda-optional-improved-performance-on-gpu-systems + cuda = get_software_root('CUDA') + if cuda: + # determine CUDA compute capability to use based on --cuda-compute-capabilities in EasyBuild configuration, + # or cuda_compute_capabilities easyconfig parameter (fallback); + # must be a single value to build CP2K with CUDA support! + cuda_cc = build_option('cuda_compute_capabilities') or self.cfg.get('cuda_compute_capabilities') + if len(cuda_cc) == 1: + cuda_cc = cuda_cc[0] + elif cuda_cc: + error_msg = "Exactly one CUDA compute capability must be specified, found %d: %s" + raise EasyBuildError(error_msg, len(cuda_cc), ', '.join(cuda_cc)) + else: + error_msg = "Exactly one CUDA compute capability must be specified via " + error_msg += "--cuda-compute-capabilities or the cuda_compute_capabilities easyconfig parameter." + raise EasyBuildError(error_msg) + + # GPUVER must be set, required by the DBCSR component, + # see exts/dbcsr/Makefile and the parameters_*.json in src/acc/libsmm_acc/libcusmm/; + # determine string value to use based on select CUDA compute capability, unless specified explicitly via + # custom 'gpuver' easyconfig parameter + gpuver = self.cfg['gpuver'] + + if gpuver is None: + cuda_cc_lv = LooseVersion(cuda_cc) + known_gpuver = [ + ('7.0', 'V100'), + ('6.0', 'P100'), + ('3.7', 'K80'), + ('3.5', 'K40'), + ] + for min_cuda_cc, val in known_gpuver: + if cuda_cc_lv >= LooseVersion(min_cuda_cc): + gpuver = val + break + + if gpuver is None: + raise EasyBuildError("Failed to determine value for required GPUVER setting!") + else: + options['GPUVER'] = gpuver + + options['DFLAGS'] += ' -D__ACC -D__DBCSR_ACC -D__PW_CUDA -D__GRID_CUDA' + options['LIBS'] += ' -lcudart -lnvrtc -lcuda -lcublas -lcufft -lrt' + options['NVCC'] = 'nvcc' + options['NVFLAGS'] = ' '.join([ + options['DFLAGS'], + '-O3', + '--std=c++11', + '-arch sm_%s' % cuda_cc.replace('.', ''), + # control host compilers + options + "-ccbin='%s'" % os.getenv('CXX'), + "-Xcompiler='%s'" % os.getenv('CXXFLAGS'), + ]) + + # avoid group nesting + options['LIBS'] = options['LIBS'].replace('-Wl,--start-group', '').replace('-Wl,--end-group', '') + + options['LIBS'] = "-Wl,--start-group %s -Wl,--end-group" % options['LIBS'] + + # specify correct location for 'data' directory in final installation + options['DATA_DIR'] = os.path.join(self.installdir, 'data') + + # create arch file using options set + archfile = os.path.join(self.cfg['start_dir'], 'arch', '%s.%s' % (self.typearch, self.cfg['type'])) + txt = self._generate_makefile(options) + write_file(archfile, txt) + self.log.info("Content of makefile (%s):\n%s" % (archfile, txt)) + + def prepmodinc(self): + """Prepare list of module files""" + + self.log.debug("Preparing module files") + + imkl = get_software_root('imkl') + + if imkl: + + # prepare modinc target path + modincpath = os.path.join(os.path.dirname(os.path.normpath(self.cfg['start_dir'])), 'modinc') + self.log.debug("Preparing module files in %s" % modincpath) + + mkdir(modincpath, parents=True) + + # get list of modinc source files + modincdir = os.path.join(imkl, 
self.cfg["modincprefix"], 'include') + + if isinstance(self.cfg["modinc"], list): + modfiles = [os.path.join(modincdir, x) for x in self.cfg["modinc"]] + + elif isinstance(self.cfg["modinc"], bool) and self.cfg["modinc"]: + modfiles = glob.glob(os.path.join(modincdir, '*.f90')) + + else: + raise EasyBuildError("prepmodinc: Please specify either a boolean value or a list of files in modinc " + "(found: %s).", self.cfg["modinc"]) + + f77 = os.getenv('F77') + if not f77: + raise EasyBuildError("F77 environment variable not set, can't continue.") + + # create modinc files + for f in modfiles: + if f77.endswith('ifort'): + cmd = "%s -module %s -c %s" % (f77, modincpath, f) + elif f77 in ['gfortran', 'mpif77']: + cmd = "%s -J%s -c %s" % (f77, modincpath, f) + else: + raise EasyBuildError("prepmodinc: Unknown value specified for F77 (%s)", f77) + + run_cmd(cmd, log_all=True, simple=True) + + return modincpath + else: + raise EasyBuildError("Don't know how to prepare modinc, imkl not found") + + def configure_common(self): + """Common configuration for all toolchains""" + + # openmp introduces 2 major differences + # -automatic is default: -noautomatic -auto-scalar + # some mem-bandwidth optimisation + if self.cfg['type'] == 'psmp': + self.openmp = self.toolchain.get_flag('openmp') + + # determine which opt flags to use + if self.cfg['typeopt']: + optflags = 'OPT' + regflags = 'OPT2' + else: + optflags = 'NOOPT' + regflags = 'NOOPT' + + # make sure a MPI-2 able MPI lib is used + mpi2 = False + if hasattr(self.toolchain, 'MPI_FAMILY') and self.toolchain.MPI_FAMILY is not None: + known_mpi2_fams = [toolchain.MPICH, toolchain.MPICH2, toolchain.MVAPICH2, toolchain.OPENMPI, + toolchain.INTELMPI] + mpi_fam = self.toolchain.mpi_family() + if mpi_fam in known_mpi2_fams: + mpi2 = True + self.log.debug("Determined MPI2 compatibility based on MPI toolchain component: %s" % mpi_fam) + else: + self.log.debug("Cannot determine MPI2 compatibility based on MPI toolchain component: %s" % mpi_fam) + else: + # can't use toolchain.mpi_family, because of system toolchain + mpi2libs = ['impi', 'MVAPICH2', 'OpenMPI', 'MPICH2', 'MPICH'] + for mpi2lib in mpi2libs: + if get_software_root(mpi2lib): + mpi2 = True + self.log.debug("Determined MPI2 compatibility based on loaded MPI module: %s") + else: + self.log.debug("MPI-2 supporting MPI library %s not loaded.") + + if not mpi2: + raise EasyBuildError("CP2K needs MPI-2, no known MPI-2 supporting library loaded?") + + cppflags = os.getenv('CPPFLAGS') + ldflags = os.getenv('LDFLAGS') + cflags = os.getenv('CFLAGS') + fflags = os.getenv('FFLAGS') + fflags_lowopt = re.sub('-O[0-9]', '-O1', fflags) + options = { + 'CC': os.getenv('MPICC'), + 'CPP': '', + 'FC': '%s %s' % (os.getenv('MPIF90'), self.openmp), + 'LD': '%s %s' % (os.getenv('MPIF90'), self.openmp), + 'AR': 'ar -r', + 'CPPFLAGS': '', + + 'FPIC': self.fpic, + 'DEBUG': self.debug, + + 'FCFLAGS': '$(FCFLAGS%s)' % optflags, + 'FCFLAGS2': '$(FCFLAGS%s)' % regflags, + + 'CFLAGS': ' %s %s %s $(FPIC) $(DEBUG) %s ' % (cflags, cppflags, ldflags, self.cfg['extracflags']), + 'DFLAGS': ' -D__parallel -D__BLACS -D__SCALAPACK -D__FFTSG %s' % self.cfg['extradflags'], + + 'LIBS': os.getenv('LIBS', ''), + + 'FCFLAGSNOOPT': '$(DFLAGS) $(CFLAGS) -O0 $(FREE) $(FPIC) $(DEBUG)', + 'FCFLAGSOPT': '%s $(FREE) $(SAFE) $(FPIC) $(DEBUG)' % fflags, + 'FCFLAGSOPT2': '%s $(FREE) $(SAFE) $(FPIC) $(DEBUG)' % fflags_lowopt, + } + + libint = get_software_root('LibInt') + if libint: + options['DFLAGS'] += ' -D__LIBINT' + + libintcompiler = "%s %s" % 
(os.getenv('CC'), os.getenv('CFLAGS')) + + # Build libint-wrapper, if required + libint_wrapper = '' + + # required for old versions of GCC + if not self.compilerISO_C_BINDING: + options['DFLAGS'] += ' -D__HAS_NO_ISO_C_BINDING' + + # determine path for libint_tools dir + libinttools_paths = ['libint_tools', 'tools/hfx_tools/libint_tools'] + libinttools_path = None + for path in libinttools_paths: + path = os.path.join(self.cfg['start_dir'], path) + if os.path.isdir(path): + libinttools_path = path + change_dir(libinttools_path) + if not libinttools_path: + raise EasyBuildError("No libinttools dir found") + + # build libint wrapper + cmd = "%s -c libint_cpp_wrapper.cpp -I%s/include" % (libintcompiler, libint) + if not run_cmd(cmd, log_all=True, simple=True): + raise EasyBuildError("Building the libint wrapper failed") + libint_wrapper = '%s/libint_cpp_wrapper.o' % libinttools_path + + # determine Libint libraries based on major version number + libint_maj_ver = get_software_version('Libint').split('.')[0] + if libint_maj_ver == '1': + libint_libs = "$(LIBINTLIB)/libderiv.a $(LIBINTLIB)/libint.a $(LIBINTLIB)/libr12.a" + elif libint_maj_ver == '2': + libint_libs = "$(LIBINTLIB)/libint2.a" + else: + raise EasyBuildError("Don't know how to handle libint version %s", libint_maj_ver) + self.log.info("Using Libint version %s" % (libint_maj_ver)) + + options['LIBINTLIB'] = '%s/lib' % libint + options['LIBS'] += ' %s -lstdc++ %s' % (libint_libs, libint_wrapper) + + # add Libint include dir to $FCFLAGS + options['FCFLAGS'] += ' -I' + os.path.join(libint, 'include') + + else: + # throw a warning, since CP2K without Libint doesn't make much sense + self.log.warning("Libint module not loaded, so building without Libint support") + + libxc = get_software_root('libxc') + if libxc: + cur_libxc_version = get_software_version('libxc') + if LooseVersion(self.version) >= LooseVersion('6.1'): + libxc_min_version = '4.0.3' + options['DFLAGS'] += ' -D__LIBXC' + else: + libxc_min_version = '2.0.1' + options['DFLAGS'] += ' -D__LIBXC2' + + if LooseVersion(cur_libxc_version) < LooseVersion(libxc_min_version): + raise EasyBuildError("This version of CP2K is not compatible with libxc < %s" % libxc_min_version) + + if LooseVersion(cur_libxc_version) >= LooseVersion('4.0.3'): + # cfr. 
https://www.cp2k.org/howto:compile#k_libxc_optional_wider_choice_of_xc_functionals + options['LIBS'] += ' -L%s/lib -lxcf03 -lxc' % libxc + elif LooseVersion(cur_libxc_version) >= LooseVersion('2.2'): + options['LIBS'] += ' -L%s/lib -lxcf90 -lxc' % libxc + else: + options['LIBS'] += ' -L%s/lib -lxc' % libxc + self.log.info("Using Libxc-%s" % cur_libxc_version) + else: + self.log.info("libxc module not loaded, so building without libxc support") + + return options + + def configure_intel_based(self): + """Configure for Intel based toolchains""" + + # based on guidelines available at + # http://software.intel.com/en-us/articles/build-cp2k-using-intel-fortran-compiler-professional-edition/ + intelurl = ''.join(["http://software.intel.com/en-us/articles/", + "build-cp2k-using-intel-fortran-compiler-professional-edition/"]) + + options = self.configure_common() + + extrainc = '' + if self.modincpath: + extrainc = '-I%s' % self.modincpath + + options.update({ + # -Vaxlib : older options + 'FREE': '-fpp -free', + + # SAFE = -assume protect_parens -fp-model precise -ftz # causes problems, so don't use this + 'SAFE': '-assume protect_parens -no-unroll-aggressive', + + 'INCFLAGS': '$(DFLAGS) -I$(INTEL_INC) -I$(INTEL_INCF) %s' % extrainc, + + 'LDFLAGS': '$(INCFLAGS) ', + 'OBJECTS_ARCHITECTURE': 'machine_intel.o', + }) + + options['DFLAGS'] += ' -D__INTEL' + + options['FCFLAGSOPT'] += ' $(INCFLAGS) -heap-arrays 64' + options['FCFLAGSOPT2'] += ' $(INCFLAGS) -heap-arrays 64' + + ifortver = LooseVersion(get_software_version('ifort')) + + # Required due to memory leak that occurs if high optimizations are used (from CP2K 7.1 intel-popt-makefile) + if ifortver >= LooseVersion("2018.5"): + self.make_instructions += "mp2_optimize_ri_basis.o: mp2_optimize_ri_basis.F\n" \ + "\t$(FC) -c $(subst O2,O0,$(FCFLAGSOPT)) $<\n" + self.log.info("Optimization level of mp2_optimize_ri_basis.F was decreased to '-O0'") + + # RHEL8 intel/2020a lots of CPASSERT failed (due to high optimization in cholesky decomposition) + if ifortver >= LooseVersion("2019"): + self.make_instructions += "cp_fm_cholesky.o: cp_fm_cholesky.F\n\t$(FC) -c $(FCFLAGS2) $<\n" + self.log.info("Optimization flags for cp_fm_cholesky.F is set to '%s'", options['FCFLAGSOPT2']) + + # -i-static has been deprecated prior to 2013, but was still usable. From 2015 it is not. 
+ if ifortver < LooseVersion("2013"): + options['LDFLAGS'] += ' -i-static ' + else: + options['LDFLAGS'] += ' -static-intel ' + + # Otherwise it fails on linking, since there are 2 definitions of main + if LooseVersion(self.version) >= LooseVersion('4.1'): + options['LDFLAGS'] += ' -nofor-main ' + + failmsg = "CP2K won't build correctly with the Intel %%s compilers prior to %%s, see %s" % intelurl + + if ifortver >= LooseVersion("2011") and ifortver < LooseVersion("2012"): + + # don't allow using Intel compiler 2011 prior to release 8, because of known issue (see Intel URL) + if ifortver >= LooseVersion("2011.8"): + # add additional make instructions to Makefile + self.make_instructions += "et_coupling.o: et_coupling.F\n\t$(FC) -c $(FCFLAGS2) $<\n" + self.make_instructions += "qs_vxc_atom.o: qs_vxc_atom.F\n\t$(FC) -c $(FCFLAGS2) $<\n" + + else: + raise EasyBuildError(failmsg, "v12", "v2011.8") + + elif ifortver >= LooseVersion("11"): + if LooseVersion(get_software_version('ifort')) >= LooseVersion("11.1.072"): + self.make_instructions += "qs_vxc_atom.o: qs_vxc_atom.F\n\t$(FC) -c $(FCFLAGS2) $<\n" + + else: + raise EasyBuildError(failmsg, "v11", "v11.1.072") + + else: + raise EasyBuildError("Intel compilers version %s not supported yet.", ifortver) + + return options + + def configure_GCC_based(self): + """Configure for GCC based toolchains""" + options = self.configure_common() + + options.update({ + # need this to prevent "Unterminated character constant beginning" errors + 'FREE': '-ffree-form -ffree-line-length-none', + + 'LDFLAGS': '$(FCFLAGS)', + 'OBJECTS_ARCHITECTURE': 'machine_gfortran.o', + }) + + options['DFLAGS'] += ' -D__GFORTRAN' + + options['FCFLAGSOPT'] += ' $(DFLAGS) $(CFLAGS) -fmax-stack-var-size=32768' + options['FCFLAGSOPT2'] += ' $(DFLAGS) $(CFLAGS)' + + gcc_version = get_software_version('GCCcore') or get_software_version('GCC') + if LooseVersion(gcc_version) >= LooseVersion('10.0') and LooseVersion(self.version) <= LooseVersion('7.1'): + # -fallow-argument-mismatch is required for CP2K 7.1 (and older) when compiling with GCC 10.x & more recent, + # see https://github.com/cp2k/cp2k/issues/1157, https://github.com/cp2k/dbcsr/issues/351, + # https://github.com/cp2k/dbcsr/commit/58ee9709545deda8524cab804bf1f88a61a864ac and + # https://gcc.gnu.org/legacy-ml/gcc-patches/2019-10/msg01861.html + options['FCFLAGSOPT'] += ' -fallow-argument-mismatch' + options['FCFLAGSOPT2'] += ' -fallow-argument-mismatch' + + return options + + def configure_ACML(self, options): + """Configure for AMD Math Core Library (ACML)""" + + openmp_suffix = '' + if self.openmp: + openmp_suffix = '_mp' + + options['ACML_INC'] = '%s/gfortran64%s/include' % (get_software_root('ACML'), openmp_suffix) + options['CFLAGS'] += ' -I$(ACML_INC) -I$(FFTW_INC)' + options['DFLAGS'] += ' -D__FFTACML' + + blas = os.getenv('LIBBLAS', '') + blas = blas.replace('gfortran64', 'gfortran64%s' % openmp_suffix) + options['LIBS'] += ' %s %s %s' % (self.libsmm, os.getenv('LIBSCALAPACK', ''), blas) + + return options + + def configure_BLAS_lib(self, options): + """Configure for BLAS library.""" + options['LIBS'] += ' %s %s' % (self.libsmm, os.getenv('LIBBLAS', '')) + return options + + def configure_MKL(self, options): + """Configure for Intel Math Kernel Library (MKL)""" + + options['INTEL_INC'] = '$(MKLROOT)/include' + options['DFLAGS'] += ' -D__FFTW3' + + extra = '' + if self.modincpath: + extra = '-I%s' % self.modincpath + options['CFLAGS'] += ' -I$(INTEL_INC) %s $(FPIC) $(DEBUG)' % extra + + options['LIBS'] += ' %s %s' % 
(self.libsmm, os.getenv('LIBSCALAPACK', '')) + + fftw_root = get_software_root('FFTW') + if fftw_root: + libfft = '-lfftw3' + if self.cfg['type'] == 'psmp': + libfft += ' -lfftw3_omp' + + options['CFLAGS'] += ' -I$(INTEL_INCF)' + options['INTEL_INCF'] = os.path.join(fftw_root, 'include') + options['LIBS'] += ' -L%s %s' % (os.path.join(fftw_root, 'lib'), libfft) + + else: + # only use Intel FFTW wrappers if FFTW is not loaded + options['CFLAGS'] += ' -I$(INTEL_INCF)' + options['DFLAGS'] += ' -D__FFTMKL' + options['INTEL_INCF'] = '$(INTEL_INC)/fftw' + options['LIBS'] = '%s %s' % (os.getenv('LIBFFT', ''), options['LIBS']) + + return options + + def configure_FFTW3(self, options): + """Configure for FFTW3""" + + options.update({ + 'FFTW_INC': os.getenv('FFT_INC_DIR', ''), # GCC + 'FFTW3INC': os.getenv('FFT_INC_DIR', ''), # Intel + 'FFTW3LIB': os.getenv('FFT_LIB_DIR', ''), # Intel + }) + + options['DFLAGS'] += ' -D__FFTW3' + if self.cfg['type'] == 'psmp': + libfft = os.getenv('LIBFFT_MT', '') + else: + libfft = os.getenv('LIBFFT', '') + options['LIBS'] += ' -L%s %s' % (os.getenv('FFT_LIB_DIR', '.'), libfft) + + return options + + def configure_LAPACK(self, options): + """Configure for LAPACK library""" + options['LIBS'] += ' %s' % os.getenv('LIBLAPACK_MT', '') + return options + + def configure_ScaLAPACK(self, options): + """Configure for ScaLAPACK library""" + + options['LIBS'] += ' %s' % os.getenv('LIBSCALAPACK', '') + + return options + + def build_step(self): + """Start the actual build + - go into makefiles dir + - patch Makefile + -build_and_install + """ + + if LooseVersion(self.version) < LooseVersion('7.0'): + makefiles = os.path.join(self.cfg['start_dir'], 'makefiles') + change_dir(makefiles) + + # modify makefile for parallel build + parallel = self.cfg['parallel'] + if parallel: + + try: + for line in fileinput.input('Makefile', inplace=1, backup='.orig.patchictce'): + line = re.sub(r"^PMAKE\s*=.*$", "PMAKE\t= $(SMAKE) -j %s" % parallel, line) + sys.stdout.write(line) + except IOError as err: + raise EasyBuildError("Can't modify/write Makefile in %s: %s", makefiles, err) + + # update make options with MAKE + self.cfg.update('buildopts', 'MAKE="make -j %s"' % self.cfg['parallel']) + + # update make options with ARCH and VERSION + self.cfg.update('buildopts', 'ARCH=%s VERSION=%s' % (self.typearch, self.cfg['type'])) + + cmd = "make %s" % self.cfg['buildopts'] + + # clean first + run_cmd(cmd + " clean", log_all=True, simple=True, log_output=True) + + # build and install + if self.cfg['library']: + cmd += ' libcp2k' + run_cmd(cmd + " all", log_all=True, simple=True, log_output=True) + + def test_step(self): + """Run regression test.""" + + if self.cfg['runtest']: + + # we need to specify location of 'data' directory in *build* dir, + # since we've configured CP2K to look into the installation directory + # (where 'data' will be copied to in install step) + setvar('CP2K_DATA_DIR', os.path.join(self.cfg['start_dir'], 'data')) + + if not build_option('mpi_tests'): + self.log.info("Skipping testing of CP2K since MPI testing is disabled") + return + + if self.cfg['omp_num_threads']: + setvar('OMP_NUM_THREADS', self.cfg['omp_num_threads']) + + # change to root of build dir + change_dir(self.builddir) + + # use regression test reference output if available + # try and find an unpacked directory that starts with 'LAST-' + regtest_refdir = None + for d in os.listdir(self.builddir): + if d.startswith("LAST-"): + regtest_refdir = d + break + + # location of do_regtest script + cfg_fn = 
'cp2k_regtest.cfg' + + regtest_script = os.path.join(self.cfg['start_dir'], 'tools', 'regtesting', 'do_regtest') + regtest_cmd = [regtest_script, '-nobuild', '-config', cfg_fn] + if LooseVersion(self.version) < LooseVersion('7.1'): + # -nosvn option was removed in CP2K 7.1 + regtest_cmd.insert(1, '-nosvn') + + # older version of CP2K + if not os.path.exists(regtest_script): + regtest_script = os.path.join(self.cfg['start_dir'], 'tools', 'do_regtest') + regtest_cmd = [regtest_script, '-nocvs', '-quick', '-nocompile', '-config', cfg_fn] + + regtest_cmd = ' '.join(regtest_cmd) + + # patch do_regtest so that reference output is used + if regtest_refdir: + self.log.info("Using reference output available in %s" % regtest_refdir) + try: + for line in fileinput.input(regtest_script, inplace=1, backup='.orig.refout'): + line = re.sub(r"^(dir_last\s*=\${dir_base})/.*$", r"\1/%s" % regtest_refdir, line) + sys.stdout.write(line) + except IOError as err: + raise EasyBuildError("Failed to modify '%s': %s", regtest_script, err) + + else: + self.log.info("No reference output found for regression test, just continuing without it...") + + # prefer using 4 cores, since some tests require/prefer square (n^2) numbers or powers of 2 (2^n) + test_core_cnt = min(self.cfg['parallel'], 4) + if get_avail_core_count() < test_core_cnt: + raise EasyBuildError("Cannot run MPI tests as not enough cores (< %s) are available", test_core_cnt) + else: + self.log.info("Using %s cores for the MPI tests" % test_core_cnt) + + # configure regression test + cfg_txt = '\n'.join([ + 'FORT_C_NAME="%(f90)s"', + 'dir_base=%(base)s', + 'cp2k_version=%(cp2k_version)s', + 'dir_triplet=%(triplet)s', + 'export ARCH=${dir_triplet}', + 'cp2k_dir=%(cp2k_dir)s', + 'leakcheck="YES"', + 'maxtasks=%(maxtasks)s', + 'cp2k_run_prefix="%(mpicmd_prefix)s"', + ]) % { + 'f90': os.getenv('F90'), + 'base': os.path.dirname(os.path.normpath(self.cfg['start_dir'])), + 'cp2k_version': self.cfg['type'], + 'triplet': self.typearch, + 'cp2k_dir': os.path.basename(os.path.normpath(self.cfg['start_dir'])), + 'maxtasks': self.cfg['maxtasks'], + 'mpicmd_prefix': self.toolchain.mpi_cmd_for('', test_core_cnt), + } + + write_file(cfg_fn, cfg_txt) + self.log.debug("Contents of %s: %s" % (cfg_fn, cfg_txt)) + + # run regression test + (regtest_output, ec) = run_cmd(regtest_cmd, log_all=True, simple=False, log_output=True) + + if ec == 0: + self.log.info("Regression test output:\n%s" % regtest_output) + else: + raise EasyBuildError("Regression test failed (non-zero exit code): %s", regtest_output) + + # pattern to search for regression test summary + re_pattern = r"number\s+of\s+%s\s+tests\s+(?P<cnt>[0-9]+)" + + # find total number of tests + regexp = re.compile(re_pattern % "", re.M | re.I) + res = regexp.search(regtest_output) + tot_cnt = None + if res: + tot_cnt = int(res.group('cnt')) + else: + raise EasyBuildError("Finding total number of tests in regression test summary failed") + + # function to report on regtest results + def test_report(test_result): + """Report on tests with given result.""" + + postmsg = '' + + test_result = test_result.upper() + regexp = re.compile(re_pattern % test_result, re.M | re.I) + + cnt = None + res = regexp.search(regtest_output) + if not res: + raise EasyBuildError("Finding number of %s tests in regression test summary failed", + test_result.lower()) + else: + cnt = int(res.group('cnt')) + + logmsg = "Regression test reported %s / %s %s tests" + logmsg_values = (cnt, tot_cnt, test_result.lower()) + + # failed tests indicate problem 
with installation + # wrong tests are only an issue when there are excessively many + if (test_result == "FAILED" and cnt > 0) or (test_result == "WRONG" and (cnt / tot_cnt) > 0.1): + if self.cfg['ignore_regtest_fails']: + self.log.warning(logmsg, *logmsg_values) + self.log.info("Ignoring failures in regression test, as requested.") + else: + raise EasyBuildError(logmsg, *logmsg_values) + elif test_result == "CORRECT" or cnt == 0: + self.log.info(logmsg, *logmsg_values) + else: + self.log.warning(logmsg, *logmsg_values) + + return postmsg + + # number of failed/wrong tests, will report error if count is positive + self.postmsg += test_report("FAILED") + self.postmsg += test_report("WRONG") + + # there are no more 'new' tests from CP2K 8.1 onwards + if LooseVersion(self.version) < LooseVersion('8.0'): + # number of new tests, will be high if a non-suitable regtest reference was used + # will report error if count is positive (is that what we want?) + self.postmsg += test_report("NEW") + + # number of correct tests: just report + test_report("CORRECT") + + def install_step(self): + """Install built CP2K + - copy from exe to bin + - copy data dir (if exists) + - copy tests + """ + + # copy executables + exedir = os.path.join(self.cfg['start_dir'], 'exe', self.typearch) + targetdir = os.path.join(self.installdir, 'bin') + copy_dir(exedir, targetdir) + + # copy libraries and include files, not sure what is strictly required so we take everything + if self.cfg['library']: + libdir = os.path.join(self.cfg['start_dir'], 'lib', self.typearch, self.cfg['type']) + targetdir = os.path.join(self.installdir, 'lib') + copy_dir(libdir, targetdir) + # Also need to populate the include directory + targetdir = os.path.join(self.installdir, 'include') + libcp2k_header = os.path.join(self.cfg['start_dir'], 'src', 'start', 'libcp2k.h') + target_header = os.path.join(targetdir, os.path.basename(libcp2k_header)) + copy_file(libcp2k_header, target_header) + # include all .mod files for fortran users (don't know the exact list so take everything) + mod_path = os.path.join(self.cfg['start_dir'], 'obj', self.typearch, self.cfg['type']) + for mod_file in glob.glob(os.path.join(mod_path, '*.mod')): + target_mod = os.path.join(targetdir, os.path.basename(mod_file)) + copy_file(mod_file, target_mod) + + # copy data dir + datadir = os.path.join(self.cfg['start_dir'], 'data') + targetdir = os.path.join(self.installdir, 'data') + if os.path.exists(targetdir): + self.log.info("Won't copy data dir. Destination directory %s already exists" % targetdir) + elif os.path.exists(datadir): + copy_dir(datadir, targetdir) + else: + self.log.info("Won't copy data dir. Source directory %s does not exist" % datadir) + + # copy tests + srctests = os.path.join(self.cfg['start_dir'], 'tests') + targetdir = os.path.join(self.installdir, 'tests') + if os.path.exists(targetdir): + self.log.info("Won't copy tests. 
Destination directory %s already exists" % targetdir) + else: + copy_dir(srctests, targetdir) + + # copy regression test results + if self.cfg['runtest']: + try: + testdir = os.path.dirname(os.path.normpath(self.cfg['start_dir'])) + for d in os.listdir(testdir): + if d.startswith('TEST-%s-%s' % (self.typearch, self.cfg['type'])): + path = os.path.join(testdir, d) + target = os.path.join(self.installdir, d) + copy_dir(path, target) + self.log.info("Regression test results dir %s copied to %s" % (d, self.installdir)) + break + except (OSError, IOError) as err: + raise EasyBuildError("Failed to copy regression test results dir: %s", err) + + def sanity_check_step(self): + """Custom sanity check for CP2K""" + + cp2k_type = self.cfg['type'] + custom_paths = { + 'files': ["bin/%s.%s" % (x, cp2k_type) for x in ["cp2k", "cp2k_shell"]], + 'dirs': ["tests"] + } + if self.cfg['library']: + custom_paths['files'].append(os.path.join('lib', 'libcp2k.a')) + custom_paths['files'].append(os.path.join('include', 'libcp2k.h')) + custom_paths['files'].append(os.path.join('include', 'libcp2k.mod')) + super(EB_CP2K, self).sanity_check_step(custom_paths=custom_paths) + + def make_module_extra(self): + """Set up a CP2K_DATA_DIR environment variable to find CP2K provided basis sets""" + + txt = super(EB_CP2K, self).make_module_extra() + + # also define $CP2K_DATA_DIR in module, + # even though CP2K was already configured to pick up 'data' from install dir + # this could be useful for users to access the 'data' dir in a documented way (and it doesn't hurt) + datadir = os.path.join(self.installdir, 'data') + if os.path.exists(datadir): + txt += self.module_generator.set_environment('CP2K_DATA_DIR', datadir) + + return txt + diff --git a/easyblocks/i/imod.py b/easyblocks/i/imod.py new file mode 100644 index 0000000..93b019d --- /dev/null +++ b/easyblocks/i/imod.py @@ -0,0 +1,95 @@ +## +# Copyright 2013-2022 Ghent University +# +# This file is part of EasyBuild, +# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), +# with support of Ghent University (http://ugent.be/hpc), +# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en), +# Flemish Research Foundation (FWO) (http://www.fwo.be/en) +# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). +# +# https://github.com/easybuilders/easybuild +# +# EasyBuild is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation v2. +# +# EasyBuild is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 
+##
+"""
+EasyBuild support for building and installing IMOD, implemented as an easyblock
+
+@author: Benjamin Roberts (Landcare Research NZ Ltd)
+"""
+import os
+import shutil
+
+from easybuild.easyblocks.generic.binary import Binary
+from easybuild.tools.build_log import EasyBuildError
+from easybuild.tools.filetools import remove_dir
+from easybuild.tools.run import run_cmd
+
+
+class EB_IMOD(Binary):
+    """Support for building/installing IMOD."""
+
+    def install_step(self):
+        """Install IMOD using install script."""
+
+        # -dir: Choose location of installation directory
+        # -skip: do not attempt to deploy resource files in /etc
+        # -yes: do not prompt for confirmation
+        script = '{0}_{1}{2}.sh'.format(self.name.lower(), self.version, self.cfg['versionsuffix'])
+        cmd = "bash {0} -dir {1} -script {1} -skip -yes".format(script, self.installdir)
+        run_cmd(cmd, log_all=True, simple=True)
+
+        # The assumption by the install script is that installdir will be something
+        # like /usr/local. So it creates, within the specified install location, a
+        # number of additional directories within which to install IMOD. We will,
+        # therefore, move the contents of these directories up and throw away the
+        # directories themselves. Doing so apparently is not a problem so long as
+        # IMOD_DIR is correctly set in the module.
+        link_to_remove = os.path.join(self.installdir, self.name)
+        dir_to_remove = os.path.join(self.installdir, "{0}_{1}".format(self.name.lower(), self.version))
+        try:
+            for entry in os.listdir(dir_to_remove):
+                shutil.move(os.path.join(dir_to_remove, entry), self.installdir)
+            if os.path.realpath(link_to_remove) != os.path.realpath(dir_to_remove):
+                raise EasyBuildError("Something went wrong: %s doesn't point to %s", link_to_remove, dir_to_remove)
+            remove_dir(dir_to_remove)
+            os.remove(link_to_remove)
+        except OSError as err:
+            raise EasyBuildError("Failed to clean up install dir: %s", err)
+
+    def sanity_check_step(self):
+        """Custom sanity check for IMOD."""
+        custom_paths = {
+            'files': ['bin/imod', 'IMOD-linux.csh', 'IMOD-linux.sh', 'installIMOD'],
+            'dirs': ['lib'],
+        }
+        super(EB_IMOD, self).sanity_check_step(custom_paths=custom_paths)
+
+    def make_module_extra(self):
+        """Define IMOD specific variables in generated module file."""
+        txt = super(EB_IMOD, self).make_module_extra()
+        txt += self.module_generator.set_environment('IMOD_DIR', self.installdir)
+        txt += self.module_generator.set_environment('IMOD_PLUGIN_DIR',
+                                                     os.path.join(self.installdir, 'lib', 'imodplug'))
+        txt += self.module_generator.set_environment('IMOD_QTLIBDIR', os.path.join(self.installdir, 'qtlib'))
+        if os.getenv('JAVA_HOME') is None:
+            raise EasyBuildError("$JAVA_HOME is not defined for some reason -- check environment")
+        else:
+            txt += self.module_generator.set_environment('IMOD_JAVADIR', os.getenv('JAVA_HOME'))
+        txt += self.module_generator.set_environment('FOR_DISABLE_STACK_TRACE', '1')
+        txt += self.module_generator.set_alias('subm', "submfg $* &")
+        txt += self.module_generator.msg_on_load("Please set the environment variable $IMOD_CALIB_DIR if appropriate.\n")
+
+        txt += self.module_generator.msg_on_load("bash users run: 'source $EBROOTIMOD/IMOD-linux.sh'\n")
+        txt += self.module_generator.msg_on_load("csh users run: 'source $EBROOTIMOD/IMOD-linux.csh'\n")
+        return txt
diff --git a/easyblocks/n/nvhpc.py b/easyblocks/n/nvhpc.py
new file mode 100644
index 0000000..06a6f9e
--- /dev/null
+++ b/easyblocks/n/nvhpc.py
@@ -0,0 +1,312 @@
+##
+# Copyright 2015-2023 Bart Oldeman
+# Copyright 2016-2023 
Forschungszentrum Juelich +# +# This file is triple-licensed under GPLv2 (see below), MIT, and +# BSD three-clause licenses. +# +# This file is part of EasyBuild, +# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), +# with support of Ghent University (http://ugent.be/hpc), +# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), +# Flemish Research Foundation (FWO) (http://www.fwo.be/en) +# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). +# +# https://github.com/easybuilders/easybuild +# +# EasyBuild is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation v2. +# +# EasyBuild is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. +## +""" +EasyBuild support for installing NVIDIA HPC SDK compilers, based on the easyblock for PGI compilers + +@author: Bart Oldeman (McGill University, Calcul Quebec, Compute Canada) +@author: Damian Alvarez (Forschungszentrum Juelich) +@author: Andreas Herten (Forschungszentrum Juelich) +""" +import os +import fileinput +import re +import stat +import sys +import platform + +from distutils.version import LooseVersion +from easybuild.easyblocks.generic.packedbinary import PackedBinary +from easybuild.framework.easyconfig import CUSTOM +from easybuild.tools.filetools import adjust_permissions, write_file +from easybuild.tools.run import run_cmd +from easybuild.tools.modules import get_software_root, get_software_version +from easybuild.tools.config import build_option +from easybuild.tools.build_log import EasyBuildError, print_warning + + +# contents for siterc file to make PGI/NVHPC pick up $LIBRARY_PATH +# cfr. https://www.pgroup.com/support/link.htm#lib_path_ldflags +SITERC_LIBRARY_PATH = """ +# get the value of the environment variable LIBRARY_PATH +variable LIBRARY_PATH is environment(LIBRARY_PATH); + +# split this value at colons, separate by -L, prepend 1st one by -L +variable library_path is +default($if($LIBRARY_PATH,-L$replace($LIBRARY_PATH,":", -L))); + +# add the -L arguments to the link line +append LDLIBARGS=$library_path; + +# also include the location where libm & co live on Debian-based systems +# cfr. 
https://github.com/easybuilders/easybuild-easyblocks/pull/919 +append LDLIBARGS=-L/usr/lib/x86_64-linux-gnu; +""" + + +class EB_NVHPC(PackedBinary): + """ + Support for installing the NVIDIA HPC SDK (NVHPC) compilers + """ + + @staticmethod + def extra_options(): + extra_vars = { + 'default_cuda_version': [None, "CUDA Version to be used as default (10.2 or 11.0 or ...)", CUSTOM], + 'module_add_cuda': [False, "Add NVHPC's CUDA to module", CUSTOM], + 'module_add_math_libs': [False, "Add NVHPC's math libraries to module", CUSTOM], + 'module_add_nccl': [False, "Add NVHPC's NCCL library to module", CUSTOM], + 'module_add_nvshmem': [False, "Add NVHPC's NVSHMEM library to module", CUSTOM], + 'module_add_profilers': [False, "Add NVHPC's NVIDIA Profilers to module", CUSTOM], + 'module_byo_compilers': [False, "BYO Compilers: Remove compilers from module", CUSTOM], + 'module_nvhpc_own_mpi': [False, "Add NVHPC's packaged OpenMPI to module", CUSTOM] + } + return PackedBinary.extra_options(extra_vars) + + def __init__(self, *args, **kwargs): + """Easyblock constructor, define custom class variables specific to NVHPC.""" + super(EB_NVHPC, self).__init__(*args, **kwargs) + + # Ideally we should be using something like `easybuild.tools.systemtools.get_cpu_architecture` here, however, + # on `ppc64le` systems this function returns `POWER` instead of `ppc64le`. Since this path needs to reflect + # `arch` (https://easybuild.readthedocs.io/en/latest/version-specific/easyconfig_templates.html) the same + # procedure from `templates.py` was reused here: + architecture = 'Linux_%s' % platform.uname()[4] + self.nvhpc_install_subdir = os.path.join(architecture, self.version) + + def install_step(self): + """Install by running install command.""" + + # EULA for NVHPC must be accepted via --accept-eula-for EasyBuild configuration option, + # or via 'accept_eula = True' in easyconfig file + self.check_accepted_eula(more_info='https://docs.nvidia.com/hpc-sdk/eula/index.html') + + default_cuda_version = self.cfg['default_cuda_version'] + if default_cuda_version is None: + module_cuda_version_full = get_software_version('CUDA') + if module_cuda_version_full is not None: + default_cuda_version = '.'.join(module_cuda_version_full.split('.')[:2]) + else: + error_msg = "A default CUDA version is needed for installation of NVHPC. " + error_msg += "It can not be determined automatically and needs to be added manually. " + error_msg += "You can edit the easyconfig file, " + error_msg += "or use 'eb --try-amend=default_cuda_version=<version>'." + raise EasyBuildError(error_msg) + + # Parse default_compute_capability from different sources (CLI has priority) + ec_default_compute_capability = self.cfg['cuda_compute_capabilities'] + cfg_default_compute_capability = build_option('cuda_compute_capabilities') + if cfg_default_compute_capability is not None: + default_compute_capability = cfg_default_compute_capability + elif ec_default_compute_capability and ec_default_compute_capability is not None: + default_compute_capability = ec_default_compute_capability + else: + error_msg = "A default Compute Capability is needed for installation of NVHPC." + error_msg += "Please provide it either in the easyconfig file like 'cuda_compute_capabilities=\"7.0\"'," + error_msg += "or use 'eb --cuda-compute-capabilities=7.0' from the command line." 
+ raise EasyBuildError(error_msg) + + # Extract first element of default_compute_capability list, if it is a list + if isinstance(default_compute_capability, list): + _before_default_compute_capability = default_compute_capability + default_compute_capability = _before_default_compute_capability[0] + if len(_before_default_compute_capability) > 1: + warning_msg = "Replaced list of compute capabilities {} ".format(_before_default_compute_capability) + warning_msg += "with first element of list: {}".format(default_compute_capability) + print_warning(warning_msg) + + # Remove dot-divider for CC; error out if it is not a string + if isinstance(default_compute_capability, str): + default_compute_capability = default_compute_capability.replace('.', '') + else: + raise EasyBuildError("Unexpected non-string value encountered for compute capability: %s", + default_compute_capability) + + nvhpc_env_vars = { + 'NVHPC_INSTALL_DIR': self.installdir, + 'NVHPC_SILENT': 'true', + 'NVHPC_DEFAULT_CUDA': str(default_cuda_version), # 10.2, 11.0 + 'NVHPC_STDPAR_CUDACC': str(default_compute_capability), # 70, 80; single value, no list! + } + cmd = "%s ./install" % ' '.join(['%s=%s' % x for x in sorted(nvhpc_env_vars.items())]) + run_cmd(cmd, log_all=True, simple=True) + + # make sure localrc uses GCC in PATH, not always the system GCC, and does not use a system g77 but gfortran + install_abs_subdir = os.path.join(self.installdir, self.nvhpc_install_subdir) + compilers_subdir = os.path.join(install_abs_subdir, "compilers") + makelocalrc_folder = os.path.join(compilers_subdir, "bin") + makelocalrc_filename = os.path.join(compilers_subdir, "bin", "makelocalrc") + for line in fileinput.input(makelocalrc_filename, inplace='1', backup='.orig'): + line = re.sub(r"^PATH=/", r"#PATH=/", line) + sys.stdout.write(line) + + if LooseVersion(self.version) >= LooseVersion('22.9'): + cmd = "%s -x %s" % (makelocalrc_filename, makelocalrc_folder) + else: + cmd = "%s -x %s -g77 /" % (makelocalrc_filename, makelocalrc_folder) + run_cmd(cmd, log_all=True, simple=True) + + # If an OS libnuma is NOT found, makelocalrc creates symbolic links to libpgnuma.so + # If we use the EB libnuma, delete those symbolic links to ensure they are not used + if get_software_root("numactl"): + for filename in ["libnuma.so", "libnuma.so.1"]: + path = os.path.join(compilers_subdir, "lib", filename) + if os.path.islink(path): + os.remove(path) + + if LooseVersion(self.version) < LooseVersion('21.3'): + # install (or update) siterc file to make NVHPC consider $LIBRARY_PATH + siterc_path = os.path.join(compilers_subdir, 'bin', 'siterc') + write_file(siterc_path, SITERC_LIBRARY_PATH, append=True) + self.log.info("Appended instructions to pick up $LIBRARY_PATH to siterc file at %s: %s", + siterc_path, SITERC_LIBRARY_PATH) + + # The cuda nvvp tar file has broken permissions + adjust_permissions(self.installdir, stat.S_IWUSR, add=True, onlydirs=True) + + def sanity_check_step(self): + """Custom sanity check for NVHPC""" + prefix = self.nvhpc_install_subdir + compiler_names = ['nvc', 'nvc++', 'nvfortran'] + + files = [os.path.join(prefix, 'compilers', 'bin', x) for x in compiler_names] + if LooseVersion(self.version) < LooseVersion('21.3'): + files.append(os.path.join(prefix, 'compilers', 'bin', 'siterc')) + + custom_paths = { + 'files': files, + 'dirs': [os.path.join(prefix, 'compilers', 'bin'), os.path.join(prefix, 'compilers', 'lib'), + os.path.join(prefix, 'compilers', 'include'), os.path.join(prefix, 'compilers', 'man')] + } + custom_commands = ["%s -v" % 
compiler for compiler in compiler_names] + super(EB_NVHPC, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands) + + def _nvhpc_extended_components(self, dirs, basepath, env_vars_dirs): + """ + Extends `dirs` dict of key:environment_variables, value:list_of_directories with additional vars and dirs. + The dictionary key for a new env var will be created if it doesn't exist. + Also, the relative path specified in the `env_vars_dirs` dict is absolutized with the `basepath` prefix. + """ + for env_var, folders in sorted(env_vars_dirs.items()): + if env_var not in dirs: + dirs[env_var] = [] + if not isinstance(folders, list): + folders = [folders] + for folder in folders: + dirs[env_var].append(os.path.join(basepath, folder)) + + def make_module_req_guess(self): + """Prefix subdirectories in NVHPC install dir considered for environment variables defined in module file.""" + dirs = super(EB_NVHPC, self).make_module_req_guess() + for key in dirs: + dirs[key] = [os.path.join(self.nvhpc_install_subdir, 'compilers', d) for d in dirs[key]] + + # $CPATH should not be defined in module for NVHPC, it causes problems + # cfr. https://github.com/easybuilders/easybuild-easyblocks/issues/830 + if 'CPATH' in dirs: + self.log.info("Removing $CPATH entry: %s", dirs['CPATH']) + del dirs['CPATH'] + + # EasyBlock option parsing follows: + # BYO Compilers: + # Use NVHPC's libraries and tools with other, external compilers + if self.cfg['module_byo_compilers']: + if 'PATH' in dirs: + del dirs["PATH"] + # Own MPI: + # NVHPC is shipped with a compiled OpenMPI installation + # Enable it by setting according environment variables + if self.cfg['module_nvhpc_own_mpi']: + self.nvhpc_mpi_basedir = os.path.join(self.nvhpc_install_subdir, "comm_libs", "mpi") + env_vars_dirs = { + 'PATH': 'bin', + 'CPATH': 'include', + 'LD_LIBRARY_PATH': 'lib' + } + self._nvhpc_extended_components(dirs, self.nvhpc_mpi_basedir, env_vars_dirs) + # Math Libraries: + # NVHPC is shipped with math libraries (in a dedicated folder) + # Enable them by setting according environment variables + if self.cfg['module_add_math_libs']: + self.nvhpc_math_basedir = os.path.join(self.nvhpc_install_subdir, "math_libs") + env_vars_dirs = { + 'CPATH': 'include', + 'LD_LIBRARY_PATH': 'lib64' + } + self._nvhpc_extended_components(dirs, self.nvhpc_math_basedir, env_vars_dirs) + # GPU Profilers: + # NVHPC is shipped with NVIDIA's GPU profilers (Nsight Compute/Nsight Systems) + # Enable them by setting the according environment variables + if self.cfg['module_add_profilers']: + self.nvhpc_profilers_basedir = os.path.join(self.nvhpc_install_subdir, "profilers") + env_vars_dirs = { + 'PATH': ['Nsight_Compute', 'Nsight_Systems/bin'] + } + self._nvhpc_extended_components(dirs, self.nvhpc_profilers_basedir, env_vars_dirs) + # NCCL: + # NVHPC is shipped with NCCL + # Enable it by setting the according environment variables + if self.cfg['module_add_nccl']: + self.nvhpc_nccl_basedir = os.path.join(self.nvhpc_install_subdir, "comm_libs", "nccl") + env_vars_dirs = { + 'CPATH': 'include', + 'LD_LIBRARY_PATH': 'lib' + } + self._nvhpc_extended_components(dirs, self.nvhpc_nccl_basedir, env_vars_dirs) + # NVSHMEM: + # NVHPC is shipped with NVSHMEM + # Enable it by setting the according environment variables + if self.cfg['module_add_nvshmem']: + self.nvhpc_nvshmem_basedir = os.path.join(self.nvhpc_install_subdir, "comm_libs", "nvshmem") + env_vars_dirs = { + 'CPATH': 'include', + 'LD_LIBRARY_PATH': 'lib' + } + self._nvhpc_extended_components(dirs, 
self.nvhpc_nvshmem_basedir, env_vars_dirs) + # CUDA: + # NVHPC is shipped with CUDA (possibly multiple versions) + # Rather use this CUDA than an external CUDA (via $CUDA_HOME) by setting according environment variables + if self.cfg['module_add_cuda']: + self.nvhpc_cuda_basedir = os.path.join(self.nvhpc_install_subdir, "cuda") + env_vars_dirs = { + 'PATH': 'bin', + 'LD_LIBRARY_PATH': 'lib64', + 'CPATH': 'include' + } + self._nvhpc_extended_components(dirs, self.nvhpc_cuda_basedir, env_vars_dirs) + return dirs + + def make_module_extra(self): + """Add environment variable for NVHPC location""" + txt = super(EB_NVHPC, self).make_module_extra() + txt += self.module_generator.set_environment('NVHPC', self.installdir) + if LooseVersion(self.version) >= LooseVersion('22.7'): + # NVHPC 22.7+ requires the variable NVHPC_CUDA_HOME for external CUDA. CUDA_HOME has been deprecated. + if not self.cfg['module_add_cuda'] and get_software_root('CUDA'): + txt += self.module_generator.set_environment('NVHPC_CUDA_HOME', os.getenv('CUDA_HOME')) + return txt diff --git a/easyblocks/o/openfoam.py b/easyblocks/o/openfoam.py new file mode 100644 index 0000000..04f0fb3 --- /dev/null +++ b/easyblocks/o/openfoam.py @@ -0,0 +1,556 @@ +## +# Copyright 2009-2023 Ghent University +# +# This file is part of EasyBuild, +# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), +# with support of Ghent University (http://ugent.be/hpc), +# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), +# Flemish Research Foundation (FWO) (http://www.fwo.be/en) +# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). +# +# https://github.com/easybuilders/easybuild +# +# EasyBuild is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation v2. +# +# EasyBuild is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 
+## +""" +EasyBuild support for building and installing OpenFOAM, implemented as an easyblock + +@author: Stijn De Weirdt (Ghent University) +@author: Dries Verdegem (Ghent University) +@author: Kenneth Hoste (Ghent University) +@author: Pieter De Baets (Ghent University) +@author: Jens Timmerman (Ghent University) +@author: Xavier Besseron (University of Luxembourg) +@author: Ward Poelmans (Ghent University) +@author: Balazs Hajgato (Free University Brussels (VUB)) +""" + +import glob +import os +import re +import shutil +import stat +import tempfile +from distutils.version import LooseVersion + +import easybuild.tools.environment as env +import easybuild.tools.toolchain as toolchain +from easybuild.easyblocks.generic.cmakemake import setup_cmake_env +from easybuild.framework.easyblock import EasyBlock +from easybuild.tools.build_log import EasyBuildError +from easybuild.tools.filetools import adjust_permissions, apply_regex_substitutions, mkdir +from easybuild.tools.modules import get_software_root, get_software_version +from easybuild.tools.run import run_cmd, run_cmd_qa +from easybuild.tools.systemtools import get_shared_lib_ext, get_cpu_architecture, AARCH64, POWER + + +class EB_OpenFOAM(EasyBlock): + """Support for building and installing OpenFOAM.""" + + def __init__(self, *args, **kwargs): + """Specify that OpenFOAM should be built in install dir.""" + + super(EB_OpenFOAM, self).__init__(*args, **kwargs) + + self.build_in_installdir = True + + self.openfoamdir = None + self.thrdpartydir = None + + # version may start with 'v' for some variants of OpenFOAM + # we need to strip this off to avoid problems when comparing LooseVersion instances in Python 3 + clean_version = self.version.strip('v+') + + # take into account versions like '4.x', + # assume it's equivalent to a very recent minor version (.99) + if '.x' in clean_version: + clean_version = clean_version.replace('.x', '.99') + + self.looseversion = LooseVersion(clean_version) + + self.is_extend = 'extend' in self.name.lower() + self.is_dot_com = self.looseversion >= LooseVersion('1606') + self.is_dot_org = self.looseversion <= LooseVersion('100') + + if self.is_extend: + if self.looseversion >= LooseVersion('3.0'): + self.openfoamdir = 'foam-extend-%s' % self.version + else: + self.openfoamdir = 'OpenFOAM-%s-ext' % self.version + else: + self.openfoamdir = '-'.join([self.name, '-'.join(self.version.split('-')[:2])]) + self.log.debug("openfoamdir: %s" % self.openfoamdir) + + # Set build type to requested value + if self.toolchain.options['debug']: + self.build_type = 'Debug' + else: + self.build_type = 'Opt' + + # determine values for wm_compiler and wm_mplib + comp_fam = self.toolchain.comp_family() + if comp_fam == toolchain.GCC: # @UndefinedVariable + self.wm_compiler = 'Gcc' + self.wm_mplib = "OPENMPI" + elif comp_fam == toolchain.INTELCOMP: # @UndefinedVariable + self.wm_compiler = 'Icc' + self.wm_mplib = "INTELMPI" + else: + raise EasyBuildError("Unknown compiler family, don't know how to set WM_COMPILER") + + def extract_step(self): + """Extract sources as expected by the OpenFOAM(-Extend) build scripts.""" + super(EB_OpenFOAM, self).extract_step() + # make sure that the expected subdir is really there after extracting + # if not, the build scripts (e.g., the etc/bashrc being sourced) will likely fail + openfoam_installdir = os.path.join(self.installdir, self.openfoamdir) + if not os.path.exists(openfoam_installdir): + self.log.warning("Creating expected directory %s, and moving everything there" % openfoam_installdir) + 
try: + contents_installdir = os.listdir(self.installdir) + source = os.path.join(self.installdir, contents_installdir[0]) + # it's one directory but has a wrong name + if len(contents_installdir) == 1 and os.path.isdir(source): + target = os.path.join(self.installdir, self.openfoamdir) + self.log.debug("Renaming %s to %s", source, target) + os.rename(source, target) + else: + mkdir(openfoam_installdir) + for fil in contents_installdir: + if fil != self.openfoamdir: + source = os.path.join(self.installdir, fil) + target = os.path.join(openfoam_installdir, fil) + self.log.debug("Moving %s to %s", source, target) + shutil.move(source, target) + os.chdir(openfoam_installdir) + except OSError as err: + raise EasyBuildError("Failed to move all files to %s: %s", openfoam_installdir, err) + + def patch_step(self, beginpath=None): + """Adjust start directory and start path for patching to correct directory.""" + self.cfg['start_dir'] = os.path.join(self.installdir, self.openfoamdir) + super(EB_OpenFOAM, self).patch_step(beginpath=self.cfg['start_dir']) + + def configure_step(self): + """Configure OpenFOAM build by setting appropriate environment variables.""" + # compiler & compiler flags + comp_fam = self.toolchain.comp_family() + + extra_flags = '' + if comp_fam == toolchain.GCC: # @UndefinedVariable + if get_software_version('GCC') >= LooseVersion('4.8'): + # make sure non-gold version of ld is used, since OpenFOAM requires it + # see http://www.openfoam.org/mantisbt/view.php?id=685 + extra_flags = '-fuse-ld=bfd' + + # older versions of OpenFOAM-Extend require -fpermissive + if self.is_extend and self.looseversion < LooseVersion('2.0'): + extra_flags += ' -fpermissive' + + if self.looseversion < LooseVersion('3.0'): + extra_flags += ' -fno-delete-null-pointer-checks' + + elif comp_fam == toolchain.INTELCOMP: # @UndefinedVariable + # make sure -no-prec-div is used with Intel compilers + extra_flags = '-no-prec-div' + + for env_var in ['CFLAGS', 'CXXFLAGS']: + env.setvar(env_var, "%s %s" % (os.environ.get(env_var, ''), extra_flags)) + + # patch out hardcoding of WM_* environment variables + # for example, replace 'export WM_COMPILER=Gcc' with ': ${WM_COMPILER:=Gcc}; export WM_COMPILER' + for script in [os.path.join(self.builddir, self.openfoamdir, x) for x in ['etc/bashrc', 'etc/cshrc']]: + self.log.debug("Patching out hardcoded $WM_* env vars in %s", script) + # disable any third party stuff, we use EB controlled builds + regex_subs = [(r"^(setenv|export) WM_THIRD_PARTY_USE_.*[ =].*$", r"# \g<0>")] + + # this does not work for OpenFOAM Extend lower than 2.0 + if not self.is_extend or self.looseversion >= LooseVersion('2.0'): + key = "WM_PROJECT_VERSION" + regex_subs += [(r"^(setenv|export) %s=.*$" % key, r"export %s=%s #\g<0>" % (key, self.version))] + + WM_env_var = ['WM_COMPILER', 'WM_COMPILE_OPTION', 'WM_MPLIB', 'WM_THIRD_PARTY_DIR'] + # OpenFOAM >= 3.0.0 can use 64 bit integers + if not self.is_extend and self.looseversion >= LooseVersion('3.0'): + WM_env_var.append('WM_LABEL_SIZE') + for env_var in WM_env_var: + regex_subs.append((r"^(setenv|export) (?P<var>%s)[ =](?P<val>.*)$" % env_var, + r": ${\g<var>:=\g<val>}; export \g<var>")) + apply_regex_substitutions(script, regex_subs) + + # inject compiler variables into wmake/rules files + ldirs = glob.glob(os.path.join(self.builddir, self.openfoamdir, 'wmake', 'rules', 'linux*')) + if self.looseversion >= LooseVersion('1906'): + ldirs += glob.glob(os.path.join(self.builddir, self.openfoamdir, 'wmake', 'rules', 'General', '*')) + langs = ['c', 
'c++'] + + # NOTE: we do not want to change the Debug rules files becuse + # that would change the cOPT/c++OPT values from their empty setting. + suffixes = ['', 'Opt'] + wmake_rules_files = [os.path.join(ldir, lang + suff) for ldir in ldirs for lang in langs for suff in suffixes] + wmake_rules_files += [os.path.join(ldir, "general") for ldir in ldirs] + + mpicc = os.environ['MPICC'] + mpicxx = os.environ['MPICXX'] + cc_seq = os.environ.get('CC_SEQ', os.environ['CC']) + cxx_seq = os.environ.get('CXX_SEQ', os.environ['CXX']) + + if self.toolchain.mpi_family() == toolchain.OPENMPI: + # no -cc/-cxx flags supported in OpenMPI compiler wrappers + c_comp_cmd = 'OMPI_CC="%s" %s' % (cc_seq, mpicc) + cxx_comp_cmd = 'OMPI_CXX="%s" %s' % (cxx_seq, mpicxx) + else: + # -cc/-cxx should work for all MPICH-based MPIs (including Intel MPI) + c_comp_cmd = '%s -cc="%s"' % (mpicc, cc_seq) + cxx_comp_cmd = '%s -cxx="%s"' % (mpicxx, cxx_seq) + + comp_vars = { + # specify MPI compiler wrappers and compiler commands + sequential compiler that should be used by them + 'cc': c_comp_cmd, + 'CC': cxx_comp_cmd, + 'cOPT': os.environ['CFLAGS'], + 'c++OPT': os.environ['CXXFLAGS'], + } + for wmake_rules_file in wmake_rules_files: + # the cOpt and c++Opt files don't exist in the General directories (which are included for recent versions) + if not os.path.isfile(wmake_rules_file): + continue + fullpath = os.path.join(self.builddir, self.openfoamdir, wmake_rules_file) + self.log.debug("Patching compiler variables in %s", fullpath) + regex_subs = [] + for comp_var, newval in comp_vars.items(): + regex_subs.append((r"^(%s\s*=\s*).*$" % re.escape(comp_var), r"\1%s" % newval)) + # replace /lib/cpp by cpp, but keep the arguments + regex_subs.append((r"^(CPP\s*=\s*)/lib/cpp(.*)$", r"\1cpp\2")) + apply_regex_substitutions(fullpath, regex_subs) + + # enable verbose build for debug purposes + # starting with openfoam-extend 3.2, PS1 also needs to be set + env.setvar("FOAM_VERBOSE", '1') + + # installation directory + env.setvar("FOAM_INST_DIR", self.installdir) + + # third party directory + self.thrdpartydir = "ThirdParty-%s" % self.version + # only if third party stuff is actually installed + if os.path.exists(self.thrdpartydir): + os.symlink(os.path.join("..", self.thrdpartydir), self.thrdpartydir) + env.setvar("WM_THIRD_PARTY_DIR", os.path.join(self.installdir, self.thrdpartydir)) + + env.setvar("WM_COMPILER", self.wm_compiler) + env.setvar("WM_MPLIB", self.wm_mplib) + + # Set Compile options according to build type + env.setvar("WM_COMPILE_OPTION", self.build_type) + + # parallel build spec + env.setvar("WM_NCOMPPROCS", str(self.cfg['parallel'])) + + # OpenFOAM >= 3.0.0 can use 64 bit integers + if not self.is_extend and self.looseversion >= LooseVersion('3.0'): + if self.toolchain.options['i8']: + env.setvar("WM_LABEL_SIZE", '64') + else: + env.setvar("WM_LABEL_SIZE", '32') + + # make sure lib/include dirs for dependencies are found + openfoam_extend_v3 = self.is_extend and self.looseversion >= LooseVersion('3.0') + if self.looseversion < LooseVersion("2") or openfoam_extend_v3: + self.log.debug("List of deps: %s" % self.cfg.dependencies()) + for dep in self.cfg.dependencies(): + dep_name = dep['name'].upper(), + dep_root = get_software_root(dep['name']) + env.setvar("%s_SYSTEM" % dep_name, "1") + dep_vars = { + "%s_DIR": "%s", + "%s_BIN_DIR": "%s/bin", + "%s_LIB_DIR": "%s/lib", + "%s_INCLUDE_DIR": "%s/include", + } + for var, val in dep_vars.items(): + env.setvar(var % dep_name, val % dep_root) + else: + for depend in 
['SCOTCH', 'METIS', 'CGAL', 'Paraview']: + dependloc = get_software_root(depend) + if dependloc: + if depend == 'CGAL' and get_software_root('Boost'): + env.setvar("CGAL_ROOT", dependloc) + env.setvar("BOOST_ROOT", get_software_root('Boost')) + else: + env.setvar("%s_ROOT" % depend.upper(), dependloc) + + def build_step(self): + """Build OpenFOAM using make after sourcing script to set environment.""" + + # Some parts of OpenFOAM uses CMake to build + # make sure the basic environment is correct + setup_cmake_env(self.toolchain) + + precmd = "source %s" % os.path.join(self.builddir, self.openfoamdir, "etc", "bashrc") + if not self.is_extend and self.looseversion >= LooseVersion('4.0'): + if self.looseversion >= LooseVersion('2006'): + cleancmd = "cd $WM_PROJECT_DIR && wclean -platform -all && cd -" + else: + cleancmd = "cd $WM_PROJECT_DIR && wcleanPlatform -all && cd -" + else: + cleancmd = "wcleanAll" + + # make directly in install directory + cmd_tmpl = "%(precmd)s && %(cleancmd)s && %(prebuildopts)s %(makecmd)s" % { + 'precmd': precmd, + 'cleancmd': cleancmd, + 'prebuildopts': self.cfg['prebuildopts'], + 'makecmd': os.path.join(self.builddir, self.openfoamdir, '%s'), + } + if self.is_extend and self.looseversion >= LooseVersion('3.0'): + qa = { + "Proceed without compiling ParaView [Y/n]": 'Y', + "Proceed without compiling cudaSolvers? [Y/n]": 'Y', + } + noqa = [ + ".* -o .*", + "checking .*", + "warning.*", + "configure: creating.*", + "%s .*" % os.environ['CC'], + "wmake .*", + "Making dependency list for source file.*", + r"\s*\^\s*", # warning indicator + "Cleaning .*", + ] + run_cmd_qa(cmd_tmpl % 'Allwmake.firstInstall', qa, no_qa=noqa, log_all=True, simple=True, maxhits=500) + else: + cmd = 'Allwmake' + if self.looseversion > LooseVersion('1606'): + # use Allwmake -log option if possible since this can be useful during builds, but also afterwards + cmd += ' -log' + run_cmd(cmd_tmpl % cmd, log_all=True, simple=True, log_output=True) + + def det_psubdir(self): + """Determine the platform-specific installation directory for OpenFOAM.""" + # OpenFOAM >= 3.0.0 can use 64 bit integers + # same goes for OpenFOAM-Extend >= 4.1 + if self.is_extend: + set_int_size = self.looseversion >= LooseVersion('4.1') + else: + set_int_size = self.looseversion >= LooseVersion('3.0') + + if set_int_size: + if self.toolchain.options['i8']: + int_size = 'Int64' + else: + int_size = 'Int32' + else: + int_size = '' + + archpart = '64' + arch = get_cpu_architecture() + if arch == AARCH64: + # Variants have different abbreviations for ARM64... 
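+            # openfoam.org uses 'Arm64' in the platform directory name,
+            # while openfoam.com and foam-extend use 'ARM64'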
+ if self.is_dot_org: + archpart = 'Arm64' + else: + archpart = 'ARM64' + elif arch == POWER: + archpart = 'PPC64le' + + psubdir = "linux%s%sDP%s%s" % (archpart, self.wm_compiler, int_size, self.build_type) + return psubdir + + def install_step(self): + """Building was performed in install dir, so just fix permissions.""" + + # fix permissions of OpenFOAM dir + fullpath = os.path.join(self.installdir, self.openfoamdir) + adjust_permissions(fullpath, stat.S_IROTH, add=True, recursive=True, ignore_errors=True) + adjust_permissions(fullpath, stat.S_IXOTH, add=True, recursive=True, onlydirs=True, ignore_errors=True) + + # fix permissions of ThirdParty dir and subdirs (also for 2.x) + # if the thirdparty tarball is installed + fullpath = os.path.join(self.installdir, self.thrdpartydir) + if os.path.exists(fullpath): + adjust_permissions(fullpath, stat.S_IROTH, add=True, recursive=True, ignore_errors=True) + adjust_permissions(fullpath, stat.S_IXOTH, add=True, recursive=True, onlydirs=True, ignore_errors=True) + + # create symlinks in the lib directory to all libraries in the mpi subdirectory + # to make sure they take precedence over the libraries in the dummy subdirectory + shlib_ext = get_shared_lib_ext() + psubdir = self.det_psubdir() + openfoam_extend_v3 = self.is_extend and self.looseversion >= LooseVersion('3.0') + if openfoam_extend_v3 or self.looseversion < LooseVersion("2"): + libdir = os.path.join(self.installdir, self.openfoamdir, "lib", psubdir) + else: + libdir = os.path.join(self.installdir, self.openfoamdir, "platforms", psubdir, "lib") + + # OpenFOAM v2012 puts mpi into eb-mpi + if self.looseversion >= LooseVersion("2012"): + mpilibssubdir = "eb-mpi" + else: + mpilibssubdir = "mpi" + mpilibsdir = os.path.join(libdir, mpilibssubdir) + + if os.path.exists(mpilibsdir): + for lib in glob.glob(os.path.join(mpilibsdir, "*.%s" % shlib_ext)): + libname = os.path.basename(lib) + dst = os.path.join(libdir, libname) + os.symlink(os.path.join(mpilibssubdir, libname), dst) + + def sanity_check_step(self): + """Custom sanity check for OpenFOAM""" + shlib_ext = get_shared_lib_ext() + psubdir = self.det_psubdir() + + openfoam_extend_v3 = self.is_extend and self.looseversion >= LooseVersion('3.0') + if openfoam_extend_v3 or self.looseversion < LooseVersion("2"): + toolsdir = os.path.join(self.openfoamdir, "applications", "bin", psubdir) + libsdir = os.path.join(self.openfoamdir, "lib", psubdir) + dirs = [toolsdir, libsdir] + else: + toolsdir = os.path.join(self.openfoamdir, "platforms", psubdir, "bin") + libsdir = os.path.join(self.openfoamdir, "platforms", psubdir, "lib") + dirs = [toolsdir, libsdir] + + # some randomly selected binaries + # if one of these is missing, it's very likely something went wrong + tools = ["boundaryFoam", "engineFoam", "buoyantSimpleFoam", "buoyantBoussinesqSimpleFoam", "sonicFoam"] + tools += ["surfaceAdd", "surfaceFind", "surfaceSmooth"] + tools += ["blockMesh", "checkMesh", "deformedGeom", "engineSwirl", "modifyMesh", "refineMesh"] + + # surfaceSmooth is replaced by surfaceLambdaMuSmooth is OpenFOAM v2.3.0 + if not self.is_extend and self.looseversion >= LooseVersion("2.3.0"): + tools.remove("surfaceSmooth") + tools.append("surfaceLambdaMuSmooth") + # sonicFoam and buoyantBoussineqSimpleFoam deprecated in version 7+ + if self.is_dot_org and self.looseversion >= LooseVersion('7'): + tools.remove("buoyantBoussinesqSimpleFoam") + tools.remove("sonicFoam") + # buoyantSimpleFoam replaced by buoyantFoam in versions 10+ + if self.is_dot_org and self.looseversion >= 
LooseVersion("10"): + tools.remove("buoyantSimpleFoam") + tools.append("buoyantFoam") + # engineFoam replaced by reactingFoam in versions 10+ + if self.is_dot_org and self.looseversion >= LooseVersion("10"): + tools.remove("engineFoam") + tools.append("reactingFoam") + + bins = [os.path.join(self.openfoamdir, "bin", x) for x in ["paraFoam"]] + \ + [os.path.join(toolsdir, x) for x in tools] + + # test setting up the OpenFOAM environment in bash shell + load_openfoam_env = "source $FOAM_BASH" + custom_commands = [load_openfoam_env] + + # check for the Pstream and scotchDecomp libraries, there must be a dummy one and an mpi one + #if self.is_extend: + # libs = [os.path.join(libsdir, "libscotchDecomp.%s" % shlib_ext), + # os.path.join(libsdir, "libmetisDecomp.%s" % shlib_ext)] + # if self.looseversion < LooseVersion('3.2'): + # # Pstream should have both a dummy and a mpi one + # libs.extend([os.path.join(libsdir, x, "libPstream.%s" % shlib_ext) for x in ["dummy"]]) + # else: + # libs.extend([os.path.join(libsdir, "libparMetisDecomp.%s" % shlib_ext)]) + #else: + # # OpenFOAM v2012 puts mpi into eb-mpi + # if self.is_dot_com and self.looseversion >= LooseVersion("2012"): + # mpilibssubdir = "mpi" + # else: + # mpilibssubdir = "mpi" + + # # there must be a dummy one and an mpi one for both + # libs = [os.path.join(libsdir, x, "libPstream.%s" % shlib_ext) for x in ["dummy", mpilibssubdir]] + \ + # [os.path.join(libsdir, x, "libptscotchDecomp.%s" % shlib_ext) for x in ["dummy", mpilibssubdir]] +\ + # [os.path.join(libsdir, "libscotchDecomp.%s" % shlib_ext)] + \ + # [os.path.join(libsdir, "dummy", "libscotchDecomp.%s" % shlib_ext)] + + if not self.is_extend and self.looseversion >= LooseVersion("2.4.0"): + # also check for foamMonitor for OpenFOAM versions other than OpenFOAM-Extend + bins.append(os.path.join(self.openfoamdir, 'bin', 'foamMonitor')) + + # test foamMonitor; wrap `foamMonitor -h` to generate exit code 1 if any dependency is missing + # the command `foamMonitor -h` does not return correct exit codes on its own in all versions + test_foammonitor = "! foamMonitor -h 2>&1 | grep 'not installed'" + custom_commands.append(' && '.join([load_openfoam_env, test_foammonitor])) + + custom_paths = { + 'files': [os.path.join(self.openfoamdir, 'etc', x) for x in ["bashrc", "cshrc"]] + bins, # + libs, + 'dirs': dirs, + } + + # run motorBike tutorial case to ensure the installation is functional (if it's available); + # only for recent (>= v6.0) versions of openfoam.org variant + if self.is_dot_org and self.looseversion >= LooseVersion('6'): + openfoamdir_path = os.path.join(self.installdir, self.openfoamdir) + motorbike_path = os.path.join(openfoamdir_path, 'tutorials', 'incompressible', 'simpleFoam', 'motorBike') + if os.path.exists(motorbike_path): + test_dir = tempfile.mkdtemp() + + if self.looseversion >= LooseVersion('9'): + geom_target_dir = 'geometry' + else: + geom_target_dir = 'triSurface' + + cmds = [ + "cp -a %s %s" % (motorbike_path, test_dir), + "cd %s" % os.path.join(test_dir, os.path.basename(motorbike_path)), + "source $FOAM_BASH", + ". 
$WM_PROJECT_DIR/bin/tools/RunFunctions", + "cp $FOAM_TUTORIALS/resources/geometry/motorBike.obj.gz constant/%s/" % geom_target_dir, + "runApplication surfaceFeatures", + "runApplication blockMesh", + "runApplication decomposePar -copyZero", + "runParallel snappyHexMesh -overwrite", + "runParallel patchSummary", + "runParallel potentialFoam", + "runParallel simpleFoam", + "runApplication reconstructParMesh -constant", + "runApplication reconstructPar -latestTime", + "cd %s" % self.builddir, + "rm -r %s" % test_dir, + ] + # all commands need to be run in a single shell command, + # because sourcing $FOAM_BASH sets up environment + custom_commands.append(' && '.join(cmds)) + + super(EB_OpenFOAM, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands) + + def make_module_extra(self, altroot=None, altversion=None): + """Define extra environment variables required by OpenFOAM""" + + txt = super(EB_OpenFOAM, self).make_module_extra() + + env_vars = [ + # Set WM_COMPILE_OPTION in the module file + # $FOAM_BASH will then pick it up correctly. + ('WM_COMPILE_OPTION', self.build_type), + ('WM_PROJECT_VERSION', self.version), + ('FOAM_INST_DIR', self.installdir), + ('WM_COMPILER', self.wm_compiler), + ('WM_MPLIB', self.wm_mplib), + ('FOAM_BASH', os.path.join(self.installdir, self.openfoamdir, 'etc', 'bashrc')), + ('FOAM_CSH', os.path.join(self.installdir, self.openfoamdir, 'etc', 'cshrc')), + ] + + # OpenFOAM >= 3.0.0 can use 64 bit integers + if not self.is_extend and self.looseversion >= LooseVersion('3.0'): + if self.toolchain.options['i8']: + env_vars += [('WM_LABEL_SIZE', '64')] + else: + env_vars += [('WM_LABEL_SIZE', '32')] + + for (env_var, val) in env_vars: + # check whether value is defined for compatibility with --module-only + if val: + txt += self.module_generator.set_environment(env_var, val) + + return txt diff --git a/easyblocks/q/quantumespresso.py b/easyblocks/q/quantumespresso.py new file mode 100644 index 0000000..ff1edd9 --- /dev/null +++ b/easyblocks/q/quantumespresso.py @@ -0,0 +1,526 @@ +## +# Copyright 2009-2023 Ghent University +# +# This file is part of EasyBuild, +# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), +# with support of Ghent University (http://ugent.be/hpc), +# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), +# Flemish Research Foundation (FWO) (http://www.fwo.be/en) +# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). +# +# https://github.com/easybuilders/easybuild +# +# EasyBuild is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation v2. +# +# EasyBuild is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. 
+## +""" +EasyBuild support for Quantum ESPRESSO, implemented as an easyblock + +@author: Kenneth Hoste (Ghent University) +@author: Ake Sandgren (HPC2N, Umea University) +""" +import fileinput +import os +import re +import shutil +import sys +from distutils.version import LooseVersion + +import easybuild.tools.environment as env +import easybuild.tools.toolchain as toolchain +from easybuild.easyblocks.generic.configuremake import ConfigureMake +from easybuild.framework.easyconfig import CUSTOM +from easybuild.tools.build_log import EasyBuildError +from easybuild.tools.filetools import copy_dir, copy_file +from easybuild.tools.modules import get_software_root, get_software_version + + +class EB_QuantumESPRESSO(ConfigureMake): + """Support for building and installing Quantum ESPRESSO.""" + + @staticmethod + def extra_options(): + """Custom easyconfig parameters for Quantum ESPRESSO.""" + extra_vars = { + 'hybrid': [False, "Enable hybrid build (with OpenMP)", CUSTOM], + 'with_scalapack': [True, "Enable ScaLAPACK support", CUSTOM], + 'with_ace': [False, "Enable Adaptively Compressed Exchange support", CUSTOM], + } + return ConfigureMake.extra_options(extra_vars) + + def __init__(self, *args, **kwargs): + """Add extra config options specific to Quantum ESPRESSO.""" + super(EB_QuantumESPRESSO, self).__init__(*args, **kwargs) + + if LooseVersion(self.version) >= LooseVersion("6"): + self.install_subdir = "qe-%s" % self.version + else: + self.install_subdir = "espresso-%s" % self.version + + def patch_step(self): + """Patch files from build dir (not start dir).""" + super(EB_QuantumESPRESSO, self).patch_step(beginpath=self.builddir) + + def configure_step(self): + """Custom configuration procedure for Quantum ESPRESSO.""" + + # compose list of DFLAGS (flag, value, keep_stuff) + # for guidelines, see include/defs.h.README in sources + dflags = [] + + repls = [] + + extra_libs = [] + + comp_fam_dflags = { + toolchain.INTELCOMP: '-D__INTEL', + toolchain.GCC: '-D__GFORTRAN -D__STD_F95', + } + comp_fam = self.toolchain.comp_family() + if comp_fam in comp_fam_dflags: + dflags.append(comp_fam_dflags[comp_fam]) + else: + raise EasyBuildError("EasyBuild does not yet have support for QuantumESPRESSO with toolchain %s" % comp_fam) + + if self.toolchain.options.get('openmp', False) or self.cfg['hybrid']: + self.cfg.update('configopts', '--enable-openmp') + dflags.append(" -D__OPENMP") + + if self.toolchain.options.get('usempi', None): + dflags.append('-D__MPI -D__PARA') + else: + self.cfg.update('configopts', '--disable-parallel') + + if self.cfg['with_scalapack']: + dflags.append(" -D__SCALAPACK") + if self.toolchain.options.get('usempi', None): + if get_software_root("impi") and get_software_root("imkl"): + self.cfg.update('configopts', '--with-scalapack=intel') + else: + self.cfg.update('configopts', '--without-scalapack') + + libxc = get_software_root("libxc") + if libxc: + libxc_v = get_software_version("libxc") + if LooseVersion(libxc_v) < LooseVersion("3.0.1"): + raise EasyBuildError("Must use libxc >= 3.0.1") + dflags.append(" -D__LIBXC") + repls.append(('IFLAGS', '-I%s' % os.path.join(libxc, 'include'), True)) + if LooseVersion(self.version) < LooseVersion("6.5"): + extra_libs.append(" -lxcf90 -lxc") + else: + extra_libs.append(" -lxcf90 -lxcf03 -lxc") + + hdf5 = get_software_root("HDF5") + if hdf5: + self.cfg.update('configopts', '--with-hdf5=%s' % hdf5) + dflags.append(" -D__HDF5") + hdf5_lib_repl = '-L%s/lib -lhdf5hl_fortran -lhdf5_hl -lhdf5_fortran -lhdf5 -lsz -lz -ldl -lm' % hdf5 + 
repls.append(('HDF5_LIB', hdf5_lib_repl, False)) + + elpa = get_software_root("ELPA") + if elpa: + if not self.cfg['with_scalapack']: + raise EasyBuildError("ELPA requires ScaLAPACK but 'with_scalapack' is set to False") + + elpa_v = get_software_version("ELPA") + if LooseVersion(self.version) >= LooseVersion("6"): + + # NOTE: Quantum Espresso should use -D__ELPA_<year> for corresponding ELPA version + # However for ELPA VERSION >= 2017.11 Quantum Espresso needs to use ELPA_2018 + # because of outdated bindings. See: https://xconfigure.readthedocs.io/en/latest/elpa/ + if LooseVersion("2018") > LooseVersion(elpa_v) >= LooseVersion("2017.11"): + dflags.append('-D__ELPA_2018') + else: + # get year from LooseVersion + elpa_year_v = elpa_v.split('.')[0] + dflags.append('-D__ELPA_%s' % elpa_year_v) + + elpa_min_ver = "2016.11.001.pre" + else: + elpa_min_ver = "2015" + dflags.append('-D__ELPA_2015 -D__ELPA') + + if LooseVersion(elpa_v) < LooseVersion(elpa_min_ver): + raise EasyBuildError("QuantumESPRESSO %s needs ELPA to be " + + "version %s or newer", self.version, elpa_min_ver) + + if self.toolchain.options.get('openmp', False): + elpa_include = 'elpa_openmp-%s' % elpa_v + elpa_lib = 'libelpa_openmp.a' + else: + elpa_include = 'elpa-%s' % elpa_v + elpa_lib = 'libelpa.a' + elpa_include = os.path.join(elpa, 'include', elpa_include) + repls.append(('IFLAGS', '-I%s' % os.path.join(elpa_include, 'modules'), True)) + self.cfg.update('configopts', '--with-elpa-include=%s' % elpa_include) + elpa_lib = os.path.join(elpa, 'lib', elpa_lib) + self.cfg.update('configopts', '--with-elpa-lib=%s' % elpa_lib) + + if comp_fam == toolchain.INTELCOMP: + # set preprocessor command (-E to stop after preprocessing, -C to preserve comments) + cpp = "%s -E -C" % os.getenv('CC') + repls.append(('CPP', cpp, False)) + env.setvar('CPP', cpp) + + # also define $FCCPP, but do *not* include -C (comments should not be preserved when preprocessing Fortran) + env.setvar('FCCPP', "%s -E" % os.getenv('CC')) + + if comp_fam == toolchain.INTELCOMP: + # Intel compiler must have -assume byterecl (see install/configure) + repls.append(('F90FLAGS', '-fpp -assume byterecl', True)) + repls.append(('FFLAGS', '-assume byterecl', True)) + elif comp_fam == toolchain.GCC: + f90_flags = ['-cpp'] + if LooseVersion(get_software_version('GCC')) >= LooseVersion('10'): + f90_flags.append('-fallow-argument-mismatch') + repls.append(('F90FLAGS', ' '.join(f90_flags), True)) + + super(EB_QuantumESPRESSO, self).configure_step() + + if self.toolchain.options.get('openmp', False): + libfft = os.getenv('LIBFFT_MT') + else: + libfft = os.getenv('LIBFFT') + if libfft: + if "fftw3" in libfft: + dflags.append('-D__FFTW3') + else: + dflags.append('-D__FFTW') + env.setvar('FFTW_LIBS', libfft) + + if get_software_root('ACML'): + dflags.append('-D__ACML') + + if self.cfg['with_ace']: + dflags.append(" -D__EXX_ACE") + + # always include -w to supress warnings + dflags.append('-w') + + if LooseVersion(self.version) >= LooseVersion("6.6"): + dflags.append(" -Duse_beef") + libbeef = get_software_root("libbeef") + if libbeef: + repls.append(('BEEF_LIBS_SWITCH', 'external', False)) + repls.append(('BEEF_LIBS', '%s/lib/libbeef.a' % libbeef, False)) + + repls.append(('DFLAGS', ' '.join(dflags), False)) + + # complete C/Fortran compiler and LD flags + if self.toolchain.options.get('openmp', False) or self.cfg['hybrid']: + repls.append(('LDFLAGS', self.toolchain.get_flag('openmp'), True)) + repls.append(('(?:C|F90|F)FLAGS', self.toolchain.get_flag('openmp'), True)) + + # 
obtain library settings + libs = [] + num_libs = ['BLAS', 'LAPACK', 'FFT'] + if self.cfg['with_scalapack']: + num_libs.extend(['SCALAPACK']) + for lib in num_libs: + if self.toolchain.options.get('openmp', False): + val = os.getenv('LIB%s_MT' % lib) + else: + val = os.getenv('LIB%s' % lib) + if lib == 'SCALAPACK' and elpa: + val = ' '.join([elpa_lib, val]) + repls.append(('%s_LIBS' % lib, val, False)) + libs.append(val) + libs = ' '.join(libs) + + repls.append(('BLAS_LIBS_SWITCH', 'external', False)) + repls.append(('LAPACK_LIBS_SWITCH', 'external', False)) + repls.append(('LD_LIBS', ' '.join(extra_libs + [os.getenv('LIBS')]), False)) + + # Do not use external FoX. + # FoX starts to be used in 6.2 and they use a patched version that + # is newer than FoX 4.1.2 which is the latest release. + # Ake Sandgren, 20180712 + if get_software_root('FoX'): + raise EasyBuildError("Found FoX external module, QuantumESPRESSO" + + "must use the version they include with the source.") + + self.log.debug("List of replacements to perform: %s" % repls) + + if LooseVersion(self.version) >= LooseVersion("6"): + make_ext = '.inc' + else: + make_ext = '.sys' + + # patch make.sys file + fn = os.path.join(self.cfg['start_dir'], 'make' + make_ext) + try: + for line in fileinput.input(fn, inplace=1, backup='.orig.eb'): + for (k, v, keep) in repls: + # need to use [ \t]* instead of \s*, because vars may be undefined as empty, + # and we don't want to include newlines + if keep: + line = re.sub(r"^(%s\s*=[ \t]*)(.*)$" % k, r"\1\2 %s" % v, line) + else: + line = re.sub(r"^(%s\s*=[ \t]*).*$" % k, r"\1%s" % v, line) + + # fix preprocessing directives for .f90 files in make.sys if required + if LooseVersion(self.version) < LooseVersion("6.0"): + if comp_fam == toolchain.GCC: + line = re.sub(r"^\t\$\(MPIF90\) \$\(F90FLAGS\) -c \$<", + "\t$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" + + "\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o", + line) + + if LooseVersion(self.version) >= LooseVersion("6.6"): + # fix order of BEEF_LIBS in QE_LIBS + line = re.sub(r"^(QELIBS\s*=[ \t]*)(.*) \$\(BEEF_LIBS\) (.*)$", + r"QELIBS = $(BEEF_LIBS) \2 \3", line) + + # use FCCPP instead of CPP for Fortran headers + line = re.sub(r"\t\$\(CPP\) \$\(CPPFLAGS\) \$< -o \$\*\.fh", + "\t$(FCCPP) $(CPPFLAGS) $< -o $*.fh", line) + + sys.stdout.write(line) + except IOError as err: + raise EasyBuildError("Failed to patch %s: %s", fn, err) + + self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read())) + + # patch default make.sys for wannier + if LooseVersion(self.version) >= LooseVersion("5"): + fn = os.path.join(self.cfg['start_dir'], 'install', 'make_wannier90' + make_ext) + else: + fn = os.path.join(self.cfg['start_dir'], 'plugins', 'install', 'make_wannier90.sys') + try: + for line in fileinput.input(fn, inplace=1, backup='.orig.eb'): + line = re.sub(r"^(LIBS\s*=\s*).*", r"\1%s" % libs, line) + + sys.stdout.write(line) + + except IOError as err: + raise EasyBuildError("Failed to patch %s: %s", fn, err) + + self.log.debug("Contents of patched %s: %s" % (fn, open(fn, "r").read())) + + # patch Makefile of want plugin + wantprefix = 'want-' + wantdirs = [d for d in os.listdir(self.builddir) if d.startswith(wantprefix)] + + if len(wantdirs) > 1: + raise EasyBuildError("Found more than one directory with %s prefix, help!", wantprefix) + + if len(wantdirs) != 0: + wantdir = os.path.join(self.builddir, wantdirs[0]) + make_sys_in_path = None + cand_paths = [os.path.join('conf', 'make.sys.in'), os.path.join('config', 'make.sys.in')] + for path in cand_paths: 
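+                # use the first candidate location that actually exists (conf/ or config/);
+                # if neither is present, an error is raised right after this loop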
+ full_path = os.path.join(wantdir, path) + if os.path.exists(full_path): + make_sys_in_path = full_path + break + if make_sys_in_path is None: + raise EasyBuildError("Failed to find make.sys.in in want directory %s, paths considered: %s", + wantdir, ', '.join(cand_paths)) + + try: + for line in fileinput.input(make_sys_in_path, inplace=1, backup='.orig.eb'): + # fix preprocessing directives for .f90 files in make.sys if required + if comp_fam == toolchain.GCC: + line = re.sub("@f90rule@", + "$(CPP) -C $(CPPFLAGS) $< -o $*.F90\n" + + "\t$(MPIF90) $(F90FLAGS) -c $*.F90 -o $*.o", + line) + + sys.stdout.write(line) + except IOError as err: + raise EasyBuildError("Failed to patch %s: %s", fn, err) + + # move non-espresso directories to where they're expected and create symlinks + try: + dirnames = [d for d in os.listdir(self.builddir) if d not in [self.install_subdir, 'd3q-latest']] + targetdir = os.path.join(self.builddir, self.install_subdir) + for dirname in dirnames: + shutil.move(os.path.join(self.builddir, dirname), os.path.join(targetdir, dirname)) + self.log.info("Moved %s into %s" % (dirname, targetdir)) + + dirname_head = dirname.split('-')[0] + # Handle the case where the directory is preceded by 'qe-' + if dirname_head == 'qe': + dirname_head = dirname.split('-')[1] + linkname = None + if dirname_head == 'sax': + linkname = 'SaX' + if dirname_head == 'wannier90': + linkname = 'W90' + elif dirname_head in ['d3q', 'gipaw', 'plumed', 'want', 'yambo']: + linkname = dirname_head.upper() + if linkname: + os.symlink(os.path.join(targetdir, dirname), os.path.join(targetdir, linkname)) + + except OSError as err: + raise EasyBuildError("Failed to move non-espresso directories: %s", err) + + def install_step(self): + """Custom install step for Quantum ESPRESSO.""" + raise EasyBuildError("STOP") + # extract build targets as list + targets = self.cfg['buildopts'].split() + + # Copy all binaries + bindir = os.path.join(self.installdir, 'bin') + copy_dir(os.path.join(self.cfg['start_dir'], 'bin'), bindir) + + # Pick up files not installed in bin + def copy_binaries(path): + full_dir = os.path.join(self.cfg['start_dir'], path) + self.log.info("Looking for binaries in %s" % full_dir) + for filename in os.listdir(full_dir): + full_path = os.path.join(full_dir, filename) + if os.path.isfile(full_path): + if filename.endswith('.x'): + copy_file(full_path, bindir) + + if 'upf' in targets or 'all' in targets: + if LooseVersion(self.version) < LooseVersion("6.6"): + copy_binaries('upftools') + else: + copy_binaries('upflib') + copy_file(os.path.join(self.cfg['start_dir'], 'upflib', 'fixfiles.py'), bindir) + + if 'want' in targets: + copy_binaries('WANT') + + if 'w90' in targets: + copy_binaries('W90') + + if 'yambo' in targets: + copy_binaries('YAMBO') + + def sanity_check_step(self): + """Custom sanity check for Quantum ESPRESSO.""" + + # extract build targets as list + targets = self.cfg['buildopts'].split() + + bins = [] + if LooseVersion(self.version) < LooseVersion("6.7"): + # build list of expected binaries based on make targets + bins.extend(["iotk", "iotk.x", "iotk_print_kinds.x"]) + + if 'cp' in targets or 'all' in targets: + bins.extend(["cp.x", "wfdd.x"]) + if LooseVersion(self.version) < LooseVersion("6.4"): + bins.append("cppp.x") + + # only for v4.x, not in v5.0 anymore, called gwl in 6.1 at least + if 'gww' in targets or 'gwl' in targets: + bins.extend(["gww_fit.x", "gww.x", "head.x", "pw4gww.x"]) + + if 'ld1' in targets or 'all' in targets: + bins.extend(["ld1.x"]) + + if 'gipaw' in 
targets: + bins.extend(["gipaw.x"]) + + if 'neb' in targets or 'pwall' in targets or 'all' in targets: + if LooseVersion(self.version) > LooseVersion("5"): + bins.extend(["neb.x", "path_interpolation.x"]) + + if 'ph' in targets or 'all' in targets: + bins.extend(["dynmat.x", "lambda.x", "matdyn.x", "ph.x", "phcg.x", "q2r.x"]) + if LooseVersion(self.version) < LooseVersion("6"): + bins.extend(["d3.x"]) + if LooseVersion(self.version) > LooseVersion("5"): + bins.extend(["fqha.x", "q2qstar.x"]) + + if 'pp' in targets or 'pwall' in targets or 'all' in targets: + bins.extend(["average.x", "bands.x", "dos.x", "epsilon.x", "initial_state.x", + "plan_avg.x", "plotband.x", "plotproj.x", "plotrho.x", "pmw.x", "pp.x", + "projwfc.x", "sumpdos.x", "pw2wannier90.x", "pw2gw.x", + "wannier_ham.x", "wannier_plot.x"]) + if LooseVersion(self.version) > LooseVersion("5") and LooseVersion(self.version) < LooseVersion("6.4"): + bins.extend(["pw2bgw.x", "bgw2pw.x"]) + elif LooseVersion(self.version) <= LooseVersion("5"): + bins.extend(["pw2casino.x"]) + if LooseVersion(self.version) < LooseVersion("6.4"): + bins.extend(["pw_export.x"]) + + if 'pw' in targets or 'all' in targets: + bins.extend(["dist.x", "ev.x", "kpoints.x", "pw.x", "pwi2xsf.x"]) + if LooseVersion(self.version) < LooseVersion("6.5"): + if LooseVersion(self.version) >= LooseVersion("5.1"): + bins.extend(["generate_rVV10_kernel_table.x"]) + if LooseVersion(self.version) > LooseVersion("5"): + bins.extend(["generate_vdW_kernel_table.x"]) + if LooseVersion(self.version) <= LooseVersion("5"): + bins.extend(["path_int.x"]) + if LooseVersion(self.version) < LooseVersion("5.3.0"): + bins.extend(["band_plot.x", "bands_FS.x", "kvecs_FS.x"]) + + if 'pwcond' in targets or 'pwall' in targets or 'all' in targets: + bins.extend(["pwcond.x"]) + + if 'tddfpt' in targets or 'all' in targets: + if LooseVersion(self.version) > LooseVersion("5"): + bins.extend(["turbo_lanczos.x", "turbo_spectrum.x"]) + + upftools = [] + if 'upf' in targets or 'all' in targets: + if LooseVersion(self.version) < LooseVersion("6.6"): + upftools = ["casino2upf.x", "cpmd2upf.x", "fhi2upf.x", "fpmd2upf.x", "ncpp2upf.x", + "oldcp2upf.x", "read_upf_tofile.x", "rrkj2upf.x", "uspp2upf.x", "vdb2upf.x"] + if LooseVersion(self.version) > LooseVersion("5"): + upftools.extend(["interpolate.x", "upf2casino.x"]) + if LooseVersion(self.version) >= LooseVersion("6.3"): + upftools.extend(["fix_upf.x"]) + if LooseVersion(self.version) < LooseVersion("6.4"): + upftools.extend(["virtual.x"]) + else: + upftools.extend(["virtual_v2.x"]) + else: + upftools = ["upfconv.x", "virtual_v2.x", "fixfiles.py"] + + if 'vdw' in targets: # only for v4.x, not in v5.0 anymore + bins.extend(["vdw.x"]) + + if 'w90' in targets: + bins.extend(["wannier90.x"]) + if LooseVersion(self.version) >= LooseVersion("5.4"): + bins.extend(["postw90.x"]) + if LooseVersion(self.version) < LooseVersion("6.1"): + bins.extend(["w90chk2chk.x"]) + + want_bins = [] + if 'want' in targets: + want_bins = ["blc2wan.x", "conductor.x", "current.x", "disentangle.x", + "dos.x", "gcube2plt.x", "kgrid.x", "midpoint.x", "plot.x", "sumpdos", + "wannier.x", "wfk2etsf.x"] + if LooseVersion(self.version) > LooseVersion("5"): + want_bins.extend(["cmplx_bands.x", "decay.x", "sax2qexml.x", "sum_sgm.x"]) + + if 'xspectra' in targets: + bins.extend(["xspectra.x"]) + + yambo_bins = [] + if 'yambo' in targets: + yambo_bins = ["a2y", "p2y", "yambo", "ypp"] + + d3q_bins = [] + if 'd3q' in targets: + d3q_bins = ['d3_asr3.x', 'd3_lw.x', 'd3_q2r.x', + 'd3_qq2rr.x', 
'd3q.x', 'd3_r2q.x', 'd3_recenter.x', + 'd3_sparse.x', 'd3_sqom.x', 'd3_tk.x'] + if LooseVersion(self.version) < LooseVersion("6.4"): + d3q_bins.append('d3_import3py.x') + + custom_paths = { + 'files': [os.path.join('bin', x) for x in bins + upftools + want_bins + yambo_bins + d3q_bins], + 'dirs': [] + } + + super(EB_QuantumESPRESSO, self).sanity_check_step(custom_paths=custom_paths) diff --git a/easyblocks/r/relion.py b/easyblocks/r/relion.py new file mode 100644 index 0000000..6213fb3 --- /dev/null +++ b/easyblocks/r/relion.py @@ -0,0 +1,125 @@ +## +# Copyright 2009-2022 Ghent University +# +# This file is part of EasyBuild, +# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), +# with support of Ghent University (http://ugent.be/hpc), +# the Flemish Supercomputer Centre (VSC) (https://www.vscentrum.be), +# Flemish Research Foundation (FWO) (http://www.fwo.be/en) +# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en). +# +# https://github.com/easybuilders/easybuild +# +# EasyBuild is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation v2. +# +# EasyBuild is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>. +## +""" +EasyBuild support for building and installing RELION, implemented as an easyblock + +@author: Jasper Grimm (University of York) +""" +from easybuild.easyblocks.generic.cmakemake import CMakeMake +from easybuild.framework.easyconfig import CUSTOM +from easybuild.tools.modules import get_software_root +from easybuild.tools.config import build_option +from easybuild.tools.build_log import EasyBuildError, print_warning + + +class EB_RELION(CMakeMake): + """Support for building/installing RELION.""" + + @staticmethod + def extra_options(): + extra_vars = CMakeMake.extra_options() + extra_vars.update({ + 'cuda_texture': [False, "Enable cuda texture", CUSTOM], + 'default_cuda_capability': [None, "Default CUDA capabilitity for building RELION, e.g. '8.6'", CUSTOM], + 'doubleprec_cpu': [True, "Enable double precision (CPU)", CUSTOM], + 'doubleprec_gpu': [False, "Enable double precision (GPU)", CUSTOM], + 'disable_gui': [False, "Build without GUI", CUSTOM], + 'use_mkl': [True, "Use MKL for FFT (if MKL is a depencency)", CUSTOM], + }) + return extra_vars + + def configure_step(self, *args, **kwargs): + """Custom configure step for RELION""" + + # configure some default options + self.cfg.update('configopts', '-DCMAKE_SHARED_LINKER="$LIBS"') + self.cfg.update('configopts', '-DMPI_INCLUDE_PATH="$MPI_INC_DIR"') + + gui_deps = get_software_root('FLTK') and get_software_root('X11') + if self.cfg['disable_gui'] or not gui_deps: + if not gui_deps: + print_warning("Missing dependencies for the GUI (FLTK and X11 are required). 
Building without GUI.") + self.cfg.update('configopts', '-DGUI=OFF') + + if get_software_root('MKL') and self.cfg['use_mkl']: + self.cfg.update('configopts', '-DMKLFFT=ON') + + # check if CUDA is present + if get_software_root('CUDA'): + self.cfg.update('configopts', '-DCUDA=ON') + + # check cuda_compute_capabilities + cuda_cc = self.cfg['cuda_compute_capabilities'] or build_option('cuda_compute_capabilities') or [] + if not cuda_cc: + raise EasyBuildError("Can't build RELION with CUDA support without" + " specifying 'cuda-compute-capabilities'") + self.cfg.update('configopts', '-DCUDA_ARCH="%s"' % ' '.join(cuda_cc)) + + # check default_cuda_capability + default_cc = self.cfg['default_cuda_capability'] or min(cuda_cc) + if not self.cfg['default_cuda_capability']: + print_warning("No default CUDA capability defined! " + "Using '%s' taken as minimum from 'cuda_compute_capabilities'" % default_cc) + self.cfg.update('configopts', '-DDEFAULT_CUDA_ARCH="%s"' % default_cc) + + if self.cfg['cuda_texture']: + self.cfg.update('configopts', '-DCUDA_TEXTURE=ON') + + if not self.cfg['doubleprec_cpu']: + self.cfg.update('configopts', '-DDoublePrec_CPU=OFF') + + if self.cfg['doubleprec_gpu']: + self.log.warning("Enabling GPU double precision is not recommnded") + self.cfg.update('configopts', '-DDoublePrec_ACC=ON') + else: + self.cfg.update('configopts', '-DDoublePrec_ACC=OFF') + + else: + # CPU build + self.cfg.update('configopts', '-DALTCPU=ON') + + if self.cfg['doubleprec_cpu']: + self.cfg.update('configopts', '-DDoublePrec_CPU=ON') + else: + self.cfg.update('configopts', '-DDoublePrec_CPU=OFF') + + super(EB_RELION, self).configure_step(*args, **kwargs) + + def install_step(self, *args, **kwargs): + """Custom install step for RELION""" + self.cfg['install_cmd'] = 'make -j %s install' % self.cfg['parallel'] + + super(EB_RELION, self).install_step(*args, **kwargs) + + def sanity_check_step(self): + """Custom sanity check step for RELION.""" + custom_paths = { + 'files': ['bin/relion%s' % x for x in ['', '_autopick', '_batchrun', '_batchrun_mpi']], + 'dirs': [], + } + + custom_commands = ['relion --help', 'relion --version'] + + super(EB_RELION, self).sanity_check_step(custom_paths=custom_paths, custom_commands=custom_commands) diff --git a/easyblocks/w/wien2k.py b/easyblocks/w/wien2k.py index e827691..ce6c4c7 100644 --- a/easyblocks/w/wien2k.py +++ b/easyblocks/w/wien2k.py @@ -1,5 +1,5 @@ ## -# Copyright 2009-2019 Ghent University +# Copyright 2009-2023 Ghent University # # This file is part of EasyBuild, # originally created by the HPC team of Ghent University (http://ugent.be/hpc/en), @@ -46,7 +46,8 @@ import easybuild.tools.toolchain as toolchain from easybuild.framework.easyblock import EasyBlock from easybuild.framework.easyconfig import CUSTOM from easybuild.tools.build_log import EasyBuildError -from easybuild.tools.filetools import extract_file, mkdir, read_file, rmtree2, write_file +from easybuild.tools.filetools import apply_regex_substitutions, change_dir, extract_file, mkdir, read_file +from easybuild.tools.filetools import remove_dir, write_file from easybuild.tools.modules import get_software_root, get_software_version from easybuild.tools.run import run_cmd, run_cmd_qa @@ -73,6 +74,8 @@ class EB_WIEN2k(EasyBlock): 'mpi_remote': [False, "Whether to initiate MPI calls locally or remotely", CUSTOM], 'wien_granularity': [True, "Granularity for parallel execution (see manual)", CUSTOM], 'taskset': [None, "Specifies an optional command for binding a process to a specific core", CUSTOM], + 
'nmatmax': [19000, "Specifies the maximum matrix size", CUSTOM], + 'nume': [6000, "Specifies the number of states to output.", CUSTOM], } return EasyBlock.extra_options(extra_vars) @@ -86,9 +89,9 @@ class EB_WIEN2k(EasyBlock): cmd = "./expand_lapw" qanda = {'continue (y/n)': 'y'} no_qa = [ - 'tar -xf.*', - '.*copied and linked.*', - ] + 'tar -xf.*', + '.*copied and linked.*', + ] run_cmd_qa(cmd, qanda, no_qa=no_qa, log_all=True, simple=True) @@ -102,7 +105,11 @@ class EB_WIEN2k(EasyBlock): # toolchain-dependent values comp_answer = None if self.toolchain.comp_family() == toolchain.INTELCOMP: # @UndefinedVariable - if LooseVersion(get_software_version("icc")) >= LooseVersion("2011"): + if get_software_root('icc'): + intelver = get_software_version('icc') + elif get_software_root('intel-compilers'): + intelver = get_software_version('intel-compilers') + if LooseVersion(intelver) >= LooseVersion("2011"): if LooseVersion(self.version) < LooseVersion("17"): comp_answer = 'I' # Linux (Intel ifort 12.0 compiler + mkl ) else: @@ -159,7 +166,7 @@ class EB_WIEN2k(EasyBlock): else: line = regexp.sub('\\1:%s:%s' % (key, val), line) # avoid exit code > 0 at end of configuration - line = re.sub('(\s+)exit 1', '\\1exit 0', line) + line = re.sub(r'(\s+)exit 1', '\\1exit 0', line) sys.stdout.write(line) # set correct compilers @@ -178,24 +185,32 @@ class EB_WIEN2k(EasyBlock): dc['cc'] = dc.pop('COMPILERC') dc['fortran'] = dc.pop('COMPILER') dc['parallel'] = dc.pop('COMPILERP') - write_file('WIEN2k_COMPILER', '\n'.join(['%s:%s' % (k, v) for k, v in dc.iteritems()])) + write_file('WIEN2k_COMPILER', '\n'.join(['%s:%s' % (k, v) for k, v in dc.items()])) # configure with patched configure script self.log.debug('%s part I (configure)' % self.cfgscript) + if LooseVersion(self.version) >= LooseVersion('21'): + perlroot = get_software_root('Perl') + if perlroot is None: + raise EasyBuildError("Perl is a required dependency of WIEN2k as of version 21") + self.perlbin = os.path.join(perlroot, 'bin', 'perl') + else: + self.perlbin = '' + cmd = "./%s" % self.cfgscript qanda = { - 'Press RETURN to continue': '', - 'Your compiler:': '', - 'Hit Enter to continue': '', - 'Remote shell (default is ssh) =': '', - 'Remote copy (default is scp) =': '', - 'and you need to know details about your installed mpi ..) (y/n)': 'y', - 'Q to quit Selection:': 'Q', - 'A Compile all programs (suggested) Q Quit Selection:': 'Q', - ' Please enter the full path of the perl program: ': '', - 'continue or stop (c/s)': 'c', - '(like taskset -c). Enter N / your_specific_command:': 'N', + 'Press RETURN to continue': '', + 'Your compiler:': '', + 'Hit Enter to continue': '', + 'Remote shell (default is ssh) =': '', + 'Remote copy (default is scp) =': '', + 'and you need to know details about your installed mpi ..) (y/n)': 'y', + 'Q to quit Selection:': 'Q', + 'A Compile all programs (suggested) Q Quit Selection:': 'Q', + 'Please enter the full path of the perl program: ': self.perlbin, + 'continue or stop (c/s)': 'c', + '(like taskset -c). Enter N / your_specific_command:': 'N', } if LooseVersion(self.version) >= LooseVersion("13"): fftw_root = get_software_root('FFTW') @@ -205,80 +220,123 @@ class EB_WIEN2k(EasyBlock): else: raise EasyBuildError("Required FFTW dependency is missing") qanda.update({ - ') Selection:': comp_answer, - 'Shared Memory Architecture? 
-                 'Shared Memory Architecture? (y/N):': 'N',
-                 'Set MPI_REMOTE to 0 / 1:': '0',
-                 'You need to KNOW details about your installed MPI and FFTW ) (y/n)': 'y',
-                 'Please specify whether you want to use FFTW3 (default) or FFTW2 (FFTW3 / FFTW2):': fftw_spec,
-                 'Please specify the ROOT-path of your FFTW installation (like /opt/fftw3):': fftw_root,
-                 'is this correct? enter Y (default) or n:': 'Y',
+                ') Selection:': comp_answer,
+                'Shared Memory Architecture? (y/N):': 'N',
+                'Set MPI_REMOTE to 0 / 1:': '0',
+                'You need to KNOW details about your installed MPI and FFTW ) (y/n)': 'y',
+                'Do you want to use FFTW (recommended, but for sequential code not required)? (Y,n):': 'y',
+                'Please specify whether you want to use FFTW3 (default) or FFTW2 (FFTW3 / FFTW2):': fftw_spec,
+                'Please specify the ROOT-path of your FFTW installation (like /opt/fftw3):': fftw_root,
+                'is this correct? enter Y (default) or n:': 'Y',
             })
 
             libxcroot = get_software_root('libxc')
-            libxcquestion = 'LIBXC (that you have installed%s)? (y,N):' % \
-                (' before' if LooseVersion(self.version) < LooseVersion("17") else '')
+
+            if LooseVersion(self.version) < LooseVersion("17"):
+                libxcstr1 = ' before'
+                libxcstr3 = ''
+            elif LooseVersion(self.version) > LooseVersion("19"):
+                libxcstr1 = ' - usually not needed'
+                libxcstr3 = 'root-'
+            else:
+                libxcstr1 = ''
+                libxcstr3 = ''
+
+            if LooseVersion(self.version) > LooseVersion("23"):
+                libxcquestion1 = 'Would you like to use LIBXC (needed ONLY for self-consistent gKS mGGA calculations, for the stress tensor and experts who want to play with different DFT options. It must have been installed before)? (y,N): '
+            else:
+                libxcquestion1 = 'LIBXC (that you have installed%s)? (y,N):' % libxcstr1
+
+            libxcquestion2 = 'Do you want to automatically search for LIBXC installations? (Y,n):'
+            libxcquestion3 = 'Please enter the %sdirectory of your LIBXC-installation!:' % libxcstr3
+            libxcquestion4 = 'Please enter the lib-directory of your LIBXC-installation (usually lib or lib64)!:'
+            libxcquestion5 = 'LIBXC (usually not needed, ONLY for experts who want to play with different DFT options. '
+            libxcquestion5 += 'It must have been installed before)? (y,N):'
+
             if libxcroot:
                 qanda.update({
-                     libxcquestion: 'y',
-                     'Do you want to automatically search for LIBXC installations? (Y,n):': 'n',
-                     'Please enter the root-directory of your LIBXC-installation!:': libxcroot,
-                     'Please enter the lib-directory of your LIBXC-installation (usually lib or lib64!:': 'lib',
+                    libxcquestion1: 'y',
+                    libxcquestion2: 'n',
+                    libxcquestion3: libxcroot,
+                    libxcquestion4: 'lib',
+                    libxcquestion5: 'y',
                 })
             else:
-                qanda.update({libxcquestion: ''})
+                qanda.update({
+                    libxcquestion1: 'N',
+                    libxcquestion5: 'N',
+                })
 
             if LooseVersion(self.version) >= LooseVersion("17"):
                 scalapack_libs = os.getenv('LIBSCALAPACK').split()
                 scalapack = next((lib[2:] for lib in scalapack_libs if 'scalapack' in lib), 'scalapack')
                 blacs = next((lib[2:] for lib in scalapack_libs if 'blacs' in lib), 'openblas')
                 qanda.update({
-                     'You need to KNOW details about your installed MPI, ELPA, and FFTW ) (y/N)': 'y',
-                     'Do you want to use a present ScaLAPACK installation? (Y,n):': 'y',
-                     'Do you want to use the MKL version of ScaLAPACK? (Y,n):': 'n',  # we set it ourselves below
-                     'Do you use Intel MPI? (Y,n):': 'y',
-                     'Is this correct? (Y,n):': 'y',
-                     'Please specify the target architecture of your ScaLAPACK libraries (e.g. intel64)!:': '',
-                     'ScaLAPACK root:': os.getenv('MKLROOT') or os.getenv('EBROOTSCALAPACK'),
-                     'ScaLAPACK library:': scalapack,
-                     'BLACS root:': os.getenv('MKLROOT') or os.getenv('EBROOTOPENBLAS'),
-                     'BLACS library:': blacs,
-                     'Please enter your choice of additional libraries!:': '',
-                     'Do you want to use a present FFTW installation? (Y,n):': 'y',
-                     'Please specify the path of your FFTW installation (like /opt/fftw3/) '
-                     'or accept present choice (enter):': fftw_root,
-                     'Please specify the target achitecture of your FFTW library (e.g. lib64) '
-                     'or accept present choice (enter):': '',
-                     'Do you want to automatically search for FFTW installations? (Y,n):': 'n',
-                     'Please specify the ROOT-path of your FFTW installation (like /opt/fftw3/) '
-                     'or accept present choice (enter):': fftw_root,
-                     'Is this correct? enter Y (default) or n:': 'Y',
-                     'Please specify the name of your FFTW library or accept present choice (enter):': '',
-                     'Please specify your parallel compiler options or accept the recommendations '
-                     '(Enter - default)!:': '',
-                     'Please specify your MPIRUN command or accept the recommendations (Enter - default)!:': '',
-                     # the temporary directory is hardcoded into execution scripts and must exist at runtime
-                     'Please enter the full path to your temporary directory:': '/tmp',
-                 })
+                    'You need to KNOW details about your installed MPI, ELPA, and FFTW ) (y/N)': 'y',
+                    'Do you want to use a present ScaLAPACK installation? (Y,n):': 'y',
+                    'Do you want to use the MKL version of ScaLAPACK? (Y,n):': 'n',  # we set it ourselves below
+                    'Do you use Intel MPI? (Y,n):': 'y',
+                    'Is this correct? (Y,n):': 'y',
+                    'Please specify the target architecture of your ScaLAPACK libraries (e.g. intel64)!:': '',
+                    'ScaLAPACK root:': os.getenv('MKLROOT') or os.getenv('EBROOTSCALAPACK'),
+                    'ScaLAPACK library:': scalapack,
+                    'BLACS root:': os.getenv('MKLROOT') or os.getenv('EBROOTOPENBLAS'),
+                    'BLACS library:': blacs,
+                    'Please enter your choice of additional libraries!:': '',
+                    'Do you want to use a present FFTW installation? (Y,n):': 'y',
+                    'Please specify the path of your FFTW installation (like /opt/fftw3/) '
+                    'or accept present choice (enter):': fftw_root,
+                    'Please specify the target achitecture of your FFTW library (e.g. lib64) '
+                    'or accept present choice (enter):': '',
+                    'Do you want to automatically search for FFTW installations? (Y,n):': 'n',
+                    'Please specify the ROOT-path of your FFTW installation (like /opt/fftw3/) '
+                    'or accept present choice (enter):': fftw_root,
+                    'Is this correct? enter Y (default) or n:': 'Y',
+                    'Please specify the name of your FFTW library or accept present choice (enter):': '',
+                    'or accept the recommendations (Enter - default)!:': '',
+                    # the temporary directory is hardcoded into execution scripts and must exist at runtime
+                    'Please enter the full path to your temporary directory:': '/tmp',
+                })
+                std_qa = {}
 
                 elparoot = get_software_root('ELPA')
                 if elparoot:
+
+                    apply_regex_substitutions(self.cfgscript, [(r'cat elpahelp2$', 'cat -n elpahelp2')])
+
+                    elpa_dict = {
+                        'root': elparoot,
+                        'version': get_software_version('ELPA'),
+                        'variant': 'elpa_openmp' if self.toolchain.get_flag('openmp') else 'elpa'}
+
+                    elpa_dir = "%(root)s/include/%(variant)s-%(version)s" % elpa_dict
+                    std_qa.update({
+                        r".*(?P<number>[0-9]+)\t%s\n(.*\n)*" % elpa_dir: "%(number)s",
+                    })
+
                     qanda.update({
                         'Do you want to use ELPA? (y,N):': 'y',
                         'Do you want to automatically search for ELPA installations? (Y,n):': 'n',
                         'Please specify the ROOT-path of your ELPA installation (like /usr/local/elpa/) '
                         'or accept present path (Enter):': elparoot,
+                        'Please specify the lib-directory of your ELPA installation (e.g. lib or lib64)!:': 'lib',
+                        'Please specify the lib-directory of your ELPA installation (e.g. lib or lib64):': 'lib',
+                        'Please specify the name of your installed ELPA library (e.g. elpa or elpa_openmp)!:':
+                            elpa_dict['variant'],
+                        'Please specify the name of your installed ELPA library (e.g. elpa or elpa_openmp):':
+                            elpa_dict['variant'],
                     })
                 else:
                     qanda.update({'Do you want to use ELPA? (y,N):': 'n'})
         else:
             qanda.update({
-                 'compiler) Selection:': comp_answer,
-                 'Shared Memory Architecture? (y/n):': 'n',
-                 'If you are using mpi2 set MPI_REMOTE to 0 Set MPI_REMOTE to 0 / 1:': '0',
-                 'Do you have MPI and Scalapack installed and intend to run '
-                 'finegrained parallel? (This is usefull only for BIG cases '
-                 '(50 atoms and more / unit cell) and you need to know details '
-                 'about your installed mpi and fftw ) (y/n)': 'y',
+                'compiler) Selection:': comp_answer,
+                'Shared Memory Architecture? (y/n):': 'n',
+                'If you are using mpi2 set MPI_REMOTE to 0 Set MPI_REMOTE to 0 / 1:': '0',
+                'Do you have MPI and Scalapack installed and intend to run '
+                'finegrained parallel? (This is usefull only for BIG cases '
+                '(50 atoms and more / unit cell) and you need to know details '
+                'about your installed mpi and fftw ) (y/n)': 'y',
             })
 
         no_qa = [
@@ -287,15 +345,14 @@ class EB_WIEN2k(EasyBlock):
             "%s[ \t]*.*" % os.getenv('F90'),
             "%s[ \t]*.*" % os.getenv('CC'),
             ".*SRC_.*",
-            "Please enter the full path of the perl program:",
         ]
 
-        std_qa = {
+        std_qa.update({
            r'S\s+Save and Quit[\s\n]+To change an item select option.[\s\n]+Selection:': 'S',
            'Recommended setting for parallel f90 compiler: .* Current selection: Your compiler:': os.getenv('MPIF90'),
            r'process or you can change single items in "Compiling Options".[\s\n]+Selection:': 'S',
            r'A\s+Compile all programs (suggested)[\s\n]+Q\s*Quit[\s\n]+Selection:': 'Q',
-        }
+        })
 
         run_cmd_qa(cmd, qanda, no_qa=no_qa, std_qa=std_qa, log_all=True, simple=True)
 
@@ -329,6 +386,15 @@ class EB_WIEN2k(EasyBlock):
         self.log.debug("Patched file %s: %s", parallel_options_fp, read_file(parallel_options_fp))
+        # Set configurable parameters for size of problems.
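+        # 'nmatmax' caps the maximum matrix size and 'nume' the number of states written out
+        # (see the custom easyconfig parameters defined above); both are compile-time constants
+        # in SRC_lapw1/param.inc, so they have to be patched in before the build step.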
+        param_subs = [
+            (r'\s+PARAMETER\s+\(\s*NMATMAX\s*=\s*\d+\)', r' PARAMETER (NMATMAX=%s)' % self.cfg['nmatmax']),
+            (r'\s+PARAMETER\s+\(\s*NUME\s*=\s*\d+\)', r' PARAMETER (NUME=%s)' % self.cfg['nume']),
+        ]
+        self.log.debug("param_subs = %s" % param_subs)
+        apply_regex_substitutions('SRC_lapw1/param.inc', param_subs)
+        self.log.debug("Patched file %s: %s", 'SRC_lapw1/param.inc', read_file('SRC_lapw1/param.inc'))
+
 
     def build_step(self):
         """Build WIEN2k by running siteconfig_lapw script again."""
@@ -338,31 +404,30 @@ class EB_WIEN2k(EasyBlock):
 
         qanda = {
             'Press RETURN to continue': '\nQ',  # also answer on first qanda pattern with 'Q' to quit
-             ' Please enter the full path of the perl program: ': '',
-             }
+            'Please enter the full path of the perl program: ': self.perlbin,
+        }
         if LooseVersion(self.version) < LooseVersion("17"):
             qanda.update({
-                 'L Perl path (if not in /usr/bin/perl) Q Quit Selection:': 'R',
-                 'A Compile all programs S Select program Q Quit Selection:': 'A',
+                'L Perl path (if not in /usr/bin/perl) Q Quit Selection:': 'R',
+                'A Compile all programs S Select program Q Quit Selection:': 'A',
             })
         else:
            qanda.update({
-                 'program Q Quit Selection:': 'A',
-                 'Path Q Quit Selection:': 'R',
+                'program Q Quit Selection:': 'A',
+                'Path Q Quit Selection:': 'R',
            })
 
        no_qa = [
-             "%s[ \t]*.*" % os.getenv('MPIF90'),
-             "%s[ \t]*.*" % os.getenv('F90'),
-             "%s[ \t]*.*" % os.getenv('CC'),
-             "mv[ \t]*.*",
-             ".*SRC_.*",
-             ".*: warning .*",
-             ".*Stop.",
-             "Compile time errors (if any) were:",
-             "Please enter the full path of the perl program:",
-             ]
+            "%s[ \t]*.*" % os.getenv('MPIF90'),
+            "%s[ \t]*.*" % os.getenv('F90'),
+            "%s[ \t]*.*" % os.getenv('CC'),
+            "mv[ \t]*.*",
+            ".*SRC_.*",
+            ".*: warning .*",
+            ".*Stop.",
+            "Compile time errors (if any) were:",
+        ]
 
         self.log.debug("no_qa for %s: %s" % (cmd, no_qa))
         run_cmd_qa(cmd, qanda, no_qa=no_qa, log_all=True, simple=True)
 
@@ -376,7 +441,7 @@ class EB_WIEN2k(EasyBlock):
             cmd = "x_lapw lapw1 %s" % cmd_arg
             (out, _) = run_cmd(cmd, log_all=True, simple=False)
 
-            re_success = re.compile("LAPW1\s+END")
+            re_success = re.compile(r"LAPW1\s+END")
             if not re_success.search(out):
                 raise EasyBuildError("Test '%s' in %s failed (pattern '%s' not found)?",
                                      cmd, os.getcwd(), re_success.pattern)
 
@@ -413,7 +478,8 @@ class EB_WIEN2k(EasyBlock):
             # unpack serial benchmark
             serial_test_name = "test_case"
-            extract_file(testdata_paths['%s.tar.gz' % serial_test_name], tmpdir)
+            srcdir = extract_file(testdata_paths['%s.tar.gz' % serial_test_name], tmpdir, change_into_dir=False)
+            change_dir(srcdir)
 
             # run serial benchmark
             os.chdir(os.path.join(tmpdir, serial_test_name))
@@ -421,14 +487,15 @@ class EB_WIEN2k(EasyBlock):
             # unpack parallel benchmark (in serial benchmark dir)
             parallel_test_name = "mpi-benchmark"
-            extract_file(testdata_paths['%s.tar.gz' % parallel_test_name], tmpdir)
+            srcdir = extract_file(testdata_paths['%s.tar.gz' % parallel_test_name], tmpdir, change_into_dir=False)
+            change_dir(srcdir)
 
             # run parallel benchmark
             os.chdir(os.path.join(tmpdir, serial_test_name))
             run_wien2k_test("-p")
 
             os.chdir(cwd)
-            rmtree2(tmpdir)
+            remove_dir(tmpdir)
         except OSError as err:
             raise EasyBuildError("Failed to run WIEN2k benchmark tests: %s", err)
 
@@ -492,7 +559,7 @@ class EB_WIEN2k(EasyBlock):
         # cleanup
         try:
             os.chdir(cwd)
-            rmtree2(tmpdir)
+            remove_dir(tmpdir)
         except OSError as err:
             raise EasyBuildError("Failed to clean up temporary test dir: %s", err)
--
GitLab
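Note: 'nmatmax' and 'nume' are registered above as CUSTOM easyconfig parameters, so they can be overridden per easyconfig instead of by editing the easyblock. A minimal, illustrative fragment follows; the version, toolchain, and parameter values shown are assumptions for the sketch, not taken from this patch:

    name = 'WIEN2k'
    version = '21.1'
    toolchain = {'name': 'intel', 'version': '2021a'}
    # enlarge the compile-time limits that the easyblock patches into SRC_lapw1/param.inc
    nmatmax = 30000
    nume = 10000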