diff --git a/src/simulation/m_rhs.fpp b/src/simulation/m_rhs.fpp
index 99a923145e..10cba941ca 100644
--- a/src/simulation/m_rhs.fpp
+++ b/src/simulation/m_rhs.fpp
@@ -1895,7 +1895,7 @@ contains
             @:DEALLOCATE_GLOBAL(qL_rsz_vf, qR_rsz_vf)
         end if
 
-        if (weno_Re_flux) then
+        if (any(Re_size > 0) .and. weno_Re_flux) then
             @:DEALLOCATE_GLOBAL(dqL_rsx_vf, dqR_rsx_vf)
 
             if (n > 0) then
diff --git a/src/simulation/m_start_up.fpp b/src/simulation/m_start_up.fpp
index 09bfe64219..02e65d49f9 100644
--- a/src/simulation/m_start_up.fpp
+++ b/src/simulation/m_start_up.fpp
@@ -128,7 +128,7 @@ contains
         ! Namelist of the global parameters which may be specified by user
         namelist /user_inputs/ case_dir, run_time_info, m, n, p, dt, &
             t_step_start, t_step_stop, t_step_save, &
-            model_eqns, adv_alphan, &
+            model_eqns, num_fluids, adv_alphan, &
             mpp_lim, time_stepper, weno_eps, weno_flat, &
             riemann_flat, cu_mpi, cu_tensor, &
             mapped_weno, mp_weno, weno_avg, &
@@ -143,7 +143,7 @@ contains
             rhoref, pref, bubbles, bubble_model, &
             R0ref, &
 #:if not MFC_CASE_OPTIMIZATION
-            nb, weno_order, num_fluids, &
+            nb, weno_order, &
 #:endif
             Ca, Web, Re_inv, &
             monopole, mono, num_mono, &
diff --git a/toolchain/mfc/run/:wq b/toolchain/mfc/run/:wq
deleted file mode 100644
index 4192bf8598..0000000000
--- a/toolchain/mfc/run/:wq
+++ /dev/null
@@ -1,115 +0,0 @@
-import typing, dataclasses
-import os
-from .. import common
-from ..state import ARG
-
-# Note: This file is now only used when running
-# in serial mode.
-
-@dataclasses.dataclass
-class MPIBinary:
-    name: str
-    bin: str
-
-    def is_present(self) -> bool:
-        return common.does_command_exist(self.bin)
-
-    def gen_params(self) -> typing.List[str]:
-        raise common.MFCException(f"MPIBinary::gen_params <{self.name}> not implemented.")
-
-
-class JSRUN(MPIBinary):
-    def __init__(self):
-        super().__init__("IBM's JSRUN", "jsrun")
-
-    def gen_params(self) -> typing.List[str]:
-        # ORNL Summit: https://docs.olcf.ornl.gov/systems/summit_user_guide.html?highlight=lsf#launching-a-job-with-jsrun
-        # We create one resource-set per CPU(Core)/GPU pair.
-        nrs=ARG("tasks_per_node")*ARG("nodes")
-        cores_per_rs=1
-        gpus_per_rs=min(ARG("tasks_per_node"), 1)
-        tasks_per_rs=1
-
-        arguments=[
-            '--nrs', nrs,
-            '--cpu_per_rs', cores_per_rs,
-            '--gpu_per_rs', gpus_per_rs,
-            '--tasks_per_rs', tasks_per_rs
-        ]
-
-        if gpus_per_rs >= 1:
-            arguments.append('--smpiargs=-gpu')
-
-        return arguments
-
-
-class SRUN(MPIBinary):
-    def __init__(self):
-        super().__init__("SLURM's SRUN", "srun")
-
-    def gen_params(self) -> typing.List[str]:
-        host = os.popen('hostname').read()
-        if "frontier" in host:
-            params = ['-n', ARG("tasks_per_node"), "--unbuffered"]
-        else:
-            params = ['--ntasks-per-node', ARG("tasks_per_node")]
-
-        if ARG("nodes") != 1:
-            params += ['-N', ARG("nodes")]
-
-        params += ['--no-build']
-
-        # MFC binds its GPUs on its own, as long as they have been allocated
-        # by the system's scheduler, or are present on your local machine,
-        # if running in serial mode.
-
-        if not common.isspace(ARG("account")):
-            params += ['-A', ARG("account")]
-
-        if not common.isspace(ARG("partition")):
-            params += ['-p', ARG("partition")]
-
-        return params
-
-
-class MPIEXEC(MPIBinary):
-    def __init__(self):
-        super().__init__("MPIEXEC", "mpiexec")
-
-    def gen_params(self) -> str:
-        return ["-np", ARG("tasks_per_node")*ARG("nodes")]
-
-
-class MPIRUN(MPIBinary):
-    def __init__(self):
-        super().__init__("MPIRUN", "mpirun")
-
-    def gen_params(self) -> str:
-        return ["-np", ARG("tasks_per_node")*ARG("nodes")]
-
-
-# In descending order of priority (if no override present)
-BINARIES: list = [ JSRUN(), SRUN(), MPIRUN(), MPIEXEC() ]
-
-def get_binary(exclude: typing.List[str] = None) -> MPIBinary:
-    if exclude is None:
-        exclude = []
-
-    binaries = [
-        b for b in BINARIES if b.is_present() and b.bin not in exclude
-    ]
-
-    if len(binaries) == 0:
-        raise common.MFCException("No MPI binary found.")
-
-    # Handle user override
-    if ARG("binary") is not None:
-        for binary in binaries:
-            binary: MPIBinary
-
-            if binary.bin == ARG("binary"):
-                return binary
-
-        raise common.MFCException(f"MPI Binary <{ARG('binary')}> not found.")
-
-    return binaries[0]
diff --git a/toolchain/mfc/run/input.py b/toolchain/mfc/run/input.py
index 5ff26ba081..3b763b9f13 100644
--- a/toolchain/mfc/run/input.py
+++ b/toolchain/mfc/run/input.py
@@ -207,7 +207,7 @@ def _default() -> str:
             "pre_process" : self.__get_pre_fpp,
             "simulation" : self.__get_sim_fpp,
             "post_process" : self.__get_post_fpp,
-        }.get(build.get_target(target).name, _default)()
+        }.get(build.get_target(target).name, _default)(print)
 
         return result
     def generate_fpp(self, target) -> None:
diff --git a/toolchain/mfc/run/mpi_bins.py b/toolchain/mfc/run/mpi_bins.py
deleted file mode 100644
index 60c044b208..0000000000
--- a/toolchain/mfc/run/mpi_bins.py
+++ /dev/null
@@ -1,113 +0,0 @@
-import typing, dataclasses
-import os
-from .. import common
-from ..state import ARG
-
-# Note: This file is now only used when running
-# in serial mode.
-
-@dataclasses.dataclass
-class MPIBinary:
-    name: str
-    bin: str
-
-    def is_present(self) -> bool:
-        return common.does_command_exist(self.bin)
-
-    def gen_params(self) -> typing.List[str]:
-        raise common.MFCException(f"MPIBinary::gen_params <{self.name}> not implemented.")
-
-
-class JSRUN(MPIBinary):
-    def __init__(self):
-        super().__init__("IBM's JSRUN", "jsrun")
-
-    def gen_params(self) -> typing.List[str]:
-        # ORNL Summit: https://docs.olcf.ornl.gov/systems/summit_user_guide.html?highlight=lsf#launching-a-job-with-jsrun
-        # We create one resource-set per CPU(Core)/GPU pair.
-        nrs=ARG("tasks_per_node")*ARG("nodes")
-        cores_per_rs=1
-        gpus_per_rs=min(ARG("tasks_per_node"), 1)
-        tasks_per_rs=1
-
-        arguments=[
-            '--nrs', nrs,
-            '--cpu_per_rs', cores_per_rs,
-            '--gpu_per_rs', gpus_per_rs,
-            '--tasks_per_rs', tasks_per_rs
-        ]
-
-        if gpus_per_rs >= 1:
-            arguments.append('--smpiargs=-gpu')
-
-        return arguments
-
-
-class SRUN(MPIBinary):
-    def __init__(self):
-        super().__init__("SLURM's SRUN", "srun")
-
-    def gen_params(self) -> typing.List[str]:
-        host = os.popen('hostname').read()
-        if "frontier" in host:
-            params = ['-n', ARG("tasks_per_node"), "--unbuffered"]
-        else:
-            params = ['--ntasks-per-node', ARG("tasks_per_node")]
-
-        if ARG("nodes") != 1:
-            params += ['-N', ARG("nodes")]
-
-        # MFC binds its GPUs on its own, as long as they have been allocated
-        # by the system's scheduler, or are present on your local machine,
-        # if running in serial mode.
-
-        if not common.isspace(ARG("account")):
-            params += ['-A', ARG("account")]
-
-        if not common.isspace(ARG("partition")):
-            params += ['-p', ARG("partition")]
-
-        return params
-
-
-class MPIEXEC(MPIBinary):
-    def __init__(self):
-        super().__init__("MPIEXEC", "mpiexec")
-
-    def gen_params(self) -> str:
-        return ["-np", ARG("tasks_per_node")*ARG("nodes")]
-
-
-class MPIRUN(MPIBinary):
-    def __init__(self):
-        super().__init__("MPIRUN", "mpirun")
-
-    def gen_params(self) -> str:
-        return ["-np", ARG("tasks_per_node")*ARG("nodes")]
-
-
-# In descending order of priority (if no override present)
-BINARIES: list = [ JSRUN(), SRUN(), MPIRUN(), MPIEXEC() ]
-
-def get_binary(exclude: typing.List[str] = None) -> MPIBinary:
-    if exclude is None:
-        exclude = []
-
-    binaries = [
-        b for b in BINARIES if b.is_present() and b.bin not in exclude
-    ]
-
-    if len(binaries) == 0:
-        raise common.MFCException("No MPI binary found.")
-
-    # Handle user override
-    if ARG("binary") is not None:
-        for binary in binaries:
-            binary: MPIBinary
-
-            if binary.bin == ARG("binary"):
-                return binary
-
-        raise common.MFCException(f"MPI Binary <{ARG('binary')}> not found.")
-
-    return binaries[0]