diff --git a/host-configs/apple/macOS_base.cmake b/host-configs/apple/macOS_base.cmake
index ec74d15b9c1..6d56f70755c 100644
--- a/host-configs/apple/macOS_base.cmake
+++ b/host-configs/apple/macOS_base.cmake
@@ -25,7 +25,7 @@ set(ENABLE_CALIPER "OFF" CACHE PATH "" FORCE )
 set( BLAS_LIBRARIES ${HOMEBREW_DIR}/opt/lapack/lib/libblas.dylib CACHE PATH "" FORCE )
 set( LAPACK_LIBRARIES ${HOMEBREW_DIR}/opt/lapack/lib/liblapack.dylib CACHE PATH "" FORCE )
-set(ENABLE_DOXYGEN ON CACHE BOOL "" FORCE)
+set(ENABLE_DOXYGEN OFF CACHE BOOL "" FORCE)
 set(ENABLE_SPHINX ON CACHE BOOL "" FORCE)
 
 set(ENABLE_MATHPRESSO ON CACHE BOOL "" FORCE )
diff --git a/src/coreComponents/common/CMakeLists.txt b/src/coreComponents/common/CMakeLists.txt
index 830be7b7b1f..9a4de42a717 100644
--- a/src/coreComponents/common/CMakeLists.txt
+++ b/src/coreComponents/common/CMakeLists.txt
@@ -48,6 +48,7 @@ set( common_headers
   Tensor.hpp
   TimingMacros.hpp
   TypeDispatch.hpp
+  TypesHelpers.hpp
   initializeEnvironment.hpp
   LifoStorage.hpp
   LifoStorageCommon.hpp
diff --git a/src/coreComponents/common/MpiWrapper.hpp b/src/coreComponents/common/MpiWrapper.hpp
index 4d017ef8393..de529215ea0 100644
--- a/src/coreComponents/common/MpiWrapper.hpp
+++ b/src/coreComponents/common/MpiWrapper.hpp
@@ -22,6 +22,7 @@
 #include "common/DataTypes.hpp"
 #include "common/Span.hpp"
+#include "common/TypesHelpers.hpp"
 
 #if defined(GEOS_USE_MPI)
 #include <mpi.h>
@@ -128,6 +129,8 @@ struct MpiWrapper
     Min,        //!< Min
     Sum,        //!< Sum
     Prod,       //!< Prod
+    LogicalAnd, //!< Logical and
+    LogicalOr,  //!< Logical or
   };
 
   MpiWrapper() = delete;
@@ -351,18 +354,6 @@ struct MpiWrapper
                           array1d< T > & recvbuf,
                           MPI_Comm comm = MPI_COMM_GEOS );
 
-  /**
-   * @brief Strongly typed wrapper around MPI_Allreduce.
-   * @param[in] sendbuf The pointer to the sending buffer.
-   * @param[out] recvbuf The pointer to the receive buffer.
-   * @param[in] count The number of values to send/receive.
-   * @param[in] op The MPI_Op to perform.
-   * @param[in] comm The MPI_Comm over which the gather operates.
-   * @return The return value of the underlying call to MPI_Allreduce().
-   */
-  template< typename T >
-  static int allReduce( T const * sendbuf, T * recvbuf, int count, MPI_Op op, MPI_Comm comm = MPI_COMM_GEOS );
-
   /**
    * @brief Convenience wrapper for the MPI_Allreduce function.
    * @tparam T type of data to reduce. Must correspond to a valid MPI_Datatype.
@@ -385,6 +376,31 @@ struct MpiWrapper
   template< typename T >
   static void allReduce( Span< T const > src, Span< T > dst, Reduction const op, MPI_Comm comm = MPI_COMM_GEOS );
 
+  /**
+   * @brief Convenience wrapper for the MPI_Allreduce function. Version for containers.
+   * @tparam SRC_CONTAINER_TYPE type of the source container. Its value type must correspond to a valid MPI_Datatype.
+   * @tparam DST_CONTAINER_TYPE type of the destination container. Its value type must match that of the source.
+   * @param[in] src The values to send to the reduction.
+   * @param[out] dst The resulting values.
+   * @param op The Reduction enum to perform.
+   * @param comm The communicator.
+   */
+  template< typename SRC_CONTAINER_TYPE, typename DST_CONTAINER_TYPE >
+  static void allReduce( SRC_CONTAINER_TYPE const & src, DST_CONTAINER_TYPE & dst, Reduction const op, MPI_Comm const comm = MPI_COMM_GEOS );
+
+  /**
+   * @brief Convenience wrapper for the MPI_Allreduce function. Version for containers.
+   * @tparam SRC_CONTAINER_TYPE type of the source container. Its value type must correspond to a valid MPI_Datatype.
+   * @tparam DST_CONTAINER_TYPE type of the destination container. Its value type must match that of the source.
+   * @param[in] src The values to send to the reduction.
+   * @param[out] dst The resulting values.
+   * @param count The number of contiguous elements of the containers on which to perform the reduction (must be less than or equal to their sizes).
+   * @param op The Reduction enum to perform.
+ * @param comm The communicator. + */ + template< typename SRC_CONTAINER_TYPE, typename DST_CONTAINER_TYPE > + static void allReduce( SRC_CONTAINER_TYPE const & src, DST_CONTAINER_TYPE & dst, int const count, Reduction const op, MPI_Comm const comm ); + /** * @brief Strongly typed wrapper around MPI_Reduce. @@ -639,6 +653,19 @@ struct MpiWrapper */ template< typename T > static T maxValLoc( T localValueLocation, MPI_Comm comm = MPI_COMM_GEOS ); +private: + + /** + * @brief Strongly typed wrapper around MPI_Allreduce. + * @param[in] sendbuf The pointer to the sending buffer. + * @param[out] recvbuf The pointer to the receive buffer. + * @param[in] count The number of values to send/receive. + * @param[in] op The MPI_Op to perform. + * @param[in] comm The MPI_Comm over which the gather operates. + * @return The return value of the underlying call to MPI_Allreduce(). + */ + template< typename T > + static int allReduce( T const * sendbuf, T * recvbuf, int count, MPI_Op op, MPI_Comm comm = MPI_COMM_GEOS ); }; namespace internal @@ -701,6 +728,14 @@ inline MPI_Op MpiWrapper::getMpiOp( Reduction const op ) { return MPI_PROD; } + case Reduction::LogicalAnd: + { + return MPI_LAND; + } + case Reduction::LogicalOr: + { + return MPI_LOR; + } default: GEOS_ERROR( "Unsupported reduction operation" ); return MPI_NO_OP; @@ -1113,6 +1148,35 @@ void MpiWrapper::allReduce( Span< T const > const src, Span< T > const dst, Redu allReduce( src.data(), dst.data(), LvArray::integerConversion< int >( src.size() ), getMpiOp( op ), comm ); } +template< typename SRC_CONTAINER_TYPE, typename DST_CONTAINER_TYPE > +void MpiWrapper::allReduce( SRC_CONTAINER_TYPE const & src, DST_CONTAINER_TYPE & dst, int const count, Reduction const op, MPI_Comm const comm ) +{ + static_assert( std::is_trivially_copyable< typename get_value_type< SRC_CONTAINER_TYPE >::type >::value, + "The type in the source container must be trivially copyable." ); + static_assert( std::is_trivially_copyable< typename get_value_type< DST_CONTAINER_TYPE >::type >::value, + "The type in the destination container must be trivially copyable." ); + static_assert( std::is_same< typename get_value_type< SRC_CONTAINER_TYPE >::type, + typename get_value_type< DST_CONTAINER_TYPE >::type >::value, + "Source and destination containers must have the same value type." ); + GEOS_ASSERT_GE( src.size(), count ); + GEOS_ASSERT_GE( dst.size(), count ); + allReduce( src.data(), dst.data(), count, getMpiOp( op ), comm ); +} + +template< typename SRC_CONTAINER_TYPE, typename DST_CONTAINER_TYPE > +void MpiWrapper::allReduce( SRC_CONTAINER_TYPE const & src, DST_CONTAINER_TYPE & dst, Reduction const op, MPI_Comm const comm ) +{ + static_assert( std::is_trivially_copyable< typename get_value_type< SRC_CONTAINER_TYPE >::type >::value, + "The type in the source container must be trivially copyable." ); + static_assert( std::is_trivially_copyable< typename get_value_type< DST_CONTAINER_TYPE >::type >::value, + "The type in the destination container must be trivially copyable." ); + static_assert( std::is_same< typename get_value_type< SRC_CONTAINER_TYPE >::type, + typename get_value_type< DST_CONTAINER_TYPE >::type >::value, + "Source and destination containers must have the same value type." 
);
+  GEOS_ASSERT_EQ( src.size(), dst.size() );
+  allReduce( src.data(), dst.data(), LvArray::integerConversion< int >( src.size() ), getMpiOp( op ), comm );
+}
+
 template< typename T >
 T MpiWrapper::sum( T const & value, MPI_Comm comm )
 {
diff --git a/src/coreComponents/common/TypesHelpers.hpp b/src/coreComponents/common/TypesHelpers.hpp
new file mode 100644
index 00000000000..f0e514e35a1
--- /dev/null
+++ b/src/coreComponents/common/TypesHelpers.hpp
@@ -0,0 +1,124 @@
+/*
+ * ------------------------------------------------------------------------------------------------------------
+ * SPDX-License-Identifier: LGPL-2.1-only
+ *
+ * Copyright (c) 2016-2024 Lawrence Livermore National Security LLC
+ * Copyright (c) TotalEnergies
+ * Copyright (c) 2018-2024 The Board of Trustees of the Leland Stanford Junior University
+ * Copyright (c) 2023-2024 Chevron
+ * Copyright (c) 2019-     GEOS/GEOSX Contributors
+ * All rights reserved
+ *
+ * See top level LICENSE, COPYRIGHT, CONTRIBUTORS, NOTICE, and ACKNOWLEDGEMENTS files for details.
+ * ------------------------------------------------------------------------------------------------------------
+ */
+
+/**
+ * @file TypesHelpers.hpp
+ */
+
+#ifndef TYPES_HELPERS_HPP
+#define TYPES_HELPERS_HPP
+
+#include <type_traits>
+
+namespace geos
+{
+
+namespace internal
+{
+
+/**
+ * @brief Trait to determine if a type defines a `value_type` member.
+ *
+ * This primary template defaults to `std::false_type`, indicating that
+ * the type `T` does not define a `value_type` member.
+ *
+ * @tparam T The type to check.
+ * @tparam void A SFINAE parameter used to specialize the trait.
+ */
+template< typename T, typename = void >
+struct has_value_type : std::false_type {};
+
+/**
+ * @brief Specialization of `has_value_type` for types with a `value_type` member.
+ *
+ * If the type `T` defines a `value_type` member, this specialization
+ * is used, which inherits from `std::true_type`.
+ *
+ * @tparam T The type to check.
+ */
+template< typename T >
+struct has_value_type< T, std::void_t< typename T::value_type > > : std::true_type {};
+
+/**
+ * @brief Trait to determine if a type defines a `ValueType` member.
+ *
+ * This primary template defaults to `std::false_type`, indicating that
+ * the type `T` does not define a `ValueType` member.
+ *
+ * @tparam T The type to check.
+ * @tparam void A SFINAE parameter used to specialize the trait.
+ */
+template< typename T, typename = void >
+struct has_ValueType : std::false_type {};
+
+/**
+ * @brief Specialization of `has_ValueType` for types with a `ValueType` member.
+ *
+ * If the type `T` defines a `ValueType` member, this specialization
+ * is used, which inherits from `std::true_type`.
+ *
+ * @tparam T The type to check.
+ */
+template< typename T >
+struct has_ValueType< T, std::void_t< typename T::ValueType > > : std::true_type {};
+
+} // namespace internal
+
+/**
+ * @brief Trait to retrieve the `value_type` or `ValueType` of a type `T`.
+ *
+ * This primary template provides a static assertion error if `T` does not
+ * define either `value_type` or `ValueType`.
+ *
+ * @tparam T The type from which to extract the type alias.
+ * @tparam Enable A SFINAE parameter used for specialization.
+ */
+template< typename T, typename Enable = void >
+struct get_value_type
+{
+  static_assert( sizeof(T) == 0, "T must define either value_type or ValueType." );
+};
+
+/**
+ * @brief Specialization of `get_value_type` for types with a `value_type` member.
+ * + * If the type `T` defines a `value_type` member, this specialization + * retrieves it as the alias `type`. + * + * @tparam T The type from which to extract `value_type`. + */ +template< typename T > +struct get_value_type< T, std::enable_if_t< internal::has_value_type< T >::value > > +{ + using type = typename T::value_type; +}; + +/** + * @brief Specialization of `get_value_type` for types with a `ValueType` member. + * + * If the type `T` does not define a `value_type` but defines a `ValueType`, + * this specialization retrieves it as the alias `type`. + * + * @tparam T The type from which to extract `ValueType`. + */ +template< typename T > +struct get_value_type< T, std::enable_if_t< !internal::has_value_type< T >::value && internal::has_ValueType< T >::value > > +{ + using type = typename T::ValueType; +}; + +} // namespace geos + +#endif /* TYPES_HELPERS_HPP */ diff --git a/src/coreComponents/common/initializeEnvironment.cpp b/src/coreComponents/common/initializeEnvironment.cpp index 832f4fa542f..64387060583 100644 --- a/src/coreComponents/common/initializeEnvironment.cpp +++ b/src/coreComponents/common/initializeEnvironment.cpp @@ -290,7 +290,7 @@ static void addUmpireHighWaterMarks() string allocatorNameMinChars = string( MAX_NAME_LENGTH, '\0' ); // Make sure that each rank is looking at the same allocator. - MpiWrapper::allReduce( allocatorNameFixedSize.c_str(), &allocatorNameMinChars.front(), MAX_NAME_LENGTH, MPI_MIN, MPI_COMM_GEOS ); + MpiWrapper::allReduce( allocatorNameFixedSize, allocatorNameMinChars, MpiWrapper::Reduction::Min, MPI_COMM_GEOS ); if( allocatorNameFixedSize != allocatorNameMinChars ) { GEOS_WARNING( "Not all ranks have an allocator named " << allocatorNameFixedSize << ", cannot compute high water mark." ); diff --git a/src/coreComponents/fileIO/timeHistory/HDFHistoryIO.cpp b/src/coreComponents/fileIO/timeHistory/HDFHistoryIO.cpp index 24ae0ddddf6..fdfbf95c61d 100644 --- a/src/coreComponents/fileIO/timeHistory/HDFHistoryIO.cpp +++ b/src/coreComponents/fileIO/timeHistory/HDFHistoryIO.cpp @@ -258,8 +258,9 @@ void HDFHistoryIO::init( bool existsOkay ) void HDFHistoryIO::write() { // check if the size has changed on any process in the primary comm - int anyChanged = false; - MpiWrapper::allReduce( &m_sizeChanged, &anyChanged, 1, MPI_LOR, m_comm ); + int const anyChanged = MpiWrapper::allReduce( m_sizeChanged, + MpiWrapper::Reduction::LogicalOr, + m_comm ); m_sizeChanged = anyChanged; // this will set the first dim large enough to hold all the rows we're about to write diff --git a/src/coreComponents/finiteVolume/TwoPointFluxApproximation.cpp b/src/coreComponents/finiteVolume/TwoPointFluxApproximation.cpp index 25c128770fa..665e1eed140 100644 --- a/src/coreComponents/finiteVolume/TwoPointFluxApproximation.cpp +++ b/src/coreComponents/finiteVolume/TwoPointFluxApproximation.cpp @@ -951,10 +951,9 @@ void TwoPointFluxApproximation::computeAquiferStencil( DomainPartition & domain, localSumFaceAreasView[aquiferIndex] += targetSetSumFaceAreas.get(); } ); - MpiWrapper::allReduce( localSumFaceAreas.data(), - globalSumFaceAreas.data(), - localSumFaceAreas.size(), - MpiWrapper::getMpiOp( MpiWrapper::Reduction::Sum ), + MpiWrapper::allReduce( localSumFaceAreas, + globalSumFaceAreas, + MpiWrapper::Reduction::Sum, MPI_COMM_GEOS ); // Step 3: compute the face area fraction for each connection, and insert into boundary stencil diff --git a/src/coreComponents/mesh/ParticleManager.cpp b/src/coreComponents/mesh/ParticleManager.cpp index 057533e4f7e..57bd1687935 100644 --- 
a/src/coreComponents/mesh/ParticleManager.cpp +++ b/src/coreComponents/mesh/ParticleManager.cpp @@ -77,11 +77,9 @@ void ParticleManager::setMaxGlobalIndex() m_localMaxGlobalIndex = std::max( m_localMaxGlobalIndex, subRegion.maxGlobalIndex() ); } ); - MpiWrapper::allReduce( &m_localMaxGlobalIndex, - &m_maxGlobalIndex, - 1, - MPI_MAX, - MPI_COMM_GEOS ); + m_maxGlobalIndex = MpiWrapper::allReduce( m_localMaxGlobalIndex, + MpiWrapper::Reduction::Max, + MPI_COMM_GEOS ); } Group * ParticleManager::createChild( string const & childKey, string const & childName ) diff --git a/src/coreComponents/mesh/generators/InternalMeshGenerator.cpp b/src/coreComponents/mesh/generators/InternalMeshGenerator.cpp index ace6dd8846e..99fe38fb059 100644 --- a/src/coreComponents/mesh/generators/InternalMeshGenerator.cpp +++ b/src/coreComponents/mesh/generators/InternalMeshGenerator.cpp @@ -603,10 +603,9 @@ void InternalMeshGenerator::fillCellBlockManager( CellBlockManager & cellBlockMa { elemCenterCoordsLocal[k] = m_min[dim] + ( m_max[dim] - m_min[dim] ) * ( k + 0.5 ) / m_numElemsTotal[dim]; } - MpiWrapper::allReduce( elemCenterCoordsLocal.data(), - elemCenterCoords[dim].data(), - m_numElemsTotal[dim], - MPI_MAX, + MpiWrapper::allReduce( elemCenterCoordsLocal, + elemCenterCoords[dim], + MpiWrapper::Reduction::Max, MPI_COMM_GEOS ); } diff --git a/src/coreComponents/mesh/generators/VTKUtilities.cpp b/src/coreComponents/mesh/generators/VTKUtilities.cpp index a18b3e6767a..1fb198d70ce 100644 --- a/src/coreComponents/mesh/generators/VTKUtilities.cpp +++ b/src/coreComponents/mesh/generators/VTKUtilities.cpp @@ -746,8 +746,7 @@ vtkSmartPointer< vtkDataSet > manageGlobalIds( vtkSmartPointer< vtkDataSet > mes { // Add global ids on the fly if needed int const me = hasGlobalIds( mesh ); - int everyone; - MpiWrapper::allReduce( &me, &everyone, 1, MPI_MAX, MPI_COMM_GEOS ); + int const everyone = MpiWrapper::allReduce( me, MpiWrapper::Reduction::Max, MPI_COMM_GEOS ); if( everyone and not me ) { diff --git a/src/coreComponents/physicsSolvers/PhysicsSolverBaseKernels.hpp b/src/coreComponents/physicsSolvers/PhysicsSolverBaseKernels.hpp index ac8433b208b..45e2ade8570 100644 --- a/src/coreComponents/physicsSolvers/PhysicsSolverBaseKernels.hpp +++ b/src/coreComponents/physicsSolvers/PhysicsSolverBaseKernels.hpp @@ -267,10 +267,9 @@ class LinfResidualNormHelper static void computeGlobalNorm( array1d< real64 > const & localResidualNorm, array1d< real64 > & globalResidualNorm ) { - MpiWrapper::allReduce( localResidualNorm.data(), - globalResidualNorm.data(), - localResidualNorm.size(), - MpiWrapper::getMpiOp( MpiWrapper::Reduction::Max ), + MpiWrapper::allReduce( localResidualNorm, + globalResidualNorm, + MpiWrapper::Reduction::Max, MPI_COMM_GEOS ); } }; @@ -309,16 +308,17 @@ class L2ResidualNormHelper { array1d< real64 > sumLocalResidualNorm( localResidualNorm.size() ); array1d< real64 > sumLocalResidualNormalizer( localResidualNormalizer.size() ); - MpiWrapper::allReduce( localResidualNorm.data(), - sumLocalResidualNorm.data(), - localResidualNorm.size(), - MpiWrapper::getMpiOp( MpiWrapper::Reduction::Sum ), + + MpiWrapper::allReduce( localResidualNorm, + sumLocalResidualNorm, + MpiWrapper::Reduction::Sum, MPI_COMM_GEOS ); - MpiWrapper::allReduce( localResidualNormalizer.data(), - sumLocalResidualNormalizer.data(), - localResidualNormalizer.size(), - MpiWrapper::getMpiOp( MpiWrapper::Reduction::Sum ), + + MpiWrapper::allReduce( localResidualNormalizer, + sumLocalResidualNormalizer, + MpiWrapper::Reduction::Sum, MPI_COMM_GEOS ); + for( 
integer i = 0; i < localResidualNorm.size(); ++i ) { globalResidualNorm[i] = sqrt( sumLocalResidualNorm[i] ) / sqrt( sumLocalResidualNormalizer[i] ); diff --git a/src/coreComponents/physicsSolvers/contact/ContactSolverBase.cpp b/src/coreComponents/physicsSolvers/contact/ContactSolverBase.cpp index edd3bd4ec24..9555658db65 100644 --- a/src/coreComponents/physicsSolvers/contact/ContactSolverBase.cpp +++ b/src/coreComponents/physicsSolvers/contact/ContactSolverBase.cpp @@ -184,10 +184,9 @@ void ContactSolverBase::computeFractureStateStatistics( MeshLevel const & mesh, array1d< globalIndex > totalCounter( 4 ); - MpiWrapper::allReduce( localCounter.data(), - totalCounter.data(), - 4, - MPI_SUM, + MpiWrapper::allReduce( localCounter, + totalCounter, + MpiWrapper::Reduction::Sum, MPI_COMM_GEOS ); numStick = totalCounter[0]; diff --git a/src/coreComponents/physicsSolvers/contact/SolidMechanicsEmbeddedFractures.cpp b/src/coreComponents/physicsSolvers/contact/SolidMechanicsEmbeddedFractures.cpp index b78afce691a..4e31c7c4eaa 100644 --- a/src/coreComponents/physicsSolvers/contact/SolidMechanicsEmbeddedFractures.cpp +++ b/src/coreComponents/physicsSolvers/contact/SolidMechanicsEmbeddedFractures.cpp @@ -841,12 +841,9 @@ bool SolidMechanicsEmbeddedFractures::updateConfiguration( DomainPartition & dom synchronizeFractureState( domain ); // Compute if globally the fracture state has changed - int hasConfigurationConvergedGlobally; - MpiWrapper::allReduce( &hasConfigurationConverged, - &hasConfigurationConvergedGlobally, - 1, - MPI_LAND, - MPI_COMM_GEOS ); + int const hasConfigurationConvergedGlobally = MpiWrapper::allReduce( hasConfigurationConverged, + MpiWrapper::Reduction::LogicalAnd, + MPI_COMM_GEOS ); // for this solver it makes sense to reset the state. // if( !hasConfigurationConvergedGlobally ) diff --git a/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseFVM.cpp b/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseFVM.cpp index aeb88ef68e3..67ee8bf2f9a 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseFVM.cpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/CompositionalMultiphaseFVM.cpp @@ -165,14 +165,7 @@ void CompositionalMultiphaseFVM::initializePreSubGroups() ? 
LinearSolverParameters::MGR::StrategyType::thermalCompositionalMultiphaseFVM
    : LinearSolverParameters::MGR::StrategyType::compositionalMultiphaseFVM;
 
-  DomainPartition & domain = this->getGroupByPath< DomainPartition >( "/Problem/domain" );
-  NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager();
-  FiniteVolumeManager const & fvManager = numericalMethodManager.getFiniteVolumeManager();
-  if( !fvManager.hasGroup< FluxApproximationBase >( m_discretizationName ) )
-  {
-    GEOS_ERROR( "A discretization deriving from FluxApproximationBase must be selected with CompositionalMultiphaseFlow" );
-  }
-
+  checkDiscretizationName();
 }
 
 void CompositionalMultiphaseFVM::setupDofs( DomainPartition const & domain,
diff --git a/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.cpp b/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.cpp
index e9a55b86c46..fe3bad87c88 100644
--- a/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.cpp
+++ b/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.cpp
@@ -368,6 +368,18 @@ void FlowSolverBase::initializePreSubGroups()
   }
 }
 
+void FlowSolverBase::checkDiscretizationName() const
+{
+  DomainPartition const & domain = this->getGroupByPath< DomainPartition >( "/Problem/domain" );
+  NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager();
+  FiniteVolumeManager const & fvManager = numericalMethodManager.getFiniteVolumeManager();
+  if( !fvManager.hasGroup< FluxApproximationBase >( m_discretizationName ) )
+  {
+    GEOS_ERROR( GEOS_FMT( "{}: cannot find discretization named '{}' (a discretization deriving from FluxApproximationBase must be selected for {} solver '{}')",
+                          getDataContext(), m_discretizationName, getCatalogName(), getName()));
+  }
+}
+
 void FlowSolverBase::validatePoreVolumes( DomainPartition const & domain ) const
 {
   real64 minPoreVolume = LvArray::NumericLimits< real64 >::max;
@@ -671,15 +683,13 @@ void FlowSolverBase::findMinMaxElevationInEquilibriumTarget( DomainPartition & d
   } );
 
-  MpiWrapper::allReduce( localMaxElevation.data(),
-                         maxElevation.data(),
-                         localMaxElevation.size(),
-                         MpiWrapper::getMpiOp( MpiWrapper::Reduction::Max ),
+  MpiWrapper::allReduce( localMaxElevation.toView(),
+                         maxElevation,
+                         MpiWrapper::Reduction::Max,
                          MPI_COMM_GEOS );
-  MpiWrapper::allReduce( localMinElevation.data(),
-                         minElevation.data(),
-                         localMinElevation.size(),
-                         MpiWrapper::getMpiOp( MpiWrapper::Reduction::Min ),
+  MpiWrapper::allReduce( localMinElevation.toView(),
+                         minElevation,
+                         MpiWrapper::Reduction::Min,
                          MPI_COMM_GEOS );
 }
 
@@ -726,10 +736,9 @@ void FlowSolverBase::computeSourceFluxSizeScalingFactor( real64 const & time,
   } );
 
   // synchronize the set size over all the MPI ranks
-  MpiWrapper::allReduce( bcAllSetsSize.data(),
-                         bcAllSetsSize.data(),
-                         bcAllSetsSize.size(),
-                         MpiWrapper::getMpiOp( MpiWrapper::Reduction::Sum ),
+  MpiWrapper::allReduce( bcAllSetsSize,
+                         bcAllSetsSize,
+                         MpiWrapper::Reduction::Sum,
                          MPI_COMM_GEOS );
 }
 
@@ -809,10 +818,9 @@ void FlowSolverBase::saveAquiferConvergedState( real64 const & time,
     localSumFluxes[aquiferIndex] += targetSetSumFluxes;
   } );
 
-  MpiWrapper::allReduce( localSumFluxes.data(),
-                         globalSumFluxes.data(),
-                         localSumFluxes.size(),
-                         MpiWrapper::getMpiOp( MpiWrapper::Reduction::Sum ),
+  MpiWrapper::allReduce( localSumFluxes,
+                         globalSumFluxes,
+                         MpiWrapper::Reduction::Sum,
                          MPI_COMM_GEOS );
 
   // Step 3: we are ready to save the summed fluxes for each individual aquifer
diff --git 
a/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.hpp b/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.hpp index b97716d0b00..19e79d90ab2 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.hpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/FlowSolverBase.hpp @@ -230,6 +230,8 @@ class FlowSolverBase : public PhysicsSolverBase virtual void initializePreSubGroups() override; + void checkDiscretizationName() const; + virtual void initializePostInitialConditionsPreSubGroups() override; void initializeState( DomainPartition & domain ); diff --git a/src/coreComponents/physicsSolvers/fluidFlow/ReactiveCompositionalMultiphaseOBL.cpp b/src/coreComponents/physicsSolvers/fluidFlow/ReactiveCompositionalMultiphaseOBL.cpp index 07c38cd2726..4670d6425a4 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/ReactiveCompositionalMultiphaseOBL.cpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/ReactiveCompositionalMultiphaseOBL.cpp @@ -125,14 +125,7 @@ void ReactiveCompositionalMultiphaseOBL::initializePreSubGroups() { FlowSolverBase::initializePreSubGroups(); - DomainPartition & domain = this->getGroupByPath< DomainPartition >( "/Problem/domain" ); - NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager(); - FiniteVolumeManager const & fvManager = numericalMethodManager.getFiniteVolumeManager(); - if( !fvManager.hasGroup< FluxApproximationBase >( m_discretizationName ) ) - { - GEOS_ERROR( "A discretization deriving from FluxApproximationBase must be selected with ReactiveCompositionalMultiphaseOBL" ); - } - + checkDiscretizationName(); } void ReactiveCompositionalMultiphaseOBL::setupDofs( DomainPartition const & domain, diff --git a/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseFVM.cpp b/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseFVM.cpp index cc9cdc6aaf6..5da5846273c 100644 --- a/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseFVM.cpp +++ b/src/coreComponents/physicsSolvers/fluidFlow/SinglePhaseFVM.cpp @@ -68,14 +68,7 @@ void SinglePhaseFVM< BASE >::initializePreSubGroups() { BASE::initializePreSubGroups(); - DomainPartition & domain = this->template getGroupByPath< DomainPartition >( "/Problem/domain" ); - NumericalMethodsManager const & numericalMethodManager = domain.getNumericalMethodManager(); - FiniteVolumeManager const & fvManager = numericalMethodManager.getFiniteVolumeManager(); - - if( !fvManager.hasGroup< FluxApproximationBase >( m_discretizationName ) ) - { - GEOS_ERROR( "A discretization deriving from FluxApproximationBase must be selected with SinglePhaseFVM" ); - } + this->checkDiscretizationName(); if( m_isThermal ) { diff --git a/src/coreComponents/physicsSolvers/multiphysics/HydrofractureSolver.cpp b/src/coreComponents/physicsSolvers/multiphysics/HydrofractureSolver.cpp index a3e77c5a2c4..ab48ca06ea6 100644 --- a/src/coreComponents/physicsSolvers/multiphysics/HydrofractureSolver.cpp +++ b/src/coreComponents/physicsSolvers/multiphysics/HydrofractureSolver.cpp @@ -226,11 +226,9 @@ real64 HydrofractureSolver< POROMECHANICS_SOLVER >::fullyCoupledSolverStep( real { locallyFractured = 1; } - MpiWrapper::allReduce( &locallyFractured, - &globallyFractured, - 1, - MPI_MAX, - MPI_COMM_GEOS ); + globallyFractured = MpiWrapper::allReduce( locallyFractured, + MpiWrapper::Reduction::Max, + MPI_COMM_GEOS ); if( globallyFractured == 0 ) { diff --git a/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsLagrangianFEM.cpp 
b/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsLagrangianFEM.cpp index fa5c1685db3..dd19b20d9b3 100644 --- a/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsLagrangianFEM.cpp +++ b/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsLagrangianFEM.cpp @@ -513,11 +513,9 @@ real64 SolidMechanicsLagrangianFEM::solverStep( real64 const & time_n, { locallyFractured = 1; } - MpiWrapper::allReduce( &locallyFractured, - &globallyFractured, - 1, - MPI_MAX, - MPI_COMM_GEOS ); + globallyFractured = MpiWrapper::allReduce( locallyFractured, + MpiWrapper::Reduction::Max, + MPI_COMM_GEOS ); } if( globallyFractured == 0 ) { diff --git a/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsMPM.cpp b/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsMPM.cpp index a2b1d3ec2ff..c576f6513e2 100644 --- a/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsMPM.cpp +++ b/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsMPM.cpp @@ -3313,23 +3313,17 @@ void SolidMechanicsMPM::printProfilingResults() // Get total CPU time for the entire time step real64 totalStepTimeThisRank = m_profilingTimes[numIntervals] - m_profilingTimes[0]; - real64 totalStepTimeAllRanks; - MpiWrapper::allReduce< real64 >( &totalStepTimeThisRank, - &totalStepTimeAllRanks, - 1, - MPI_SUM, - MPI_COMM_GEOS ); + real64 const totalStepTimeAllRanks = MpiWrapper::allReduce( totalStepTimeThisRank, + MpiWrapper::Reduction::Sum, + MPI_COMM_GEOS ); // Get total CPU times for each queried time interval for( unsigned int i = 0; i < numIntervals; i++ ) { real64 timeIntervalThisRank = ( m_profilingTimes[i+1] - m_profilingTimes[i] ); - real64 timeIntervalAllRanks; - MpiWrapper::allReduce< real64 >( &timeIntervalThisRank, - &timeIntervalAllRanks, - 1, - MPI_SUM, - MPI_COMM_GEOS ); + real64 const timeIntervalAllRanks = MpiWrapper::allReduce( timeIntervalThisRank, + MpiWrapper::Reduction::Sum, + MPI_COMM_GEOS ); if( rank == 0 ) { timeIntervalsAllRanks[i] = timeIntervalAllRanks; diff --git a/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsStatistics.cpp b/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsStatistics.cpp index d16dd1ea519..12f47f2b3e3 100644 --- a/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsStatistics.cpp +++ b/src/coreComponents/physicsSolvers/solidMechanics/SolidMechanicsStatistics.cpp @@ -146,16 +146,14 @@ void SolidMechanicsStatistics::computeNodeStatistics( MeshLevel & mesh, real64 c nodeStatistics.minDisplacement[1] = minDispY.get(); nodeStatistics.minDisplacement[2] = minDispZ.get(); - MpiWrapper::allReduce( nodeStatistics.maxDisplacement.data(), - nodeStatistics.maxDisplacement.data(), - 3, - MpiWrapper::getMpiOp( MpiWrapper::Reduction::Max ), + MpiWrapper::allReduce( nodeStatistics.maxDisplacement, + nodeStatistics.maxDisplacement, + MpiWrapper::Reduction::Max, MPI_COMM_GEOS ); - MpiWrapper::allReduce( nodeStatistics.minDisplacement.data(), - nodeStatistics.minDisplacement.data(), - 3, - MpiWrapper::getMpiOp( MpiWrapper::Reduction::Min ), + MpiWrapper::allReduce( nodeStatistics.minDisplacement, + nodeStatistics.minDisplacement, + MpiWrapper::Reduction::Min, MPI_COMM_GEOS ); TableData mechanicsData; diff --git a/src/coreComponents/physicsSolvers/surfaceGeneration/SurfaceGenerator.cpp b/src/coreComponents/physicsSolvers/surfaceGeneration/SurfaceGenerator.cpp index 788c078ada3..e41674c9f82 100644 --- a/src/coreComponents/physicsSolvers/surfaceGeneration/SurfaceGenerator.cpp +++ 
b/src/coreComponents/physicsSolvers/surfaceGeneration/SurfaceGenerator.cpp @@ -4580,12 +4580,9 @@ SurfaceGenerator::calculateRuptureRate( SurfaceElementRegion & faceElementRegion maxRuptureRate = std::max( maxRuptureRate, ruptureRate( faceElemIndex ) ); } - real64 globalMaxRuptureRate; - MpiWrapper::allReduce( &maxRuptureRate, - &globalMaxRuptureRate, - 1, - MPI_MAX, - MPI_COMM_GEOS ); + real64 const globalMaxRuptureRate = MpiWrapper::allReduce( maxRuptureRate, + MpiWrapper::Reduction::Max, + MPI_COMM_GEOS ); return globalMaxRuptureRate; } diff --git a/src/coreComponents/physicsSolvers/wavePropagation/sem/elastic/secondOrderEqn/isotropic/ElasticWaveEquationSEM.cpp b/src/coreComponents/physicsSolvers/wavePropagation/sem/elastic/secondOrderEqn/isotropic/ElasticWaveEquationSEM.cpp index 7c2e50a4c42..9e84c25e6c6 100644 --- a/src/coreComponents/physicsSolvers/wavePropagation/sem/elastic/secondOrderEqn/isotropic/ElasticWaveEquationSEM.cpp +++ b/src/coreComponents/physicsSolvers/wavePropagation/sem/elastic/secondOrderEqn/isotropic/ElasticWaveEquationSEM.cpp @@ -1013,10 +1013,10 @@ void ElasticWaveEquationSEM::cleanup( real64 const time_n, computeAllSeismoTraces( time_n, 0.0, uy_np1, uy_n, dasReceivers, m_linearDASVectorY.toView(), true ); computeAllSeismoTraces( time_n, 0.0, uz_np1, uz_n, dasReceivers, m_linearDASVectorZ.toView(), true ); // sum contributions from all MPI ranks, since some receivers might be split among multiple ranks - MpiWrapper::allReduce( dasReceivers.data(), - dasReceivers.data(), + MpiWrapper::allReduce( dasReceivers, + dasReceivers, m_linearDASGeometry.size( 0 ), - MpiWrapper::getMpiOp( MpiWrapper::Reduction::Sum ), + MpiWrapper::Reduction::Sum, MPI_COMM_GEOS ); WaveSolverUtils::writeSeismoTrace( "dasTraceReceiver", getName(), m_outputSeismoTrace, m_linearDASGeometry.size( 0 ), m_receiverIsLocal, m_nsamplesSeismoTrace, dasReceivers );
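
For reference, a minimal usage sketch of how the new MpiWrapper::allReduce overloads compose. The caller `allReduceExample` and the buffer sizes are hypothetical (not part of this patch), and the snippet assumes a GEOS translation unit with MPI already initialized:

#include <vector>

#include "common/DataTypes.hpp"
#include "common/MpiWrapper.hpp"

namespace geos
{

void allReduceExample( MPI_Comm const comm )
{
  // Whole-container overload: source and destination value types must match
  // and be trivially copyable, and the two sizes must be equal (asserted).
  array1d< real64 > localNorms( 3 );
  array1d< real64 > globalNorms( 3 );
  MpiWrapper::allReduce( localNorms, globalNorms, MpiWrapper::Reduction::Sum, comm );

  // Counted overload: reduces only the first `count` entries; `count` must
  // not exceed the size of either container (asserted).
  std::vector< real64 > sendBuf( 8, 0.0 );
  std::vector< real64 > recvBuf( 8, 0.0 );
  MpiWrapper::allReduce( sendBuf, recvBuf, 4, MpiWrapper::Reduction::Max, comm );

  // Scalar overload with the new logical reductions: each rank contributes a
  // flag and receives the global AND, as in SolidMechanicsEmbeddedFractures.
  int const locallyConverged = 1;
  int const globallyConverged =
    MpiWrapper::allReduce( locallyConverged, MpiWrapper::Reduction::LogicalAnd, comm );
  if( globallyConverged == 0 )
  {
    // e.g. reset state or cut the time step, as the solvers above do.
  }
}

} // namespace geos

Note that several call sites in this patch pass the same container as src and dst (e.g. bcAllSetsSize, dasReceivers); this presumes the underlying pointer-based wrapper tolerates aliased send/receive buffers (e.g. via MPI_IN_PLACE).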