From 18811d435747a57c324f1c106cdf8b56e3a41dbd Mon Sep 17 00:00:00 2001 From: Cheng Li <69489965+chengcli@users.noreply.github.com> Date: Sat, 9 Mar 2024 23:12:04 -0500 Subject: [PATCH] Refactoring Exchanger (#128) - simplify exchanger - add back more complex diagnostics --- patches/24.time_integrator.patch | 2 +- src/diagnostics/buoyancy.cpp | 6 +- src/diagnostics/curl.cpp | 6 +- src/diagnostics/diagnostics.cpp | 14 +- src/diagnostics/diagnostics.hpp | 117 ++++++++++------- src/diagnostics/diagnostics_factory.cpp | 22 ++-- src/diagnostics/divergence.cpp | 5 +- .../{todo => }/horizontal_divergence.cpp | 13 +- src/diagnostics/hydro_flux.cpp | 92 +++++++++++++ src/diagnostics/hydro_mean.cpp | 30 +++-- src/diagnostics/pressure_anomaly.cpp | 66 ++++++++++ src/diagnostics/radiative_flux.cpp | 88 +++++++++++++ .../{todo => }/temperature_anomaly.cpp | 19 +-- src/diagnostics/todo/pressure_anomaly.cpp | 37 ------ src/diagnostics/todo/radiative_flux.cpp | 86 ------------ src/diagnostics/todo/total_flux.cpp | 77 ----------- src/exchanger/exchanger.cpp | 109 ++++++++++++++++ src/exchanger/exchanger.hpp | 123 +++++++++--------- src/exchanger/message_traits.cpp | 33 ----- src/exchanger/message_traits.hpp | 64 --------- src/exchanger/neighbor_exchanger.hpp | 70 ---------- src/harp/radiation_band.hpp | 4 +- src/harp/radiation_band_exchanger.cpp | 7 +- src/mesh_destroy.cpp | 2 +- src/mesh_setup.cpp | 2 +- src/nbody/particle_data.cpp | 18 +-- src/nbody/particle_data.hpp | 8 +- src/nbody/particle_exchanger.cpp | 59 ++++++++- src/nbody/particles.hpp | 8 +- src/outputs/load_user_output_data.cpp | 2 +- src/outputs/netcdf.cpp | 8 +- src/outputs/output_utils.cpp | 17 ++- src/outputs/pnetcdf.cpp | 99 +++++++------- 33 files changed, 702 insertions(+), 611 deletions(-) rename src/diagnostics/{todo => }/horizontal_divergence.cpp (87%) create mode 100644 src/diagnostics/hydro_flux.cpp create mode 100644 src/diagnostics/pressure_anomaly.cpp create mode 100644 src/diagnostics/radiative_flux.cpp 
rename src/diagnostics/{todo => }/temperature_anomaly.cpp (80%) delete mode 100644 src/diagnostics/todo/pressure_anomaly.cpp delete mode 100644 src/diagnostics/todo/radiative_flux.cpp delete mode 100644 src/diagnostics/todo/total_flux.cpp delete mode 100644 src/exchanger/message_traits.cpp delete mode 100644 src/exchanger/message_traits.hpp delete mode 100644 src/exchanger/neighbor_exchanger.hpp diff --git a/patches/24.time_integrator.patch b/patches/24.time_integrator.patch index 329f9851..3cf654df 100644 --- a/patches/24.time_integrator.patch +++ b/patches/24.time_integrator.patch @@ -123,7 +123,7 @@ index 2bfc5178..e0fca29d 100644 + auto all_diags = pmb->pimpl->all_diags; + + for (auto& diag : all_diags) { -+ diag->Progress(pmb->phydro->w); ++ diag->Progress(pmb); + } + pmb->UserWorkInLoop(); diff --git a/src/diagnostics/buoyancy.cpp b/src/diagnostics/buoyancy.cpp index 9d08854a..df11fc4a 100644 --- a/src/diagnostics/buoyancy.cpp +++ b/src/diagnostics/buoyancy.cpp @@ -14,8 +14,10 @@ Buoyancy::Buoyancy(MeshBlock* pmb) : Diagnostics(pmb, "b") { grav_ = pmb->phydro->hsrc.GetG1(); } -void Buoyancy::Finalize(AthenaArray const& w) { - MeshBlock* pmb = pmy_block_; +void Buoyancy::Finalize(MeshBlock* pmb) { + Coordinates* pcoord = pmb->pcoord; + auto const& w = pmb->phydro->w; + int is = pmb->is, js = pmb->js, ks = pmb->ks; int ie = pmb->ie, je = pmb->je, ke = pmb->ke; diff --git a/src/diagnostics/curl.cpp b/src/diagnostics/curl.cpp index 4f9ee75a..89250583 100644 --- a/src/diagnostics/curl.cpp +++ b/src/diagnostics/curl.cpp @@ -1,5 +1,6 @@ // athena #include +#include #include // canoe @@ -23,9 +24,10 @@ Curl::Curl(MeshBlock *pmb) : Diagnostics(pmb, "curl") { } } -void Curl::Finalize(AthenaArray const &w) { - MeshBlock *pmb = pmy_block_; +void Curl::Finalize(MeshBlock *pmb) { Coordinates *pcoord = pmb->pcoord; + auto const &w = pmb->phydro->w; + int is = pmb->is, js = pmb->js, ks = pmb->ks; int ie = pmb->ie, je = pmb->je, ke = pmb->ke; diff --git 
a/src/diagnostics/diagnostics.cpp b/src/diagnostics/diagnostics.cpp index e88127ab..1c73fd67 100644 --- a/src/diagnostics/diagnostics.cpp +++ b/src/diagnostics/diagnostics.cpp @@ -12,16 +12,21 @@ // diagnostics #include "diagnostics.hpp" -Diagnostics::Diagnostics(MeshBlock *pmb, std::string name) - : NamedGroup(name), pmy_block_(pmb), ncycle_(0) { +Diagnostics::Diagnostics(MeshBlock *pmb, std::string name) : NamedGroup(name) { Application::Logger app("main"); app->Log("Initialize Diagnostics"); ncells1_ = pmb->block_size.nx1 + 2 * (NGHOST); ncells2_ = 1; ncells3_ = 1; - if (pmb->pmy_mesh->f2) ncells2_ = pmb->block_size.nx2 + 2 * (NGHOST); - if (pmb->pmy_mesh->f3) ncells3_ = pmb->block_size.nx3 + 2 * (NGHOST); + + if (pmb->pmy_mesh->f2) { + ncells2_ = pmb->block_size.nx2 + 2 * (NGHOST); + } + + if (pmb->pmy_mesh->f3) { + ncells3_ = pmb->block_size.nx3 + 2 * (NGHOST); + } x1edge_.NewAthenaArray(ncells1_ + 1); x1edge_p1_.NewAthenaArray(ncells1_); @@ -38,6 +43,7 @@ Diagnostics::Diagnostics(MeshBlock *pmb, std::string name) vol_.NewAthenaArray(ncells1_); total_vol_.NewAthenaArray(ncells1_); + total_area_.NewAthenaArray(ncells1_); } Diagnostics::~Diagnostics() { diff --git a/src/diagnostics/diagnostics.hpp b/src/diagnostics/diagnostics.hpp index 5919afa4..b685ea4c 100644 --- a/src/diagnostics/diagnostics.hpp +++ b/src/diagnostics/diagnostics.hpp @@ -16,6 +16,7 @@ #include class ParameterInput; +class MeshBlock; class Diagnostics : public NamedGroup { public: @@ -28,19 +29,12 @@ class Diagnostics : public NamedGroup { virtual ~Diagnostics(); virtual int GetNumVars() const = 0; - virtual void Progress(AthenaArray const &w) {} - virtual void Finalize(AthenaArray const &w) {} + virtual void Progress(MeshBlock *pmb) {} + virtual void Finalize(MeshBlock *pmb) {} protected: - MeshBlock *pmy_block_; - int ncells1_, ncells2_, ncells3_; - int ncycle_; - - //! mean and eddy component - AthenaArray mean_, eddy_; - //!
MPI color of each rank std::vector color_; @@ -53,10 +47,7 @@ class Diagnostics : public NamedGroup { AthenaArray x1area_, x2area_, x2area_p1_, x3area_, x3area_p1_; - AthenaArray vol_, total_vol_; - - void GatherVolumnData(AthenaArray &total_vol, - AthenaArray &total_data); + AthenaArray vol_, total_vol_, total_area_; }; using DiagnosticsPtr = std::shared_ptr; @@ -74,7 +65,7 @@ class Divergence : public Diagnostics { Divergence(MeshBlock *pmb); virtual ~Divergence() {} - void Finalize(AthenaArray const &w) override; + void Finalize(MeshBlock *pmb) override; int GetNumVars() const override { return 1; } protected: @@ -87,7 +78,7 @@ class Curl : public Diagnostics { Curl(MeshBlock *pmb); virtual ~Curl() {} - void Finalize(AthenaArray const &w) override; + void Finalize(MeshBlock *pmb) override; int GetNumVars() const override { return 1; } protected: @@ -100,7 +91,7 @@ class Buoyancy : public Diagnostics { Buoyancy(MeshBlock *pmb); virtual ~Buoyancy() {} - void Finalize(AthenaArray const &w) override; + void Finalize(MeshBlock *pmb) override; int GetNumVars() const override { return 1; } protected: @@ -112,71 +103,99 @@ class Buoyancy : public Diagnostics { class HydroMean : public Diagnostics { public: HydroMean(MeshBlock *pmb); - virtual ~HydroMean(); + virtual ~HydroMean() {} - void Progress(AthenaArray const &w) override; - void Finalize(AthenaArray const &w) override; + void Progress(MeshBlock *pmb) override; + void Finalize(MeshBlock *pmb) override; int GetNumVars() const override { return NHYDRO; } + + protected: + int ncycle_; +}; + +// 5. horizontal divergence +class HorizontalDivergence : public Diagnostics { + public: + HorizontalDivergence(MeshBlock *pmb); + virtual ~HorizontalDivergence() {} + + void Finalize(MeshBlock *pmb) override; + int GetNumVars() const override { return 1; } + + protected: + AthenaArray v2f2_, v3f3_; }; -// 5. temperature anomaly -class TemperatureAnomaly : public Diagnostics { +// 6. 
temperature anomaly +class TemperatureAnomaly : public Diagnostics, public PlanarExchanger { public: TemperatureAnomaly(MeshBlock *pmb); virtual ~TemperatureAnomaly() {} - void Finalize(AthenaArray const &w); + + void Finalize(MeshBlock *pmb) override; + int GetNumVars() const override { return 1; } + + protected: + //! mean component + AthenaArray mean_; }; -// 6. pressure anomaly -class PressureAnomaly : public Diagnostics { +// 7. pressure anomaly +class PressureAnomaly : public Diagnostics, public PlanarExchanger { public: PressureAnomaly(MeshBlock *pmb); virtual ~PressureAnomaly() {} - void Finalize(AthenaArray const &w); + + void Finalize(MeshBlock *pmb) override; + int GetNumVars() const override { return 1; } protected: - AthenaArray pf_; + //! mean component + AthenaArray mean_; }; -/* 6. eddy flux -class EddyFlux : public Diagnostics { +// 8. total radiative flux +class RadiativeFlux : public Diagnostics, public PlanarExchanger { public: - EddyFlux(MeshBlock *pmb); - virtual ~EddyFlux(); - void Progress(AthenaArray const &w); - void Finalize(AthenaArray const &w); + RadiativeFlux(MeshBlock *pmb); + virtual ~RadiativeFlux() {} + + void Progress(MeshBlock *pmb) override; + void Finalize(MeshBlock *pmb) override; + int GetNumVars() const override { return 2; } + + protected: + int ncycle_; }; -// 7. hydro flux -class HydroFlux : public Diagnostics { +// 9. hydro flux +class HydroFlux : public Diagnostics, public PlanarExchanger { public: HydroFlux(MeshBlock *pmb); virtual ~HydroFlux() {} - void Progress(AthenaArray const &w); - void Finalize(AthenaArray const &w); -}; -// 8. 
horizontal divergence -class HorizontalDivergence : public Diagnostics { - public: - HorizontalDivergence(MeshBlock *pmb); - virtual ~HorizontalDivergence(); - void Finalize(AthenaArray const &w); + void Progress(MeshBlock *pmb) override; + void Finalize(MeshBlock *pmb) override; + int GetNumVars() const override { return NHYDRO; } protected: - AthenaArray v2f2_, v3f3_; + int ncycle_; }; - -// 10. total radiative flux -class RadiativeFlux : public Diagnostics { +/* 6. eddy flux +class EddyFlux : public Diagnostics { public: - RadiativeFlux(MeshBlock *pmb); - virtual ~RadiativeFlux() {} + EddyFlux(MeshBlock *pmb); + virtual ~EddyFlux(); void Progress(AthenaArray const &w); void Finalize(AthenaArray const &w); + + protected: + //! mean and eddy component + AthenaArray mean_, eddy_; }; + // 11. total angular momentum class SphericalAngularMomentum : public Diagnostics { public: diff --git a/src/diagnostics/diagnostics_factory.cpp b/src/diagnostics/diagnostics_factory.cpp index 42b96f56..2b44d87d 100644 --- a/src/diagnostics/diagnostics_factory.cpp +++ b/src/diagnostics/diagnostics_factory.cpp @@ -26,18 +26,18 @@ DiagnosticsContainer DiagnosticsFactory::CreateFrom(MeshBlock *pmb, diag.push_back(std::make_shared(pmb)); } else if (name == "mean") { // 4. diag.push_back(std::make_shared(pmb)); - /*} else if (name == "tempa") { // 4. - diag.push_back(std::make_shared(pmb)); - } else if (name == "presa") { // 5. - diag.push_back(std::make_shared(pmb)); - } else if (name == "eddyflux") { // 6. + } else if (name == "div_h") { // 5. + diag.push_back(std::make_shared(pmb)); + } else if (name == "tempa") { // 6. + diag.push_back(std::make_shared(pmb)); + } else if (name == "presa") { // 7. + diag.push_back(std::make_shared(pmb)); + } else if (name == "radflux") { // 8. + diag.push_back(std::make_shared(pmb)); + } else if (name == "hydroflux") { // 9. + diag.push_back(std::make_shared(pmb)); + /*} else if (name == "eddyflux") { // 6. 
diag.push_back(std::make_shared(pmb)); - } else if (name == "hydroflux") { // 7. - diag.push_back(std::make_shared(pmb)); - } else if (name == "div_h") { // 8. - diag.push_back(std::make_shared(pmb)); - } else if (name == "radflux") { // 10. - diag.push_back(std::make_shared(pmb)); } else if (name == "am") { // 11. diag.push_back(std::make_shared(pmb)); } else if (name == "eke") { // 12. diff --git a/src/diagnostics/divergence.cpp b/src/diagnostics/divergence.cpp index 0a5f153e..00663fb8 100644 --- a/src/diagnostics/divergence.cpp +++ b/src/diagnostics/divergence.cpp @@ -1,5 +1,6 @@ // athena #include +#include #include // canoe @@ -20,9 +21,9 @@ Divergence::Divergence(MeshBlock *pmb) : Diagnostics(pmb, "div") { } } -void Divergence::Finalize(AthenaArray const &w) { - MeshBlock *pmb = pmy_block_; +void Divergence::Finalize(MeshBlock *pmb) { Coordinates *pcoord = pmb->pcoord; + auto const &w = pmb->phydro->w; int is = pmb->is, js = pmb->js, ks = pmb->ks; int ie = pmb->ie, je = pmb->je, ke = pmb->ke; diff --git a/src/diagnostics/todo/horizontal_divergence.cpp b/src/diagnostics/horizontal_divergence.cpp similarity index 87% rename from src/diagnostics/todo/horizontal_divergence.cpp rename to src/diagnostics/horizontal_divergence.cpp index 607ad7d0..b74980e6 100644 --- a/src/diagnostics/todo/horizontal_divergence.cpp +++ b/src/diagnostics/horizontal_divergence.cpp @@ -1,5 +1,6 @@ // athena #include +#include #include // canoe @@ -9,21 +10,17 @@ HorizontalDivergence::HorizontalDivergence(MeshBlock *pmb) : Diagnostics(pmb, "div_h") { type = "SCALARS"; data.NewAthenaArray(ncells3_, ncells2_, ncells1_); + if (pmb->block_size.nx2 > 1) v2f2_.NewAthenaArray(ncells3_, ncells2_ + 1, ncells1_); if (pmb->block_size.nx3 > 1) v3f3_.NewAthenaArray(ncells3_ + 1, ncells2_, ncells1_); } -HorizontalDivergence::~HorizontalDivergence() { - data.DeleteAthenaArray(); - if (pmy_block_->block_size.nx2 > 1) v2f2_.DeleteAthenaArray(); - if (pmy_block_->block_size.nx3 > 1) 
v3f3_.DeleteAthenaArray(); -} - -void HorizontalDivergence::Finalize(AthenaArray const &w) { - MeshBlock *pmb = pmy_block_; +void HorizontalDivergence::Finalize(MeshBlock *pmb) { Coordinates *pcoord = pmb->pcoord; + auto const &w = pmb->phydro->w; + int is = pmb->is, js = pmb->js, ks = pmb->ks; int ie = pmb->ie, je = pmb->je, ke = pmb->ke; diff --git a/src/diagnostics/hydro_flux.cpp b/src/diagnostics/hydro_flux.cpp new file mode 100644 index 00000000..82539df1 --- /dev/null +++ b/src/diagnostics/hydro_flux.cpp @@ -0,0 +1,92 @@ +// athena +#include +#include + +// snap +#include + +// canoe +#include + +// diagnostics +#include "diagnostics.hpp" + +HydroFlux::HydroFlux(MeshBlock *pmb) + : Diagnostics(pmb, "hydroflux"), ncycle_(0) { + type = "VECTORS"; + std::string varname = "v1rho,"; + + for (int n = 1; n <= NVAPOR; ++n) varname += "v1q" + std::to_string(n) + ","; + for (int n = 0; n < 3; ++n) varname += "v1v" + std::to_string(n + 1) + ","; + varname += "v1T"; + + SetName(varname); + data.NewAthenaArray(NHYDRO, ncells1_); + + Regroup(pmb, X1DIR); + + // calculate total volume + total_vol_.ZeroClear(); + for (int k = pmb->ks; k <= pmb->ke; ++k) + for (int j = pmb->js; j <= pmb->je; ++j) { + pmb->pcoord->CellVolume(k, j, pmb->is, pmb->ie, vol_); + + for (int i = pmb->is; i <= pmb->ie; ++i) { + total_vol_(i) += vol_(i); + } + } + +#ifdef MPI_PARALLEL + MPI_Allreduce(MPI_IN_PLACE, total_vol_.data(), total_vol_.GetSize(), + MPI_ATHENA_REAL, MPI_SUM, mpi_comm_); +#endif +} + +void HydroFlux::Progress(MeshBlock *pmb) { + Coordinates *pcoord = pmb->pcoord; + auto const &w = pmb->phydro->w; + + auto pthermo = Thermodynamics::GetInstance(); + + int is = pmb->is, js = pmb->js, ks = pmb->ks; + int ie = pmb->ie, je = pmb->je, ke = pmb->ke; + + if (ncycle_ == 0) { + std::fill(data.data(), data.data() + data.GetSize(), 0.); + } + + // sum over horizontal grids weighted by volume + for (int k = ks; k <= ke; ++k) + for (int j = js; j <= je; ++j) { + pcoord->CellVolume(k, j, is,
ie, vol_); + + for (int i = is; i <= ie; ++i) { + data(0, i) += vol_(i) * w(IDN, k, j, i) * w(IVX, k, j, i); + for (int n = 1; n < IPR; ++n) + data(n, i) += vol_(i) * w(n, k, j, i) * w(IVX, k, j, i); + data(IPR, i) += + vol_(i) * pthermo->GetTemp(pmb, k, j, i) * w(IVX, k, j, i); + } + } + + ncycle_++; +} + +void HydroFlux::Finalize(MeshBlock *pmb) { + // take time and spatial average + if (ncycle_ > 0) { + // sum over all ranks +#ifdef MPI_PARALLEL + MPI_Allreduce(MPI_IN_PLACE, data.data(), data.GetSize(), MPI_ATHENA_REAL, + MPI_SUM, mpi_comm_); +#endif + + for (int n = 0; n < NHYDRO; ++n) { + for (int i = pmb->is; i <= pmb->ie; ++i) + data(n, i) /= ncycle_ * total_vol_(i); + } + } + + // clear cycle; + ncycle_ = 0; +} diff --git a/src/diagnostics/hydro_mean.cpp b/src/diagnostics/hydro_mean.cpp index b5c5ba8b..244617a1 100644 --- a/src/diagnostics/hydro_mean.cpp +++ b/src/diagnostics/hydro_mean.cpp @@ -1,7 +1,6 @@ // athena #include #include -#include // snap #include @@ -9,7 +8,7 @@ // canoe #include "diagnostics.hpp" -HydroMean::HydroMean(MeshBlock *pmb) : Diagnostics(pmb, "mean") { +HydroMean::HydroMean(MeshBlock *pmb) : Diagnostics(pmb, "mean"), ncycle_(0) { type = "VECTORS"; std::string varname = "rho_bar,"; @@ -20,14 +19,12 @@ HydroMean::HydroMean(MeshBlock *pmb) : Diagnostics(pmb, "mean") { varname += "T_bar"; SetName(varname); - data.NewAthenaArray(NHYDRO, ncells3_, ncells2_, ncells1_); } -HydroMean::~HydroMean() { data.DeleteAthenaArray(); } - -void HydroMean::Progress(AthenaArray const &w) { - MeshBlock *pmb = pmy_block_; +void HydroMean::Progress(MeshBlock *pmb) { + Coordinates *pcoord = pmb->pcoord; + auto const &w = pmb->phydro->w; int is = pmb->is, js = pmb->js, ks = pmb->ks; int ie = pmb->ie, je = pmb->je, ke = pmb->ke; @@ -41,19 +38,23 @@ void HydroMean::Progress(AthenaArray const &w) { for (int n = 0; n < IPR; ++n) for (int k = ks; k <= ke; ++k) for (int j = js; j <= je; ++j) - for (int i = is; i <= ie; ++i) data(n, k, j, i) += w(n, k, j, i); + for 
(int i = is; i <= ie; ++i) { + data(n, k, j, i) += w(n, k, j, i); + } // Temperature field is save in IPR for (int k = ks; k <= ke; ++k) for (int j = js; j <= je; ++j) - for (int i = is; i <= ie; ++i) + for (int i = is; i <= ie; ++i) { data(IPR, k, j, i) += pthermo->GetTemp(pmb, k, j, i); + } ncycle_++; } -void HydroMean::Finalize(AthenaArray const &w) { - MeshBlock *pmb = pmy_block_; +void HydroMean::Finalize(MeshBlock *pmb) { + Coordinates *pcoord = pmb->pcoord; + auto const &w = pmb->phydro->w; int is = pmb->is, js = pmb->js, ks = pmb->ks; int ie = pmb->ie, je = pmb->je, ke = pmb->ke; @@ -72,12 +73,15 @@ void HydroMean::Finalize(AthenaArray const &w) { for (int n = 0; n < IPR; ++n) for (int k = ks; k <= ke; ++k) for (int j = js; j <= je; ++j) - for (int i = is; i <= ie; ++i) data(n, k, j, i) = w(n, k, j, i); + for (int i = is; i <= ie; ++i) { + data(n, k, j, i) = w(n, k, j, i); + } for (int k = ks; k <= ke; ++k) for (int j = js; j <= je; ++j) - for (int i = is; i <= ie; ++i) + for (int i = is; i <= ie; ++i) { data(IPR, k, j, i) = pthermo->GetTemp(pmb, k, j, i); + } } // clear cycle diff --git a/src/diagnostics/pressure_anomaly.cpp b/src/diagnostics/pressure_anomaly.cpp new file mode 100644 index 00000000..af9a3dca --- /dev/null +++ b/src/diagnostics/pressure_anomaly.cpp @@ -0,0 +1,66 @@ +// athena +#include +#include + +// canoe +#include + +// diagnostics +#include "diagnostics.hpp" + +PressureAnomaly::PressureAnomaly(MeshBlock *pmb) : Diagnostics(pmb, "presa") { + type = "SCALARS"; + mean_.NewAthenaArray(ncells1_); + data.NewAthenaArray(ncells3_, ncells2_, ncells1_); + + Regroup(pmb, X1DIR); + + // calculate total volume + total_vol_.ZeroClear(); + for (int k = pmb->ks; k <= pmb->ke; ++k) + for (int j = pmb->js; j <= pmb->je; ++j) { + pmb->pcoord->CellVolume(k, j, pmb->is, pmb->ie, vol_); + + for (int i = pmb->is; i <= pmb->ie; ++i) { + total_vol_(i) += vol_(i); + } + } + +#ifdef MPI_PARALLEL + MPI_Allreduce(MPI_IN_PLACE, total_vol_.data(), 
total_vol_.GetSize(), + MPI_ATHENA_REAL, MPI_SUM, mpi_comm_); +#endif +} + +void PressureAnomaly::Finalize(MeshBlock *pmb) { + Coordinates *pcoord = pmb->pcoord; + auto const &w = pmb->phydro->w; + + int is = pmb->is, js = pmb->js, ks = pmb->ks; + int ie = pmb->ie, je = pmb->je, ke = pmb->ke; + + mean_.ZeroClear(); + + // calculate horizontal mean + for (int k = ks; k <= ke; ++k) + for (int j = js; j <= je; ++j) { + pcoord->CellVolume(k, j, is, ie, vol_); + + for (int i = is; i <= ie; ++i) { + mean_(i) += vol_(i) * w(IPR, k, j, i); + } + } + + // gatherAllData23_(total_vol_, mean_); +#ifdef MPI_PARALLEL + MPI_Allreduce(MPI_IN_PLACE, mean_.data(), mean_.GetSize(), MPI_ATHENA_REAL, + MPI_SUM, mpi_comm_); +#endif + + // pressure anomaly + for (int k = ks; k <= ke; ++k) + for (int j = js; j <= je; ++j) + for (int i = is; i <= ie; ++i) { + data(k, j, i) = w(IPR, k, j, i) - mean_(i) / total_vol_(i); + } +} diff --git a/src/diagnostics/radiative_flux.cpp b/src/diagnostics/radiative_flux.cpp new file mode 100644 index 00000000..9db5199e --- /dev/null +++ b/src/diagnostics/radiative_flux.cpp @@ -0,0 +1,88 @@ +// C/C++ headers +// MPI headers +#ifdef MPI_PARALLEL +#include +#endif + +// athena +#include + +// harp +#include + +// canoe +#include + +// diagnostics +#include "diagnostics.hpp" + +RadiativeFlux::RadiativeFlux(MeshBlock *pmb) + : Diagnostics(pmb, "rflx_up,rflx_dn"), ncycle_(0) { + type = "VECTORS"; + + // 0: upward flux + // 1: downward flux + data.NewAthenaArray(2, ncells1_ + 1); + + Regroup(pmb, X1DIR); + + // calculate total face area + total_area_.ZeroClear(); + for (int k = pmb->ks; k <= pmb->ke; ++k) + for (int j = pmb->js; j <= pmb->je; ++j) { + pmb->pcoord->Face1Area(k, j, pmb->is, pmb->ie + 1, x1area_); + + for (int i = pmb->is; i <= pmb->ie + 1; ++i) { + total_area_(i) += x1area_(i); + } + } + +#ifdef MPI_PARALLEL + MPI_Allreduce(MPI_IN_PLACE, total_area_.data(), total_area_.GetSize(), + MPI_ATHENA_REAL, MPI_SUM, mpi_comm_); +#endif +} + +void 
RadiativeFlux::Progress(MeshBlock *pmb) { + Coordinates *pcoord = pmb->pcoord; + auto prad = pmb->pimpl->prad; + + int is = pmb->is, js = pmb->js, ks = pmb->ks; + int ie = pmb->ie, je = pmb->je, ke = pmb->ke; + + if (ncycle_ == 0) { + std::fill(data.data(), data.data() + data.GetSize(), 0.); + } + + // sum over horizontal grids weighted by area + for (int b = 0; b < prad->GetNumBands(); ++b) { + auto pband = prad->GetBand(b); + for (int k = ks; k <= ke; ++k) + for (int j = js; j <= je; ++j) { + pcoord->Face1Area(k, j, is, ie + 1, x1area_); + for (int i = is; i <= ie + 1; ++i) { + data(0, i) += x1area_(i) * pband->bflxup(k, j, i); + data(1, i) += x1area_(i) * pband->bflxdn(k, j, i); + } + } + } + ncycle_++; +} + +void RadiativeFlux::Finalize(MeshBlock *pmb) { + // take time and spatial average + if (ncycle_ > 0) { + // sum over all ranks +#ifdef MPI_PARALLEL + MPI_Allreduce(MPI_IN_PLACE, data.data(), data.GetSize(), MPI_ATHENA_REAL, + MPI_SUM, mpi_comm_); +#endif + for (int i = pmb->is; i <= pmb->ie + 1; ++i) { + data(0, i) /= ncycle_ * total_area_(i); + data(1, i) /= ncycle_ * total_area_(i); + } + } + + // clear cycle; + ncycle_ = 0; +} diff --git a/src/diagnostics/todo/temperature_anomaly.cpp b/src/diagnostics/temperature_anomaly.cpp similarity index 80% rename from src/diagnostics/todo/temperature_anomaly.cpp rename to src/diagnostics/temperature_anomaly.cpp index 31cb742b..4d5e3c81 100644 --- a/src/diagnostics/todo/temperature_anomaly.cpp +++ b/src/diagnostics/temperature_anomaly.cpp @@ -1,20 +1,23 @@ // athena #include -#include +#include + +// canoe +#include // snap #include -// canoe +// diagnostics #include "diagnostics.hpp" TemperatureAnomaly::TemperatureAnomaly(MeshBlock *pmb) - : Diagnostics(pmb, "tempa", "Temperature anomaly") { + : Diagnostics(pmb, "tempa") { type = "SCALARS"; mean_.NewAthenaArray(ncells1_); data.NewAthenaArray(ncells3_, ncells2_, ncells1_); - Regroup(X1DIR); + Regroup(pmb, X1DIR); // calculate total volume total_vol_.ZeroClear(); @@ 
-33,9 +36,9 @@ TemperatureAnomaly::TemperatureAnomaly(MeshBlock *pmb) #endif } -void TemperatureAnomaly::Finalize(AthenaArray const &w) { - MeshBlock *pmb = pmy_block_; +void TemperatureAnomaly::Finalize(MeshBlock *pmb) { Coordinates *pcoord = pmb->pcoord; + auto const &w = pmb->phydro->w; auto pthermo = Thermodynamics::GetInstance(); @@ -50,7 +53,7 @@ void TemperatureAnomaly::Finalize(AthenaArray const &w) { pcoord->CellVolume(k, j, is, ie, vol_); for (int i = is; i <= ie; ++i) - mean_(i) += vol_(i) * pthermo->GetTemp(w.at(k, j, i)); + mean_(i) += vol_(i) * pthermo->GetTemp(pmb, k, j, i); } #ifdef MPI_PARALLEL @@ -63,6 +66,6 @@ void TemperatureAnomaly::Finalize(AthenaArray const &w) { for (int j = js; j <= je; ++j) for (int i = is; i <= ie; ++i) { data(k, j, i) = - pthermo->GetTemp(w.at(k, j, i)) - mean_(i) / total_vol_(i); + pthermo->GetTemp(pmb, k, j, i) - mean_(i) / total_vol_(i); } } diff --git a/src/diagnostics/todo/pressure_anomaly.cpp b/src/diagnostics/todo/pressure_anomaly.cpp deleted file mode 100644 index 79ed2e64..00000000 --- a/src/diagnostics/todo/pressure_anomaly.cpp +++ /dev/null @@ -1,37 +0,0 @@ -// athena -#include - -// canoe -#include "diagnostics.hpp" - -PressureAnomaly::PressureAnomaly(MeshBlock *pmb) - : Diagnostics(pmb, "presa", "Pressure anomaly") { - type = "SCALARS"; - units = "pa"; - mean_.NewAthenaArray(ncells1_); - data.NewAthenaArray(ncells3_, ncells2_, ncells1_); -} - -void PressureAnomaly::Finalize(AthenaArray const &w) { - MeshBlock *pmb = pmy_block_; - Coordinates *pcoord = pmb->pcoord; - int is = pmb->is, js = pmb->js, ks = pmb->ks; - int ie = pmb->ie, je = pmb->je, ke = pmb->ke; - - mean_.ZeroClear(); - - // calculate horizontal mean - for (int k = ks; k <= ke; ++k) - for (int j = js; j <= je; ++j) { - pcoord->CellVolume(k, j, is, ie, vol_); - for (int i = is; i <= ie; ++i) mean_(i) += vol_(i) * w(IPR, k, j, i); - } - - gatherAllData23_(total_vol_, mean_); - - // pressure anomaly - for (int k = ks; k <= ke; ++k) - for (int j = 
js; j <= je; ++j) - for (int i = is; i <= ie; ++i) - data(k, j, i) = w(IPR, k, j, i) - mean_(i) / total_vol_(i); -} diff --git a/src/diagnostics/todo/radiative_flux.cpp b/src/diagnostics/todo/radiative_flux.cpp deleted file mode 100644 index c4be9414..00000000 --- a/src/diagnostics/todo/radiative_flux.cpp +++ /dev/null @@ -1,86 +0,0 @@ -// C/C++ headers -// MPI headers -#ifdef MPI_PARALLEL -#include -#endif - -// athena -#include - -// harp -#include - -// canoe -#include "diagnostics.hpp" - -RadiativeFlux::RadiativeFlux(MeshBlock *pmb) - : Diagnostics(pmb, "radflux", - "total upward radiative flux,total downward radiative flux") { - type = "VECTORS"; - grid = "--F"; - units = "w/m^2"; - // 0: upward flux - // 1: downward flux - data.NewAthenaArray(2, 1, 1, ncells1_ + 1); -} - -void RadiativeFlux::Progress(AthenaArray const &w) { - MeshBlock *pmb = pmy_block_; - - int is = pmb->is, js = pmb->js, ks = pmb->ks; - int ie = pmb->ie, je = pmb->je, ke = pmb->ke; - - if (ncycle == 0) std::fill(data.data(), data.data() + data.GetSize(), 0.); - - // sum over horizontal grids weighted by area - RadiationBand *pband = pmb->prad->pband; - while (pband != NULL) { - for (int k = ks; k <= ke; ++k) - for (int j = js; j <= je; ++j) { - pmb->pcoord->Face1Area(k, j, is, ie + 1, x1area_); - for (int i = is; i <= ie + 1; ++i) { - data(0, i) += x1area_(i) * pband->bflxup(k, j, i); - data(1, i) += x1area_(i) * pband->bflxdn(k, j, i); - } - } - pband = pband->next; - } - - ncycle++; -} - -void RadiativeFlux::Finalize(AthenaArray const &w) { - MeshBlock *pmb = pmy_block_; - - int is = pmb->is, js = pmb->js, ks = pmb->ks; - int ie = pmb->ie, je = pmb->je, ke = pmb->ke; - - Real *total_area = new Real[ncells1_ + 1]; - std::fill(total_area, total_area + ncells1_ + 1, 0.); - - // calculate total face area - for (int k = ks; k <= ke; ++k) - for (int j = js; j <= je; ++j) { - pmb->pcoord->Face1Area(k, j, is, ie + 1, x1area_); - for (int i = is; i <= ie + 1; ++i) total_area[i] += x1area_(i); - } 
- - // take time and spatial average - if (ncycle > 0) { - // sum over all ranks -#ifdef MPI_PARALLEL - MPI_Allreduce(MPI_IN_PLACE, data.data(), data.GetSize(), MPI_ATHENA_REAL, - MPI_SUM, MPI_COMM_WORLD); - MPI_Allreduce(MPI_IN_PLACE, total_area, ncells1_ + 1, MPI_ATHENA_REAL, - MPI_SUM, MPI_COMM_WORLD); -#endif - for (int i = is; i <= ie + 1; ++i) { - data(0, i) /= ncycle * total_area[i]; - data(1, i) /= ncycle * total_area[i]; - } - } - - // clear cycle; - delete[] total_area; - ncycle = 0; -} diff --git a/src/diagnostics/todo/total_flux.cpp b/src/diagnostics/todo/total_flux.cpp deleted file mode 100644 index 53ee221b..00000000 --- a/src/diagnostics/todo/total_flux.cpp +++ /dev/null @@ -1,77 +0,0 @@ -// athena -#include -#include - -// snap -#include - -// canoe -#include "diagnostics.hpp" - -HydroFlux::HydroFlux(MeshBlock *pmb) : Diagnostics(pmb, "hydroflux") { - type = "VECTORS"; - grid = "--C"; - varname = "v1rho,"; - long_name = "horizontally averaged mass flux,"; - units = "kg/(m^2.s),"; - - for (int n = 1; n <= NVAPOR; ++n) { - varname += "v1q" + std::to_string(n) + ","; - units += "kg/(kg.m^2.s),"; - long_name += "horizontally averaged vapor mass flux,"; - } - for (int n = 0; n < 3; ++n) { - units += "m/s^2,"; - varname += "v1v" + std::to_string(n + 1) + ","; - long_name += "horizontally averaged momentum flux,"; - } - units += "m.K/s"; - varname += "v1T"; - long_name += "horizontally averaged heat flux"; - - SetLongName(long_name); - - data.NewAthenaArray(NHYDRO, 1, 1, ncells1_); -} - -void HydroFlux::Progress(AthenaArray const &w) { - MeshBlock *pmb = pmy_block_; - Coordinates *pcoord = pmb->pcoord; - Thermodynamics *pthermo = pmb->pthermo; - - int is = pmb->is, js = pmb->js, ks = pmb->ks; - int ie = pmb->ie, je = pmb->je, ke = pmb->ke; - - if (ncycle == 0) std::fill(data.data(), data.data() + data.GetSize(), 0.); - - // sum over horizontal grids weighted by volume - for (int k = ks; k <= ke; ++k) - for (int j = js; j <= je; ++j) { - 
pcoord->CellVolume(k, j, is, ie, vol_); - for (int i = is; i <= ie; ++i) { - data(0, i) += vol_(i) * w(IDN, k, j, i) * w(IVX, k, j, i); - for (int n = 1; n < IPR; ++n) - data(n, i) += vol_(i) * w(n, k, j, i) * w(IVX, k, j, i); - data(IPR, i) += - vol_(i) * pthermo->GetTemp(w.at(k, j, i)) * w(IVX, k, j, i); - } - } - - ncycle++; -} - -void HydroFlux::Finalize(AthenaArray const &w) { - MeshBlock *pmb = pmy_block_; - - gatherAllData23_(total_vol_, data); - - // take time and spatial average - if (ncycle > 0) { - for (int n = 0; n < NHYDRO; ++n) - for (int i = pmb->is; i <= pmb->ie; ++i) - data(n, i) /= ncycle * total_vol_(i); - } - - // clear cycle; - ncycle = 0; -} diff --git a/src/exchanger/exchanger.cpp b/src/exchanger/exchanger.cpp index 529c961a..22f8efe8 100644 --- a/src/exchanger/exchanger.cpp +++ b/src/exchanger/exchanger.cpp @@ -1,9 +1,118 @@ // athena #include +#include #include +// canoe +#include "exchanger.hpp" + +ExchangerBase::ExchangerBase() { +#ifdef MPI_PARALLEL + mpi_comm_ = MPI_COMM_WORLD; + color_.resize(Globals::nranks); + brank_.resize(Globals::nranks); + + for (int i = 0; i < Globals::nranks; ++i) { + color_[i] = -1; + brank_[i] = -1; + } +#else + color_.resize(1); + brank_.resize(1); + color_[0] = 0; + brank_[0] = -1; +#endif // MPI_PARALLEL +} + +ExchangerBase::~ExchangerBase() { +#ifdef MPI_PARALLEL + if (mpi_comm_ != MPI_COMM_WORLD) { + MPI_Comm_free(&mpi_comm_); + } +#endif // MPI_PARALLEL +} + +void ExchangerBase::setColor(MeshBlock const *pmb, CoordinateDirection dir) { +#ifdef MPI_PARALLEL + NeighborBlock bblock, tblock; + ExchangerHelper::find_neighbors(pmb, dir, &bblock, &tblock); + + if (dir == X1DIR) { + if (pmb->block_size.x1min <= pmb->pmy_mesh->mesh_size.x1min) { + bblock.snb.gid = -1; + bblock.snb.rank = -1; + } + if (pmb->block_size.x1max >= pmb->pmy_mesh->mesh_size.x1max) { + tblock.snb.gid = -1; + tblock.snb.rank = -1; + } + } else if (dir == X2DIR) { + if (pmb->block_size.x2min <= pmb->pmy_mesh->mesh_size.x2min) { + 
bblock.snb.gid = -1; + bblock.snb.rank = -1; + } + if (pmb->block_size.x2max >= pmb->pmy_mesh->mesh_size.x2max) { + tblock.snb.gid = -1; + tblock.snb.rank = -1; + } + } else { // X3DIR + if (pmb->block_size.x3min <= pmb->pmy_mesh->mesh_size.x3min) { + bblock.snb.gid = -1; + bblock.snb.rank = -1; + } + if (pmb->block_size.x3max >= pmb->pmy_mesh->mesh_size.x3max) { + tblock.snb.gid = -1; + tblock.snb.rank = -1; + } + } + + MPI_Allgather(&bblock.snb.rank, 1, MPI_INT, brank_.data(), 1, MPI_INT, + MPI_COMM_WORLD); +#else + brank_[0] = -1; +#endif + + int c = 0; + for (int i = 0; i < Globals::nranks; ++i) { + // color[i] = brank_[i] == -1 ? color[i] : color[brank_[i]]; + if (brank_[i] == -1) { + if (color_[i] == -1) color_[i] = c++; + } else { + color_[i] = color_[brank_[i]]; + } + } + +#ifdef MPI_PARALLEL + if (mpi_comm_ != MPI_COMM_WORLD) { + MPI_Comm_free(&mpi_comm_); + } + + MPI_Comm_split(MPI_COMM_WORLD, color_[Globals::my_rank], Globals::my_rank, + &mpi_comm_); +#endif +} + +int ExchangerBase::GetRankInGroup() const { + int r = 0; + int b = brank_[Globals::my_rank]; + while (b != -1) { + r++; + b = brank_[b]; + } + return r; +} + namespace ExchangerHelper { +int mpi_tag_ub; + +int create_mpi_tag(int lid, int tid, std::string name) { + int tag = BoundaryBase::CreateBvalsMPITag(lid, tid, 0); + + std::string str = name + std::to_string(tag); + return std::hash{}(str) % (mpi_tag_ub); +} + NeighborBlock const *find_bot_neighbor(MeshBlock const *pmb) { NeighborBlock *pbot = nullptr; diff --git a/src/exchanger/exchanger.hpp b/src/exchanger/exchanger.hpp index c66b8c70..6898d86b 100644 --- a/src/exchanger/exchanger.hpp +++ b/src/exchanger/exchanger.hpp @@ -2,26 +2,24 @@ #define SRC_EXCHANGER_EXCHANGER_HPP_ // Athena++ +#include #include // canoe #include #include -// exchanger -#include "message_traits.hpp" +#ifdef MPI_PARALLEL +#include +#endif // MPI_PARALLEL class NeighborBlock; class MeshBlock; class ExchangerBase { public: // constructor and destructor - 
ExchangerBase() { -#ifdef MPI_PARALLEL - mpi_comm_ = MPI_COMM_WORLD; -#endif - } - virtual ~ExchangerBase() {} + ExchangerBase(); + virtual ~ExchangerBase(); //! \brief Pack data to send buffer virtual void PackData(MeshBlock const *pmb) {} @@ -30,52 +28,60 @@ class ExchangerBase { virtual bool UnpackData(MeshBlock const *pmb) { return true; } //! \brief Send and receive data - virtual void Transfer(MeshBlock const *pmb, int n = -1) = 0; + virtual void Transfer(MeshBlock const *pmb, int n = -1) {} + + virtual void Regroup(MeshBlock const *pmb, CoordinateDirection dir) {} + + int GetRankInGroup() const; protected: + void setColor(MeshBlock const *pmb, CoordinateDirection dir); + #ifdef MPI_PARALLEL MPI_Comm mpi_comm_; #endif + + //! \brief MPI color of each block + std::vector color_; + + //! \brief MPI rank of the bottom of each block + std::vector brank_; }; -template +template class Exchanger : public ExchangerBase { public: // constructor and destructor - using DataType = typename MessageTraits::DataType; - using BufferType = std::vector; + using BufferType = std::vector; + #ifdef MPI_PARALLEL using ExchangerBase::mpi_comm_; #endif Exchanger() { - for (int n = 0; n < MessageTraits::num_buffers; ++n) { #ifdef MPI_PARALLEL + for (int n = 0; n < N; ++n) { req_mpi_send_[n] = MPI_REQUEST_NULL; req_mpi_recv_[n] = MPI_REQUEST_NULL; -#endif // MPI_PARALLEL } +#endif // MPI_PARALLEL } virtual ~Exchanger() { #ifdef MPI_PARALLEL - for (int n = 0; n < MessageTraits::num_buffers; ++n) { + for (int n = 0; n < N; ++n) { req_mpi_send_[n] = MPI_REQUEST_NULL; req_mpi_recv_[n] = MPI_REQUEST_NULL; } - - if (mpi_comm_ != MPI_COMM_WORLD) { - MPI_Comm_free(&mpi_comm_); - } #endif // MPI_PARALLEL } //! \brief Clear buffer virtual void ClearBuffer(MeshBlock const *pmb) { - for (int n = 0; n < MessageTraits::num_buffers; ++n) { #ifdef MPI_PARALLEL + for (int n = 0; n < N; ++n) { MPI_Wait(&req_mpi_send_[n], MPI_STATUS_IGNORE); -#endif // MPI_PARALLEL } +#endif // MPI_PARALLEL } //! 
\brief Set the boundary status @@ -84,18 +90,21 @@ class Exchanger : public ExchangerBase { } protected: - enum BoundaryStatus status_flag_[MessageTraits::num_buffers]; - BufferType send_buffer_[MessageTraits::num_buffers]; - BufferType recv_buffer_[MessageTraits::num_buffers]; + std::array status_flag_; + std::array send_buffer_; + std::array recv_buffer_; #ifdef MPI_PARALLEL - MPI_Request req_mpi_send_[MessageTraits::num_buffers]; - MPI_Request req_mpi_recv_[MessageTraits::num_buffers]; + std::array req_mpi_send_; + std::array req_mpi_recv_; #endif // MPI_PARALLEL }; namespace ExchangerHelper { +extern int mpi_tag_ub; +int create_mpi_tag(int lid, int tid, std::string name); + //! find bottom neighbor block NeighborBlock const *find_bot_neighbor(MeshBlock const *pmb); @@ -117,55 +126,51 @@ NeighborBlock const *find_front_neighbor(MeshBlock const *pmb); //! find neighbors in one coordinate direction void find_neighbors(MeshBlock const *pmb, CoordinateDirection dir, NeighborBlock *bblock, NeighborBlock *tblock); + } // namespace ExchangerHelper -template -class NeighborExchanger : public Exchanger { - public: - using Base = Exchanger; - using Base::recv_buffer_; - using Base::send_buffer_; +template +class LinearExchanger : public Exchanger { + public: // constructor and destructor + using Base = Exchanger; + #ifdef MPI_PARALLEL using Base::mpi_comm_; - using Base::req_mpi_recv_; - using Base::req_mpi_send_; #endif // MPI_PARALLEL - NeighborExchanger() {} + LinearExchanger() {} - //! \brief Send and receive data - void Transfer(MeshBlock const *pmb, int n = -1) override; - - //! 
\brief Clear buffer - void ClearBuffer(MeshBlock const *pmb) override; + void Regroup(MeshBlock const *pmb, CoordinateDirection dir) override { + std::fill(Base::color_.begin(), Base::color_.end(), -1); + Base::setColor(pmb, dir); + } }; -template -class LinearExchanger : public Exchanger { +template +class PlanarExchanger : public Exchanger { public: // constructor and destructor - using Base = Exchanger; + using Base = Exchanger; + #ifdef MPI_PARALLEL using Base::mpi_comm_; #endif // MPI_PARALLEL - LinearExchanger(); + PlanarExchanger() {} - public: // member functions - int GetRankInGroup() const; - void Regroup(MeshBlock const *pmb, CoordinateDirection dir); - - private: - //! \brief MPI color of each block - std::vector color_; - - //! \brief MPI rank of the bottom of each block - std::vector brank_; -}; + void Regroup(MeshBlock const *pmb, CoordinateDirection dir) override { + std::fill(Base::color_.begin(), Base::color_.end(), -1); -template -class PlanarExchanger : public Exchanger { - public: - PlanarExchanger(); + if (dir == X1DIR) { + Base::setColor(pmb, X2DIR); + Base::setColor(pmb, X3DIR); + } else if (dir == X2DIR) { + Base::setColor(pmb, X1DIR); + Base::setColor(pmb, X3DIR); + } else { + Base::setColor(pmb, X1DIR); + Base::setColor(pmb, X2DIR); + } + } }; #endif // SRC_EXCHANGER_EXCHANGER_HPP_ diff --git a/src/exchanger/message_traits.cpp b/src/exchanger/message_traits.cpp deleted file mode 100644 index 4ab050cf..00000000 --- a/src/exchanger/message_traits.cpp +++ /dev/null @@ -1,33 +0,0 @@ -// C/C++ -#include -#include - -// athena++ -#include -#include - -// nbody -#include - -// harp -#include - -#ifdef MPI_PARALLEL -#include - -MPI_Datatype MPI_PARTICLE_DATA; -MPI_Datatype MessageTraits::mpi_type = MPI_PARTICLE_DATA; -MPI_Datatype MessageTraits::mpi_type = MPI_ATHENA_REAL; - -#endif // MPI_PARALLEL - -namespace MessageHelper { -int mpi_tag_ub; - -int create_mpi_tag(int lid, int tid, std::string name) { - int tag = 
BoundaryBase::CreateBvalsMPITag(lid, tid, 0); - - std::string str = name + std::to_string(tag); - return std::hash{}(str) % (mpi_tag_ub); -} -} // namespace MessageHelper diff --git a/src/exchanger/message_traits.hpp b/src/exchanger/message_traits.hpp deleted file mode 100644 index 757d768c..00000000 --- a/src/exchanger/message_traits.hpp +++ /dev/null @@ -1,64 +0,0 @@ -#ifndef SRC_EXCHANGER_MESSAGE_TRAITS_HPP_ -#define SRC_EXCHANGER_MESSAGE_TRAITS_HPP_ - -// C/C++ -#include - -// canoe -#include - -#ifdef MPI_PARALLEL -#include -#endif - -class RadiationBand; -class ParticleData; -class ParticleBase; - -//! \brief Traits class providing Message information for class T -template -struct MessageTraits { - using DataType = double; - - constexpr static int num_buffers = 1; - constexpr static const char* name = ""; - -#ifdef MPI_PARALLEL - static MPI_Datatype mpi_type; -#endif -}; - -// Specialization for RadiationBand Exchanger -template <> -struct MessageTraits { - using DataType = Real; - - constexpr static int num_buffers = 2; - constexpr static const char* name = "RadiationBand"; - -#ifdef MPI_PARALLEL - static MPI_Datatype mpi_type; -#endif // MPI_PARALLEL -}; - -// Specialization for Particle Exchanger -template <> -struct MessageTraits { - using DataType = ParticleData; - - constexpr static int num_buffers = 56; - constexpr static const char* name = "ParticleBase"; - -#ifdef MPI_PARALLEL - static MPI_Datatype mpi_type; -#endif // MPI_PARALLEL -}; - -namespace MessageHelper { - -extern int mpi_tag_ub; -int create_mpi_tag(int lid, int tid, std::string name); - -} // namespace MessageHelper - -#endif // SRC_COMMUNICATOR_COMMUNICATOR_HPP_ diff --git a/src/exchanger/neighbor_exchanger.hpp b/src/exchanger/neighbor_exchanger.hpp deleted file mode 100644 index 7a542ba0..00000000 --- a/src/exchanger/neighbor_exchanger.hpp +++ /dev/null @@ -1,70 +0,0 @@ -#ifndef SRC_EXCHANGER_NEIGHBOR_EXCHANGER_HPP_ -#define SRC_EXCHANGER_NEIGHBOR_EXCHANGER_HPP_ - -// athena -#include 
-#include - -// canoe -#include - -// exchanger -#include "exchanger.hpp" - -#ifdef MPI_PARALLEL -#include -#endif - -template -void NeighborExchanger::Transfer(MeshBlock const *pmb, int n) { - for (auto &nb : pmb->pbval->neighbor) { - if (nb.snb.rank == Globals::my_rank) { // on the same process - MeshBlock *neighbor = pmb->pmy_mesh->FindMeshBlock(nb.snb.gid); - auto exchanger = static_cast *>( - neighbor->pimpl->GetExchanger(MessageTraits::name)); - exchanger->recv_buffer_[nb.targetid].swap(send_buffer_[nb.bufid]); - exchanger->SetBoundaryStatus(nb.targetid, BoundaryStatus::arrived); - } -#ifdef MPI_PARALLEL - else { // MPI - int tag = MessageHelper::create_mpi_tag(nb.snb.lid, nb.targetid, - MessageTraits::name); - int ssize = send_buffer_[nb.bufid].size(); - MPI_Isend(send_buffer_[nb.bufid].data(), ssize, - MessageTraits::mpi_type, nb.snb.rank, tag, mpi_comm_, - &req_mpi_send_[nb.bufid]); - } -#endif - } - - int rsize, tag; - -#ifdef MPI_PARALLEL - MPI_Status status; - for (auto &nb : pmb->pbval->neighbor) { - if (nb.snb.rank == Globals::my_rank) continue; // local boundary received - - int tag = MessageHelper::create_mpi_tag(pmb->lid, nb.bufid, - MessageTraits::name); - - MPI_Probe(nb.snb.rank, tag, MPI_COMM_WORLD, &status); - MPI_Get_count(&status, MessageTraits::mpi_type, &rsize); - - recv_buffer_[nb.bufid].resize(rsize); - MPI_Irecv(recv_buffer_[nb.bufid].data(), rsize, MessageTraits::mpi_type, - nb.snb.rank, tag, mpi_comm_, &req_mpi_recv_[nb.bufid]); - } -#endif -} - -template -void NeighborExchanger::ClearBuffer(MeshBlock const *pmb) { - for (auto &nb : pmb->pbval->neighbor) { -#ifdef MPI_PARALLEL - if (nb.snb.rank != Globals::my_rank) - MPI_Wait(&req_mpi_send_[nb.bufid], MPI_STATUS_IGNORE); -#endif - } -} - -#endif // SRC_EXCHANGER_NEIGHBOR_EXCHANGER_HPP_ diff --git a/src/harp/radiation_band.hpp b/src/harp/radiation_band.hpp index 2f3ac173..70fe5dd2 100644 --- a/src/harp/radiation_band.hpp +++ b/src/harp/radiation_band.hpp @@ -18,7 +18,7 @@ #include // 
exchanger -#include +#include // harp #include "spectral_grid.hpp" @@ -33,7 +33,7 @@ class RadiationBand : public NamedGroup, public ParameterGroup, public ASCIIOutputGroup, public StringReprGroup, - public LinearExchanger { + public LinearExchanger { public: // public access data // implementation of RT Solver class RTSolver; diff --git a/src/harp/radiation_band_exchanger.cpp b/src/harp/radiation_band_exchanger.cpp index 6ce03c82..3862830a 100644 --- a/src/harp/radiation_band_exchanger.cpp +++ b/src/harp/radiation_band_exchanger.cpp @@ -5,7 +5,7 @@ #include // exchanger -#include +#include // harp #include "radiation_band.hpp" @@ -134,9 +134,8 @@ void RadiationBand::Transfer(MeshBlock const *pmb, int n) { if (nblocks > 1) { #ifdef MPI_PARALLEL - MPI_Allgather(&send_buffer_[n], size, - MessageTraits::mpi_type, &recv_buffer_[n], - size, MessageTraits::mpi_type, mpi_comm_); + MPI_Allgather(&send_buffer_[n], size, MPI_ATHENA_REAL, &recv_buffer_[n], + size, MPI_ATHENA_REAL, mpi_comm_); #endif // MPI_PARALLEL } else { recv_buffer_[n].swap(send_buffer_[n]); diff --git a/src/mesh_destroy.cpp b/src/mesh_destroy.cpp index aa132b5d..e86fdad2 100644 --- a/src/mesh_destroy.cpp +++ b/src/mesh_destroy.cpp @@ -72,5 +72,5 @@ void mesh_destroy(ParameterInput *&pinput, Mesh *&pmesh, int mbcnt) { Thermodynamics::Destroy(); IndexMap::Destroy(); MetadataTable::Destroy(); - ParticlesHelper::free_mpi_particle_data(); + ParticleHelper::free_mpi_particle_data(); } diff --git a/src/mesh_setup.cpp b/src/mesh_setup.cpp index ccacf21e..e72e14a7 100644 --- a/src/mesh_setup.cpp +++ b/src/mesh_setup.cpp @@ -65,7 +65,7 @@ void mesh_setup(ParameterInput*& pinput, Mesh*& pmesh) { Thermodynamics::InitFromAthenaInput(pinput); // n-body - ParticlesHelper::commit_mpi_particle_data(); + ParticleHelper::commit_mpi_particle_data(); try { if (cli->res_flag == 0) { diff --git a/src/nbody/particle_data.cpp b/src/nbody/particle_data.cpp index 0b38cb22..7bf99706 100644 --- a/src/nbody/particle_data.cpp +++ 
b/src/nbody/particle_data.cpp @@ -10,9 +10,6 @@ // canoe #include -// exchanger -#include - // nbody #include "particle_data.hpp" #include "particles.hpp" @@ -35,7 +32,7 @@ std::ostream& operator<<(std::ostream& os, ParticleData const& pt) { return os; } -namespace ParticlesHelper { +namespace ParticleHelper { bool check_in_meshblock(ParticleData const& pd, MeshBlock const* pmb) { auto pm = pmb->pmy_mesh; @@ -53,6 +50,8 @@ bool check_in_meshblock(ParticleData const& pd, MeshBlock const* pmb) { #ifdef MPI_PARALLEL #include +MPI_Datatype MPI_PARTICLE_DATA; + void commit_mpi_particle_data() { int counts[3] = {1, 2 + NINT_PARTICLE_DATA, 8 + NREAL_PARTICLE_DATA}; MPI_Datatype types[3] = {MPI_AINT, MPI_INT, MPI_ATHENA_REAL}; @@ -60,14 +59,11 @@ void commit_mpi_particle_data() { offsetof(ParticleData, pid), offsetof(ParticleData, time)}; - MPI_Type_create_struct(3, counts, disps, types, - &MessageTraits::mpi_type); - MPI_Type_commit(&MessageTraits::mpi_type); + MPI_Type_create_struct(3, counts, disps, types, &MPI_PARTICLE_DATA); + MPI_Type_commit(&MPI_PARTICLE_DATA); } -void free_mpi_particle_data() { - MPI_Type_free(&MessageTraits::mpi_type); -} +void free_mpi_particle_data() { MPI_Type_free(&MPI_PARTICLE_DATA); } #else // NOT_MPI_PARALLEL @@ -76,4 +72,4 @@ void free_mpi_particle_data() {} #endif // MPI_PARALLEL -} // namespace ParticlesHelper +} // namespace ParticleHelper diff --git a/src/nbody/particle_data.hpp b/src/nbody/particle_data.hpp index fd052dea..1bf0e059 100644 --- a/src/nbody/particle_data.hpp +++ b/src/nbody/particle_data.hpp @@ -47,12 +47,16 @@ struct ParticleData { std::ostream& operator<<(std::ostream& os, ParticleData const& mp); // helper functions -namespace ParticlesHelper { +namespace ParticleHelper { bool check_in_meshblock(ParticleData const& pd, MeshBlock const* pmb); void commit_mpi_particle_data(); void free_mpi_particle_data(); -} // namespace ParticlesHelper +#ifdef MPI_PARALLEL +extern MPI_Datatype MPI_PARTICLE_DATA; +#endif + +} // 
namespace ParticleHelper #endif // SRC_NBODY_PARTICLE_DATA_HPP_ diff --git a/src/nbody/particle_exchanger.cpp b/src/nbody/particle_exchanger.cpp index dfa0ec8b..b21e37cc 100644 --- a/src/nbody/particle_exchanger.cpp +++ b/src/nbody/particle_exchanger.cpp @@ -13,10 +13,17 @@ #include #include +// canoe +#include + // n-body #include "particle_data.hpp" #include "particles.hpp" +#ifdef MPI_PARALLEL +#include +#endif + bool ParticleBase::UnpackData(MeshBlock const *pmb) { bool success = true; int test; @@ -68,7 +75,7 @@ bool ParticleBase::UnpackData(MeshBlock const *pmb) { it->x3 += pm->mesh_size.x3max - pm->mesh_size.x3min; } - bool in_meshblock = ParticlesHelper::check_in_meshblock(*it, pmb); + bool in_meshblock = ParticleHelper::check_in_meshblock(*it, pmb); if (!in_meshblock) { throw RuntimeError("ParticleBase::AttachParticles", "Particle moved out of MeshBlock limits"); @@ -175,3 +182,53 @@ void ParticleBase::PackData(MeshBlock const *pmb) { // particles beyond qi are inactive particles. 
Remove them from the list pc.resize(qi - pc.begin()); } + +void ParticleBase::Transfer(MeshBlock const *pmb, int n) { + for (auto &nb : pmb->pbval->neighbor) { + if (nb.snb.rank == Globals::my_rank) { // on the same process + MeshBlock *neighbor = pmb->pmy_mesh->FindMeshBlock(nb.snb.gid); + auto exchanger = static_cast( + neighbor->pimpl->GetExchanger("Particle")); + exchanger->recv_buffer_[nb.targetid].swap(send_buffer_[nb.bufid]); + exchanger->SetBoundaryStatus(nb.targetid, BoundaryStatus::arrived); + } +#ifdef MPI_PARALLEL + else { // MPI + int tag = + ExchangerHelper::create_mpi_tag(nb.snb.lid, nb.targetid, "Particle"); + int ssize = send_buffer_[nb.bufid].size(); + MPI_Isend(send_buffer_[nb.bufid].data(), ssize, + ParticleHelper::MPI_PARTICLE_DATA, nb.snb.rank, tag, mpi_comm_, + &req_mpi_send_[nb.bufid]); + } +#endif + } + + int rsize, tag; + +#ifdef MPI_PARALLEL + MPI_Status status; + for (auto &nb : pmb->pbval->neighbor) { + if (nb.snb.rank == Globals::my_rank) continue; // local boundary received + + int tag = ExchangerHelper::create_mpi_tag(pmb->lid, nb.bufid, "Particle"); + + MPI_Probe(nb.snb.rank, tag, MPI_COMM_WORLD, &status); + MPI_Get_count(&status, ParticleHelper::MPI_PARTICLE_DATA, &rsize); + + recv_buffer_[nb.bufid].resize(rsize); + MPI_Irecv(recv_buffer_[nb.bufid].data(), rsize, + ParticleHelper::MPI_PARTICLE_DATA, nb.snb.rank, tag, mpi_comm_, + &req_mpi_recv_[nb.bufid]); + } +#endif +} + +void ParticleBase::ClearBuffer(MeshBlock const *pmb) { + for (auto &nb : pmb->pbval->neighbor) { +#ifdef MPI_PARALLEL + if (nb.snb.rank != Globals::my_rank) + MPI_Wait(&req_mpi_send_[nb.bufid], MPI_STATUS_IGNORE); +#endif + } +} diff --git a/src/nbody/particles.hpp b/src/nbody/particles.hpp index 00062c21..efecf18f 100644 --- a/src/nbody/particles.hpp +++ b/src/nbody/particles.hpp @@ -13,7 +13,7 @@ #include // exchanger -#include +#include // integrator #include @@ -32,7 +32,7 @@ class ParticleBase : public NamedGroup, // BinaryOutputGroup, public 
MeshOutputGroup, public MultiStageIntegrator, - public NeighborExchanger { + public Exchanger { public: /// public data //! particle data container. pc1 is reserved for multi-stage integration @@ -65,9 +65,11 @@ class ParticleBase : public NamedGroup, void TimeIntegrate(Real time, Real dt) override; void WeightedAverage(Real ave_wghts[]) override; - public: // NeighborExchanger functions + public: // Exchanger functions void PackData(MeshBlock const *pmb) override; bool UnpackData(MeshBlock const *pmb) override; + void Transfer(MeshBlock const *pmb, int n = -1) override; + void ClearBuffer(MeshBlock const *pmb) override; protected: /// protected data diff --git a/src/outputs/load_user_output_data.cpp b/src/outputs/load_user_output_data.cpp index 8513c44c..d5699ee4 100644 --- a/src/outputs/load_user_output_data.cpp +++ b/src/outputs/load_user_output_data.cpp @@ -27,7 +27,7 @@ void OutputType::loadUserOutputData(MeshBlock *pmb) { // diagnostic if (output_params.variable.compare("diag") == 0) { for (auto &diag : all_diags) { - diag->Finalize(phyd->w); + diag->Finalize(pmb); pod = new OutputData; pod->type = diag->type; pod->name = diag->GetName(); diff --git a/src/outputs/netcdf.cpp b/src/outputs/netcdf.cpp index 8c98c63f..78a4645d 100644 --- a/src/outputs/netcdf.cpp +++ b/src/outputs/netcdf.cpp @@ -232,8 +232,10 @@ void NetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { // count total variables (vector variables are expanded into flat scalars) int total_vars = 0; while (pdata != nullptr) { - std::string grid = pmeta->GetGridType(pdata->name); + auto names = Vectorize(pdata->name.c_str(), ","); + std::string grid = pmeta->GetGridType(names[0]); int nvar = get_num_variables(grid, pdata->data); + total_vars += nvar; pdata = pdata->pnext; } @@ -249,7 +251,6 @@ void NetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { pdata = pfirst_data_; while (pdata != nullptr) { auto names = Vectorize(pdata->name.c_str(), ","); - std::string 
grid = pmeta->GetGridType(names[0]); int nvar = get_num_variables(grid, pdata->data); @@ -384,7 +385,8 @@ void NetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { ivar = var_ids; pdata = pfirst_data_; while (pdata != nullptr) { - std::string grid = pmeta->GetGridType(pdata->name); + auto names = Vectorize(pdata->name.c_str(), ","); + std::string grid = pmeta->GetGridType(names[0]); int nvar = get_num_variables(grid, pdata->data); if (grid == "RCC") { // radiation rays diff --git a/src/outputs/output_utils.cpp b/src/outputs/output_utils.cpp index e6d698ef..5bb8be09 100644 --- a/src/outputs/output_utils.cpp +++ b/src/outputs/output_utils.cpp @@ -52,15 +52,26 @@ __attribute__((weak)) MetadataTable::MetadataTable() { {"div", "divergence", "1/s", "CCC"}, {"b", "buoyancy", "m/s^2", "CCC"}, {"rho_bar", "mean density", "kg/m^3", "CCC"}, - {"q1_bar", "mean vapor 1 mixing ratio", "kg/kg", "CCC"}, - {"q2_bar", "mean vapor 2 mixing ratio", "kg/kg", "CCC"}, - {"q3_bar", "mean vapor 3 mixing ratio", "kg/kg", "CCC"}, + {"q1_bar", "mean vapor1 mixing ratio", "kg/kg", "CCC"}, + {"q2_bar", "mean vapor2 mixing ratio", "kg/kg", "CCC"}, + {"q3_bar", "mean vapor3 mixing ratio", "kg/kg", "CCC"}, {"vel1_bar", "mean velocity 1", "m/s", "CCC"}, {"vel2_bar", "mean velocity 2", "m/s", "CCC"}, {"vel3_bar", "mean velocity 3", "m/s", "CCC"}, {"T_bar", "mean temperature", "K", "CCC"}, + {"div_h", "horizontal divergence", "1/s", "CCC"}, {"tempa", "horizontal temperature anomaly", "K", "CCC"}, {"presa", "horizontal pressure anomaly", "pa", "CCC"}, + {"rflx_up", "total upward radiative flux", "w/m^2", "--F"}, + {"rflx_dn", "total downward radiative flux", "w/m^2", "--F"}, + {"v1rho", "total upward mass flux", "kg/(m^2.s)", "--F"}, + {"v1q1", "total upward vapor1 flux", "1/(m^2.s)", "--C"}, + {"v1q2", "total upward vapor2 flux", "1/(m^2.s)", "--C"}, + {"v1q3", "total upward vapor3 flux", "1/(m^2.s)", "--C"}, + {"v1v1", "total upward velocity 1 flux", "(m/s)/(m^2.s)", "--C"}, + 
{"v1v2", "total upward velocity 2 flux", "(m/s)/(m^2.s)", "--C"}, + {"v1v3", "total upward velocity 3 flux", "(m/s)/(m^2.s)", "--C"}, + {"v1T", "total upward temperature flux", "K/(m^2.s)", "--C"}, }; } diff --git a/src/outputs/pnetcdf.cpp b/src/outputs/pnetcdf.cpp index 2d9549b5..1d3ec8c5 100644 --- a/src/outputs/pnetcdf.cpp +++ b/src/outputs/pnetcdf.cpp @@ -94,10 +94,10 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { int ifile; err = ncmpi_create(MPI_COMM_WORLD, fname.c_str(), NC_CLOBBER, MPI_INFO_NULL, &ifile); - ERR + ERR; - // 2. coordinate structure - size_t nx1 = pm->mesh_size.nx1; + // 2. coordinate structure + size_t nx1 = pm->mesh_size.nx1; size_t nx2 = pm->mesh_size.nx2; size_t nx3 = pm->mesh_size.nx3; size_t nx1f = nx1; @@ -106,11 +106,9 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { if (nx2 > 1) nx2f++; size_t nx3f = nx3; if (nx3 > 1) nx3f++; - //! \todo This applies to the first block. Does it work for all blocks? + //! \todo This applies to the first block. Does it work for all blocks? -#if ENABLE_HARP - size_t nrays = pm->my_blocks(0)->pimpl->prad->radiance.GetDim3(); -#endif + size_t nrays = pm->my_blocks(0)->pimpl->prad->GetNumOutgoingRays(); // 2. define coordinate int idt, idx1, idx2, idx3, idx1f, idx2f, idx3f, iray; @@ -139,12 +137,10 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { ndims++; } -#if ENABLE_HARP if (nrays > 0) { ncmpi_def_dim(ifile, "ray", nrays, &iray); ndims += 2; } -#endif // 3. 
define variables int ivt, ivx1, ivx2, ivx3, ivx1f, ivx2f, ivx3f, imu, iphi; @@ -227,7 +223,6 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { } } -#if ENABLE_HARP if (nrays > 0) { ncmpi_def_var(ifile, "mu_out", NC_FLOAT, 1, &iray, &imu); ncmpi_put_att_text(ifile, imu, "units", 1, "1"); @@ -236,7 +231,6 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { ncmpi_put_att_text(ifile, iphi, "units", 3, "rad"); ncmpi_put_att_text(ifile, iphi, "long_name", 15, "azimuthal angle"); } -#endif MeshBlock *pmb = pm->my_blocks(0); LoadOutputData(pmb); @@ -245,8 +239,10 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { // count total variables (vector variables are expanded into flat scalars) int total_vars = 0; while (pdata != nullptr) { - std::string grid = pmeta->GetGridType(pdata->name); + auto names = Vectorize(pdata->name.c_str(), ","); + std::string grid = pmeta->GetGridType(names[0]); int nvar = get_num_variables(grid, pdata->data); + total_vars += nvar; pdata = pdata->pnext; } @@ -262,7 +258,6 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { pdata = pfirst_data_; while (pdata != nullptr) { auto names = Vectorize(pdata->name.c_str(), ","); - std::string grid = pmeta->GetGridType(names[0]); int nvar = get_num_variables(grid, pdata->data); @@ -333,18 +328,18 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { } err = ncmpi_put_att_text(ifile, NC_GLOBAL, "Conventions", 6, "COARDS"); - ERR if (strcmp(COORDINATE_SYSTEM, "spherical_polar") == 0) { + ERR; + if (strcmp(COORDINATE_SYSTEM, "spherical_polar") == 0) { err = ncmpi_put_att_float(ifile, NC_GLOBAL, "PlanetRadius", NC_FLOAT, 1, &radius); - ERR + ERR; } err = ncmpi_enddef(ifile); - ERR + ERR; - // 4. allocate data buffer, MPI requests and status - int max_ncells = 0, - nmb = 0; + // 4. 
allocate data buffer, MPI requests and status + int max_ncells = 0, nmb = 0; for (int b = 0; b < pm->nblocal; ++b) { MeshBlock *pmb = pm->my_blocks(b); int nf1 = pmb->ie - pmb->is + 2 * NGHOST; @@ -366,7 +361,7 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { float time = (float)pm->time; MPI_Offset stime = 0, etime = 1; err = ncmpi_put_vara_float_all(ifile, ivt, &stime, &etime, &time); - ERR + ERR; ClearOutputData(); // required when LoadOutputData() is used. @@ -418,9 +413,7 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { MPI_Offset startr[4] = {0, 0, ncells2 * pmb->loc.lx2, ncells3 * pmb->loc.lx3}; -#if ENABLE_HARP MPI_Offset countr[4] = {1, (MPI_Offset)nrays, ncells2, ncells3}; -#endif // MPI_Offset start_ax1[2] = {0, ncells1*pmb->loc.lx1}; // MPI_Offset count_ax1[2] = {1, ncells1}; @@ -434,9 +427,9 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { (*ib)[i - out_is] = (float)(pmb->pcoord->x1v(i)); } err = ncmpi_iput_vara_float(ifile, ivx1, start + 1, count + 1, *ib++, ir++); - ERR + ERR; - if (nx1 > 1) { + if (nx1 > 1) { if (strcmp(COORDINATE_SYSTEM, "spherical_polar") == 0) { for (int i = out_is; i <= out_ie + 1; ++i) (*ib)[i - out_is] = (float)(pmb->pcoord->x1f(i)) - radius; @@ -446,7 +439,7 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { } err = ncmpi_iput_vara_float(ifile, ivx1f, start + 1, count1 + 1, *ib++, ir++); - ERR + ERR; } if (strcmp(COORDINATE_SYSTEM, "spherical_polar") == 0) { @@ -457,9 +450,9 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { (*ib)[j - out_js] = (float)(pmb->pcoord->x2v(j)); } err = ncmpi_iput_vara_float(ifile, ivx2, start + 2, count + 2, *ib++, ir++); - ERR + ERR; - if (nx2 > 1) { + if (nx2 > 1) { if (strcmp(COORDINATE_SYSTEM, "spherical_polar") == 0) { for (int j = out_js; j <= out_je + 1; ++j) (*ib)[j - out_js] = 90. 
- (float)rad2deg(pmb->pcoord->x2f(j)); @@ -469,7 +462,7 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { } err = ncmpi_iput_vara_float(ifile, ivx2f, start + 2, count2 + 2, *ib++, ir++); - ERR + ERR; } if (strcmp(COORDINATE_SYSTEM, "spherical_polar") == 0) { @@ -480,9 +473,9 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { (*ib)[k - out_ks] = (float)(pmb->pcoord->x3v(k)); } err = ncmpi_iput_vara_float(ifile, ivx3, start + 3, count + 3, *ib++, ir++); - ERR + ERR; - if (nx3 > 1) { + if (nx3 > 1) { if (strcmp(COORDINATE_SYSTEM, "spherical_polar") == 0) { for (int k = out_ks; k <= out_ke + 1; ++k) (*ib)[k - out_ks] = (float)rad2deg(pmb->pcoord->x3f(k)); @@ -492,37 +485,38 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { } err = ncmpi_iput_vara_float(ifile, ivx3f, start + 3, count3 + 3, *ib++, ir++); - ERR + ERR; } -#if ENABLE_HARP if (nrays > 0) { + auto prad = pmb->pimpl->prad; int m = 0; - for (auto p : pmb->pimpl->prad->bands) { - for (int n = 0; n < p->getNumOutgoingRays(); ++n) - (*ib)[m++] = (float)(p->getCosinePolarAngle(n)); + for (int b = 0; b < prad->GetNumBands(); ++b) { + auto p = prad->GetBand(b); + for (int n = 0; n < p->GetNumOutgoingRays(); ++n) + (*ib)[m++] = (float)(p->GetCosinePolarAngle(n)); } err = ncmpi_iput_var_float(ifile, imu, *ib++, ir++); - ERR + ERR; - m = 0; - for (auto p : pmb->pimpl->prad->bands) { - for (int n = 0; n < p->getNumOutgoingRays(); ++n) - (*ib)[m++] = (float)(p->getAzimuthalAngle(n)); + m = 0; + for (int b = 0; b < prad->GetNumBands(); ++b) { + auto p = prad->GetBand(b); + for (int n = 0; n < p->GetNumOutgoingRays(); ++n) + (*ib)[m++] = (float)(p->GetAzimuthalAngle(n)); } err = ncmpi_iput_var_float(ifile, iphi, *ib++, ir++); - ERR + ERR; } -#endif ivar = var_ids; pdata = pfirst_data_; while (pdata != nullptr) { - std::string grid = pmeta->GetGridType(pdata->name); + auto names = Vectorize(pdata->name.c_str(), ","); + 
std::string grid = pmeta->GetGridType(names[0]); int nvar = get_num_variables(grid, pdata->data); if (grid == "RCC") { // radiation rays -#if ENABLE_HARP for (int n = 0; n < nvar; n++) { float *it = *ib; for (int m = 0; m < nrays; ++m) @@ -531,9 +525,8 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { *it++ = (float)pdata->data(n, m, k, j); err = ncmpi_iput_vara_float(ifile, *ivar++, startr, countr, *ib++, ir++); - ERR + ERR; } -#endif } else if (grid == "CCF") { for (int n = 0; n < nvar; n++) { float *it = *ib; @@ -543,7 +536,7 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { *it++ = (float)pdata->data(n, k, j, i); err = ncmpi_iput_vara_float(ifile, *ivar++, start, count1, *ib++, ir++); - ERR + ERR; } } else if ((grid == "CFC") && (nx2 > 1)) { for (int n = 0; n < nvar; n++) { @@ -554,7 +547,7 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { *it++ = (float)pdata->data(n, k, j, i); err = ncmpi_iput_vara_float(ifile, *ivar++, start, count2, *ib++, ir++); - ERR + ERR; } } else if ((grid == "FCC") && (nx3 > 1)) { for (int n = 0; n < nvar; n++) { @@ -565,7 +558,7 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { *it++ = (float)pdata->data(n, k, j, i); err = ncmpi_iput_vara_float(ifile, *ivar++, start, count3, *ib++, ir++); - ERR + ERR; } } else if (grid == "--C") { for (int n = 0; n < nvar; n++) { @@ -574,7 +567,7 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { *it++ = (float)pdata->data(n, i); err = ncmpi_iput_vara_float(ifile, *ivar++, start, count, *ib++, ir++); - ERR + ERR; } } else if (grid == "--F") { for (int n = 0; n < nvar; n++) { @@ -583,14 +576,14 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { *it++ = (float)pdata->data(n, i); err = ncmpi_iput_vara_float(ifile, *ivar++, start, count1, *ib++, ir++); - ERR + ERR; } } else if (grid == "---") { for (int n = 0; n 
< nvar; n++) { **ib = (float)pdata->data(n, 0); err = ncmpi_iput_vara_float(ifile, *ivar++, start, count, *ib++, ir++); - ERR + ERR; } } else { for (int n = 0; n < nvar; n++) { @@ -601,7 +594,7 @@ void PnetcdfOutput::WriteOutputFile(Mesh *pm, ParameterInput *pin, bool flag) { *it++ = (float)pdata->data(n, k, j, i); err = ncmpi_iput_vara_float(ifile, *ivar++, start, count, *ib++, ir++); - ERR + ERR; } }