From 5329e3ba9f25606187fbe1bc2d6b2f55ef3cae01 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 24 Sep 2024 18:52:13 -0700 Subject: [PATCH 001/278] Python: `warpx.multifab` legacy signature (#5321) * Python: `warpx.multifab` legacy signature Keep the legacy signature of the fully qualified name a bit longer to avoid breakage. Throw warnings for users to migrate. * Update Docs * Simplify Example String No f-string needed here. * Old API: Do not warn yet * Clang-Tidy --- Docs/source/usage/workflows/python_extend.rst | 7 ++-- Source/Python/WarpX.cpp | 34 ++++++++++++------- Source/ablastr/fields/MultiFabRegister.H | 13 +++++-- Source/ablastr/fields/MultiFabRegister.cpp | 22 ++++++++---- 4 files changed, 52 insertions(+), 24 deletions(-) diff --git a/Docs/source/usage/workflows/python_extend.rst b/Docs/source/usage/workflows/python_extend.rst index 47610e0d7ba..275a4dd134d 100644 --- a/Docs/source/usage/workflows/python_extend.rst +++ b/Docs/source/usage/workflows/python_extend.rst @@ -134,9 +134,12 @@ This example accesses the :math:`E_x(x,y,z)` field at level 0 after every time s warpx = sim.extension.warpx # data access - E_x_mf = warpx.multifab(f"Efield_fp[x][level=0]") + # vector field E, component x, on the fine patch of MR level 0 + E_x_mf = warpx.multifab("Efield_fp", dir=0, level=0) + # scalar field rho, on the fine patch of MR level 0 + rho_mf = warpx.multifab("rho_fp", level=0) - # compute + # compute on E_x_mf # iterate over mesh-refinement levels for lev in range(warpx.finest_level + 1): # grow (aka guard/ghost/halo) regions diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index 857d23dc588..70d91445d8b 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -120,17 +120,27 @@ void init_WarpX (py::module& m) R"doc(Registry to all WarpX MultiFab (fields).)doc" ) .def("multifab", - [](WarpX & wx, std::string multifab_name, int level) { - if (wx.m_fields.has(multifab_name, level)) { - return wx.m_fields.get(multifab_name, level); + [](WarpX & wx, std::string internal_name) { + if (wx.m_fields.internal_has(internal_name)) { + return wx.m_fields.internal_get(internal_name); + } else { + throw std::runtime_error("MultiFab '" + internal_name + "' is unknown or is not allocated!"); + } + }, + py::arg("internal_name") + ) + .def("multifab", + [](WarpX & wx, std::string scalar_name, int level) { + if (wx.m_fields.has(scalar_name, level)) { + return wx.m_fields.get(scalar_name, level); } else { - throw std::runtime_error("The MultiFab '" + multifab_name + "' is unknown or is not allocated!"); + throw std::runtime_error("The scalar field '" + scalar_name + "' is unknown or is not allocated!"); } }, - py::arg("multifab_name"), + py::arg("scalar_name"), py::arg("level"), py::return_value_policy::reference_internal, - R"doc(Return MultiFabs by name and level, e.g., ``\"Efield_aux\"``, ``\"Efield_fp"``, ... + R"doc(Return scalar fields (MultiFabs) by name and level, e.g., ``\"rho_fp\"``, ``\"phi_fp"``, ... 
The physical fields in WarpX have the following naming: @@ -141,18 +151,18 @@ The physical fields in WarpX have the following naming: (only for level 1 and higher).)doc" ) .def("multifab", - [](WarpX & wx, std::string multifab_name, Direction dir, int level) { - if (wx.m_fields.has(multifab_name, dir, level)) { - return wx.m_fields.get(multifab_name, dir, level); + [](WarpX & wx, std::string vector_name, Direction dir, int level) { + if (wx.m_fields.has(vector_name, dir, level)) { + return wx.m_fields.get(vector_name, dir, level); } else { - throw std::runtime_error("The MultiFab '" + multifab_name + "' is unknown or is not allocated!"); + throw std::runtime_error("The vector field '" + vector_name + "' is unknown or is not allocated!"); } }, - py::arg("multifab_name"), + py::arg("vector_name"), py::arg("dir"), py::arg("level"), py::return_value_policy::reference_internal, - R"doc(Return MultiFabs by name, direction, and level, e.g., ``\"Efield_aux\"``, ``\"Efield_fp"``, ... + R"doc(Return the component of a vector field (MultiFab) by name, direction, and level, e.g., ``\"Efield_aux\"``, ``\"Efield_fp"``, ... The physical fields in WarpX have the following naming: diff --git a/Source/ablastr/fields/MultiFabRegister.H b/Source/ablastr/fields/MultiFabRegister.H index f33eed1c5a6..21df20c1678 100644 --- a/Source/ablastr/fields/MultiFabRegister.H +++ b/Source/ablastr/fields/MultiFabRegister.H @@ -661,14 +661,21 @@ namespace ablastr::fields int level ) const; - private: + /** Temporary test function for legacy Python bindings */ + [[nodiscard]] bool + internal_has ( + std::string const & internal_name + ); [[nodiscard]] amrex::MultiFab * internal_get ( - std::string const & key + std::string const & internal_name ); + + private: + [[nodiscard]] amrex::MultiFab const * internal_get ( - std::string const & key + std::string const & internal_name ) const; amrex::MultiFab* diff --git a/Source/ablastr/fields/MultiFabRegister.cpp b/Source/ablastr/fields/MultiFabRegister.cpp index 106a3aede79..2c384a90089 100644 --- a/Source/ablastr/fields/MultiFabRegister.cpp +++ b/Source/ablastr/fields/MultiFabRegister.cpp @@ -336,32 +336,40 @@ namespace ablastr::fields return count == 3; } + bool + MultiFabRegister::internal_has ( + std::string const & internal_name + ) + { + return m_mf_register.count(internal_name) > 0; + } + amrex::MultiFab* MultiFabRegister::internal_get ( - std::string const & key + std::string const & internal_name ) { - if (m_mf_register.count(key) == 0) { + if (m_mf_register.count(internal_name) == 0) { // FIXME: temporary, throw a std::runtime_error // throw std::runtime_error("MultiFabRegister::get name does not exist in register: " + key); return nullptr; } - amrex::MultiFab & mf = m_mf_register.at(key).m_mf; + amrex::MultiFab & mf = m_mf_register.at(internal_name).m_mf; return &mf; } amrex::MultiFab const * MultiFabRegister::internal_get ( - std::string const & key + std::string const & internal_name ) const { - if (m_mf_register.count(key) == 0) { + if (m_mf_register.count(internal_name) == 0) { // FIXME: temporary, throw a std::runtime_error - // throw std::runtime_error("MultiFabRegister::get name does not exist in register: " + key); + // throw std::runtime_error("MultiFabRegister::get name does not exist in register: " + internal_name); return nullptr; } - amrex::MultiFab const & mf = m_mf_register.at(key).m_mf; + amrex::MultiFab const & mf = m_mf_register.at(internal_name).m_mf; return &mf; } From ae7aa62274f2b59a83eb02cb8034e0c58f7f45cb Mon Sep 17 00:00:00 2001 From: Revathi 
Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com> Date: Tue, 24 Sep 2024 21:16:46 -0700 Subject: [PATCH 002/278] Divide rho by epsilon to undo the multiplication before computing phi (#5324) * un-multiply done * Update Source/ablastr/fields/PoissonSolver.H --------- Co-authored-by: Remi Lehe --- .../inputs_test_rz_spacecraft_charging_picmi.py | 10 +++++----- Source/ablastr/fields/PoissonSolver.H | 4 +++- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/Examples/Physics_applications/spacecraft_charging/inputs_test_rz_spacecraft_charging_picmi.py b/Examples/Physics_applications/spacecraft_charging/inputs_test_rz_spacecraft_charging_picmi.py index e3bc888f600..9ce8bb8433c 100644 --- a/Examples/Physics_applications/spacecraft_charging/inputs_test_rz_spacecraft_charging_picmi.py +++ b/Examples/Physics_applications/spacecraft_charging/inputs_test_rz_spacecraft_charging_picmi.py @@ -121,13 +121,13 @@ def compute_virtual_charge_on_spacecraft(): # Compute integral of rho over volume of the domain # (i.e. total charge of the plasma particles) rho_integral = ( - (rho[1 : nr - 1, 1 : nz - 1] * r[1 : nr - 1, np.newaxis]).sum() * dr * dz + (rho[1 : nr - 1, 1 : nz - 1] * r[1 : nr - 1, np.newaxis]).sum() + * 2 + * np.pi + * dr + * dz ) - # Due to an oddity in WarpX (which will probably be solved later) - # we need to multiply `rho` by `-epsilon_0` to get the correct charge - rho_integral *= 2 * np.pi * -scc.epsilon_0 # does this oddity still exist? - # Compute charge of the spacecraft, based on Gauss theorem q_spacecraft = -rho_integral - scc.epsilon_0 * grad_phi_integral print("Virtual charge on the spacecraft: %e" % q_spacecraft) diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index 727280d630b..ad825fdbe6b 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -275,7 +275,7 @@ computePhi ( #endif // Use the Multigrid (MLMG) solver if selected or on refined patches // but first scale rho appropriately - rho[lev]->mult(-1._rt / ablastr::constant::SI::ep0); // TODO: when do we "un-multiply" this? We need to document this side-effect! + rho[lev]->mult(-1._rt / ablastr::constant::SI::ep0); #ifdef WARPX_DIM_RZ constexpr bool is_rz = true; @@ -409,6 +409,8 @@ computePhi ( post_phi_calculation.value()(mlmg, lev); } } + rho[lev]->mult(-ablastr::constant::SI::ep0); // Multiply rho by epsilon again + } // loop over lev(els) } // computePhi } // namespace ablastr::fields From d889ac8da301e40d2d4444211d0e40fa12b4f776 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 24 Sep 2024 21:59:13 -0700 Subject: [PATCH 003/278] Fix Legacy Python MF API (#5325) Keep ownership on the C++ side. --- Source/Python/WarpX.cpp | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index 70d91445d8b..0aab95f78f8 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -127,7 +127,15 @@ void init_WarpX (py::module& m) throw std::runtime_error("MultiFab '" + internal_name + "' is unknown or is not allocated!"); } }, - py::arg("internal_name") + py::arg("internal_name"), + py::return_value_policy::reference_internal, + R"doc(Return a MultiFab by its internal name (deprecated). + +The multifab('internal_name') signature is deprecated. +Please use: +- multifab('prefix', level=...) for scalar fields +- multifab('prefix', dir=..., level=...) 
for vector field components
+where 'prefix' is the part of 'internal_name' before the '[')doc"
     )
     .def("multifab",
         [](WarpX & wx, std::string scalar_name, int level) {

From 30ac54887d2fc01a8b4f853fbc1f1dfda9d98fd6 Mon Sep 17 00:00:00 2001
From: Axel Huebl
Date: Wed, 25 Sep 2024 01:04:04 -0700
Subject: [PATCH 004/278] PoissonSolver: Missing Include (#5327)

Fix the missing include for `MultiLevelRegister`, which includes the
MultiLevel MF types.
---
 Source/ablastr/fields/PoissonSolver.H | 1 +
 1 file changed, 1 insertion(+)

diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H
index ad825fdbe6b..e6eaec4f4ad 100644
--- a/Source/ablastr/fields/PoissonSolver.H
+++ b/Source/ablastr/fields/PoissonSolver.H
@@ -14,6 +14,7 @@
 #include
 #include
 #include
+#include <ablastr/fields/MultiFabRegister.H>
 #include

 #if defined(ABLASTR_USE_FFT) && defined(WARPX_DIM_3D)

From 3e76edc05ffcb86092831c1cc81c2e0e015a9971 Mon Sep 17 00:00:00 2001
From: Axel Huebl
Date: Wed, 25 Sep 2024 06:02:06 -0700
Subject: [PATCH 005/278] PoissonSolver: Missing Include (#5327) (#5328)

Fix the missing include for `MultiLevelRegister`, which includes the
MultiLevel MF types.
---
 Source/ablastr/fields/PoissonSolver.H | 19 ++++++++++++-------
 1 file changed, 12 insertions(+), 7 deletions(-)

diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H
index e6eaec4f4ad..8b4f9cea9a1 100644
--- a/Source/ablastr/fields/PoissonSolver.H
+++ b/Source/ablastr/fields/PoissonSolver.H
@@ -54,6 +54,7 @@
 #include
 #include
+#include <type_traits>

 namespace ablastr::fields
 {
@@ -311,13 +312,17 @@ computePhi (
     auto linop_nodelap = std::make_unique<amrex::MLEBNodeFDLaplacian>();
     if (eb_enabled) {
 #if defined(AMREX_USE_EB)
-        linop_nodelap->define(
-            amrex::Vector<amrex::Geometry>{geom[lev]},
-            amrex::Vector<amrex::BoxArray>{grids[lev]},
-            amrex::Vector<amrex::DistributionMapping>{dmap[lev]},
-            info,
-            amrex::Vector<amrex::EBFArrayBoxFactory const *>{eb_farray_box_factory.value()[lev]}
-        );
+        if constexpr (std::is_same_v<T_FArrayBoxFactory, void>) {
+            throw std::runtime_error("EB requested by eb_farray_box_factory not provided!");
+        } else {
+            linop_nodelap->define(
+                amrex::Vector<amrex::Geometry>{geom[lev]},
+                amrex::Vector<amrex::BoxArray>{grids[lev]},
+                amrex::Vector<amrex::DistributionMapping>{dmap[lev]},
+                info,
+                amrex::Vector<amrex::EBFArrayBoxFactory const *>{eb_farray_box_factory.value()[lev]}
+            );
+        }
 #endif
     }
     else {

From e3e6ab8690e9222f0ad84ebb21a92c85874c4775 Mon Sep 17 00:00:00 2001
From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com>
Date: Wed, 25 Sep 2024 13:49:54 -0700
Subject: [PATCH 006/278]
 =?UTF-8?q?Fixed=20a=20bug=20where=20centering=20c?=
 =?UTF-8?q?oefficients=20used=20in=20Magnetostatic=20solver=E2=80=A6=20(#5?=
 =?UTF-8?q?289)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Fixed a bug where centering coefficients used in the Magnetostatic solver
  were not being initialized when doing an energy-conserving field gather.

Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com>

* [pre-commit.ci] auto fixes from pre-commit.com hooks

  for more information, see https://pre-commit.ci

* The pointer type passed into the magnetostatic solver for the current is no
  longer a smart pointer, so the call to get() that returned the raw pointer
  was removed.

---------

Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com>
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .../MagnetostaticSolver/MagnetostaticSolver.cpp | 15 +++++++--------
 Source/WarpX.cpp                                |  5 +++--
 2 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp
index 84efe8bf45a..5c28ff1f3c7 100644
--- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp
+++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp
@@ -170,15 +170,14 @@ WarpX::computeVectorPotential (ablastr::fields::MultiLevelVectorField const& cur
     amrex::Vector<amrex::Array<amrex::MultiFab*, 3>> sorted_curr;
     amrex::Vector<amrex::Array<amrex::MultiFab*, 3>> sorted_A;
     for (int lev = 0; lev <= finest_level; ++lev) {
-        sorted_curr.emplace_back(amrex::Array<amrex::MultiFab*, 3> ({curr[lev][Direction{0}],
-                                                curr[lev][Direction{1}],
-                                                curr[lev][Direction{2}]}));
-        sorted_A.emplace_back(amrex::Array<amrex::MultiFab*, 3> ({A[lev][Direction{0}],
-                                             A[lev][Direction{1}],
-                                             A[lev][Direction{2}]}));
+        sorted_curr.emplace_back(amrex::Array<amrex::MultiFab*, 3> ({curr[lev][0],
+                                                curr[lev][1],
+                                                curr[lev][2]}));
+        sorted_A.emplace_back(amrex::Array<amrex::MultiFab*, 3> ({A[lev][0],
+                                             A[lev][1],
+                                             A[lev][2]}));
     }

-#if defined(AMREX_USE_EB)
     const ablastr::fields::MultiLevelVectorField Bfield_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level);
     const std::optional post_A_calculation(
     {
...
         m_fields.get_mr_levels_alldirs(FieldType::vector_potential_grad_buf_e_stag, finest_level),
         m_fields.get_mr_levels_alldirs(FieldType::vector_potential_grad_buf_b_stag, finest_level)
     });

+#if defined(AMREX_USE_EB)
     amrex::Vector<amrex::EBFArrayBoxFactory const *> factories;
     for (int lev = 0; lev <= finest_level; ++lev) {
         factories.push_back(&WarpX::fieldEBFactory(lev));
     }
     const std::optional<amrex::Vector<amrex::EBFArrayBoxFactory const *>> eb_farray_box_factory({factories});
 #else
-    const std::optional post_A_calculation;
     const std::optional<amrex::Vector<amrex::EBFArrayBoxFactory const *>> eb_farray_box_factory;
 #endif

diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index 60374133a52..7f9288debb7 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -1388,8 +1388,9 @@ WarpX::ReadParameters ()
     // Instead, if warpx.grid_type=collocated, the momentum-conserving and
     // energy conserving field gathering algorithms are equivalent (forces
     // gathered from the collocated grid) and no fields centering occurs.
- if (WarpX::field_gathering_algo == GatheringAlgo::MomentumConserving && - WarpX::grid_type != GridType::Collocated) + if ((WarpX::field_gathering_algo == GatheringAlgo::MomentumConserving + && WarpX::grid_type != GridType::Collocated) + || WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) { utils::parser::queryWithParser( pp_warpx, "field_centering_nox", field_centering_nox); From d9935f48ee5773693d1e41f11ba0dc992fec0220 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Wed, 25 Sep 2024 22:45:56 -0700 Subject: [PATCH 007/278] `MultiFabRegister`: use `has_vector` when possible (#5334) --- Source/BoundaryConditions/PML.cpp | 8 ++++---- Source/Evolve/WarpXEvolve.cpp | 2 +- Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp | 8 ++++---- Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp | 8 ++++---- .../FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp | 2 +- Source/Parallelization/WarpXComm.cpp | 8 ++++---- Source/Particles/LaserParticleContainer.cpp | 2 +- Source/Particles/PhysicalParticleContainer.cpp | 10 +++++----- 8 files changed, 24 insertions(+), 24 deletions(-) diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index 91d821d6646..f45ca222e69 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -1234,7 +1234,7 @@ PML::CheckPoint ( { using ablastr::fields::Direction; - if (fields.has(FieldType::pml_E_fp, Direction{0}, 0)) + if (fields.has_vector(FieldType::pml_E_fp, 0)) { ablastr::fields::VectorField pml_E_fp = fields.get_alldirs(FieldType::pml_E_fp, 0); ablastr::fields::VectorField pml_B_fp = fields.get_alldirs(FieldType::pml_B_fp, 0); @@ -1246,7 +1246,7 @@ PML::CheckPoint ( VisMF::AsyncWrite(*pml_B_fp[2], dir+"_Bz_fp"); } - if (fields.has(FieldType::pml_E_cp, Direction{0}, 0)) + if (fields.has_vector(FieldType::pml_E_cp, 0)) { ablastr::fields::VectorField pml_E_cp = fields.get_alldirs(FieldType::pml_E_cp, 0); ablastr::fields::VectorField pml_B_cp = fields.get_alldirs(FieldType::pml_B_cp, 0); @@ -1267,7 +1267,7 @@ PML::Restart ( { using ablastr::fields::Direction; - if (fields.has(FieldType::pml_E_fp, Direction{0}, 0)) + if (fields.has_vector(FieldType::pml_E_fp, 0)) { ablastr::fields::VectorField pml_E_fp = fields.get_alldirs(FieldType::pml_E_fp, 0); ablastr::fields::VectorField pml_B_fp = fields.get_alldirs(FieldType::pml_B_fp, 0); @@ -1279,7 +1279,7 @@ PML::Restart ( VisMF::Read(*pml_B_fp[2], dir+"_Bz_fp"); } - if (fields.has(FieldType::pml_E_cp, Direction{0}, 0)) + if (fields.has_vector(FieldType::pml_E_cp, 0)) { ablastr::fields::VectorField pml_E_cp = fields.get_alldirs(FieldType::pml_E_cp, 0); ablastr::fields::VectorField pml_B_cp = fields.get_alldirs(FieldType::pml_B_cp, 0); diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 93d265d598f..a685afd28e7 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -1147,7 +1147,7 @@ WarpX::PushParticlesandDeposit (int lev, amrex::Real cur_time, DtType a_dt_type, m_fields.get(FieldType::current_fp, Direction{1}, lev), m_fields.get(FieldType::current_fp, Direction{2}, lev), lev); - if (m_fields.has(FieldType::current_buf, Direction{0}, lev)) { + if (m_fields.has_vector(FieldType::current_buf, lev)) { ApplyInverseVolumeScalingToCurrentDensity( m_fields.get(FieldType::current_buf, Direction{0}, lev), m_fields.get(FieldType::current_buf, Direction{1}, lev), diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp 
b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp index 63b51cb8416..c6a1e206200 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveB.cpp @@ -79,19 +79,19 @@ void FiniteDifferenceSolver::EvolveB ( fields.get(FieldType::G_fp, lev) : fields.get(FieldType::G_cp, lev); } ablastr::fields::VectorField face_areas; - if (fields.has(FieldType::face_areas, Direction{0}, lev)) { + if (fields.has_vector(FieldType::face_areas, lev)) { face_areas = fields.get_alldirs(FieldType::face_areas, lev); } ablastr::fields::VectorField area_mod; - if (fields.has(FieldType::area_mod, Direction{0}, lev)) { + if (fields.has_vector(FieldType::area_mod, lev)) { area_mod = fields.get_alldirs(FieldType::area_mod, lev); } ablastr::fields::VectorField ECTRhofield; - if (fields.has(FieldType::ECTRhofield, Direction{0}, lev)) { + if (fields.has_vector(FieldType::ECTRhofield, lev)) { ECTRhofield = fields.get_alldirs(FieldType::ECTRhofield, lev); } ablastr::fields::VectorField Venl; - if (fields.has(FieldType::Venl, Direction{0}, lev)) { + if (fields.has_vector(FieldType::Venl, lev)) { Venl = fields.get_alldirs(FieldType::Venl, lev); } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp index db8e80cc972..03a9866fb98 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp @@ -73,19 +73,19 @@ void FiniteDifferenceSolver::EvolveE ( } ablastr::fields::VectorField edge_lengths; - if (fields.has(FieldType::edge_lengths, Direction{0}, lev)) { + if (fields.has_vector(FieldType::edge_lengths, lev)) { edge_lengths = fields.get_alldirs(FieldType::edge_lengths, lev); } ablastr::fields::VectorField face_areas; - if (fields.has(FieldType::face_areas, Direction{0}, lev)) { + if (fields.has_vector(FieldType::face_areas, lev)) { face_areas = fields.get_alldirs(FieldType::face_areas, lev); } ablastr::fields::VectorField area_mod; - if (fields.has(FieldType::area_mod, Direction{0}, lev)) { + if (fields.has_vector(FieldType::area_mod, lev)) { area_mod = fields.get_alldirs(FieldType::area_mod, lev); } ablastr::fields::VectorField ECTRhofield; - if (fields.has(FieldType::ECTRhofield, Direction{0}, lev)) { + if (fields.has_vector(FieldType::ECTRhofield, lev)) { ECTRhofield = fields.get_alldirs(FieldType::ECTRhofield, lev); } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp index 9ecae05516d..7a1a05d560d 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveEPML.cpp @@ -69,7 +69,7 @@ void FiniteDifferenceSolver::EvolveEPML ( const ablastr::fields::VectorField Jfield = (patch_type == PatchType::fine) ? 
fields.get_alldirs(FieldType::pml_j_fp, level) :
         fields.get_alldirs(FieldType::pml_j_cp, level);

     ablastr::fields::VectorField edge_lengths;
-    if (fields.has(FieldType::pml_edge_lengths, Direction{0}, level)) {
+    if (fields.has_vector(FieldType::pml_edge_lengths, level)) {
         edge_lengths = fields.get_alldirs(FieldType::pml_edge_lengths, level);
     }
     amrex::MultiFab * Ffield = nullptr;
diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp
index ac797d1e706..d64632d964a 100644
--- a/Source/Parallelization/WarpXComm.cpp
+++ b/Source/Parallelization/WarpXComm.cpp
@@ -196,7 +196,7 @@ WarpX::UpdateAuxilaryDataStagToNodal ()
     {
         if (electromagnetic_solver_id != ElectromagneticSolverAlgo::None)
         {
             Array<std::unique_ptr<MultiFab>,3> Btmp;
-            if (m_fields.has(FieldType::Bfield_cax, Direction{0}, lev)) {
+            if (m_fields.has_vector(FieldType::Bfield_cax, lev)) {
                 for (int i = 0; i < 3; ++i) {
                     Btmp[i] = std::make_unique<MultiFab>(
                         *m_fields.get(FieldType::Bfield_cax, Direction{i}, lev), amrex::make_alias, 0, 1);
@@ -290,7 +290,7 @@ WarpX::UpdateAuxilaryDataStagToNodal ()
     {
         if (electromagnetic_solver_id != ElectromagneticSolverAlgo::None)
         {
             Array<std::unique_ptr<MultiFab>,3> Etmp;
-            if (m_fields.has(FieldType::Efield_cax, Direction{0}, lev)) {
+            if (m_fields.has_vector(FieldType::Efield_cax, lev)) {
                 for (int i = 0; i < 3; ++i) {
                     Etmp[i] = std::make_unique<MultiFab>(
                         *m_fields.get(FieldType::Efield_cax, Direction{i}, lev), amrex::make_alias, 0, 1);
@@ -450,7 +450,7 @@ WarpX::UpdateAuxilaryDataSameType ()
                                                 Bfield_aux[lev - 1][2]->nComp(), ng_src, ng,
                                                 WarpX::do_single_precision_comms, crse_period);
-            if (m_fields.has(FieldType::Bfield_cax, Direction{0}, lev))
+            if (m_fields.has_vector(FieldType::Bfield_cax, lev))
             {
                 MultiFab::Copy(*m_fields.get(FieldType::Bfield_cax, Direction{0}, lev), dBx, 0, 0, m_fields.get(FieldType::Bfield_cax, Direction{0}, lev)->nComp(), ng);
                 MultiFab::Copy(*m_fields.get(FieldType::Bfield_cax, Direction{1}, lev), dBy, 0, 0, m_fields.get(FieldType::Bfield_cax, Direction{1}, lev)->nComp(), ng);
@@ -535,7 +535,7 @@ WarpX::UpdateAuxilaryDataSameType ()
                                                 WarpX::do_single_precision_comms,
                                                 crse_period);

-            if (m_fields.has(FieldType::Efield_cax, Direction{0}, lev))
+            if (m_fields.has_vector(FieldType::Efield_cax, lev))
             {
                 MultiFab::Copy(*m_fields.get(FieldType::Efield_cax, Direction{0}, lev), dEx, 0, 0, m_fields.get(FieldType::Efield_cax, Direction{0}, lev)->nComp(), ng);
                 MultiFab::Copy(*m_fields.get(FieldType::Efield_cax, Direction{1}, lev), dEy, 0, 0, m_fields.get(FieldType::Efield_cax, Direction{1}, lev)->nComp(), ng);
diff --git a/Source/Particles/LaserParticleContainer.cpp b/Source/Particles/LaserParticleContainer.cpp
index 10849a0c0d5..c804bb12797 100644
--- a/Source/Particles/LaserParticleContainer.cpp
+++ b/Source/Particles/LaserParticleContainer.cpp
@@ -586,7 +586,7 @@ LaserParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields,
     amrex::LayoutData* cost = WarpX::getCosts(lev);

     const bool has_rho = fields.has(FieldType::rho_fp, lev);
-    const bool has_buffer = fields.has(FieldType::current_buf, lev);
+    const bool has_buffer = fields.has_vector(FieldType::current_buf, lev);

 #ifdef AMREX_USE_OMP
 #pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp
index 07997a61f0c..26f9fee38d3 100644
--- a/Source/Particles/PhysicalParticleContainer.cpp
+++ b/Source/Particles/PhysicalParticleContainer.cpp
@@ -1753,9 +1753,9 @@ PhysicalParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields,
     const iMultiFab* gather_masks = WarpX::GatherBufferMasks(lev);

     const bool has_rho = fields.has(FieldType::rho_fp, lev);
-    const bool has_cjx = fields.has(FieldType::current_buf, Direction{0}, lev);
-    const bool has_cEx = fields.has(FieldType::Efield_cax, Direction{0}, lev);
-    const bool has_buffer = has_cEx || has_cjx;
+    const bool has_J_buf = fields.has_vector(FieldType::current_buf, lev);
+    const bool has_E_cax = fields.has_vector(FieldType::Efield_cax, lev);
+    const bool has_buffer = has_E_cax || has_J_buf;

     amrex::MultiFab & Ex = *fields.get(FieldType::Efield_aux, Direction{0}, lev);
     amrex::MultiFab & Ey = *fields.get(FieldType::Efield_aux, Direction{1}, lev);
     amrex::MultiFab & Ez = *fields.get(FieldType::Efield_aux, Direction{2}, lev);
@@ -1850,7 +1850,7 @@ PhysicalParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields,
                 pti, lev, current_masks, gather_masks );
         }

-        const long np_current = has_cjx ? nfine_current : np;
+        const long np_current = has_J_buf ? nfine_current : np;

         if (has_rho && ! skip_deposition && ! do_not_deposit) {
             // Deposit charge before particle push, in component 0 of MultiFab rho.
@@ -1870,7 +1870,7 @@ PhysicalParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields,

         if (! do_not_push)
         {
-            const long np_gather = has_cEx ? nfine_gather : np;
+            const long np_gather = has_E_cax ? nfine_gather : np;

             int e_is_nodal = Ex.is_nodal() and Ey.is_nodal() and Ez.is_nodal();

From 284287d29e9865070a9de7e966f32ccd06e86e75 Mon Sep 17 00:00:00 2001
From: David Grote
Date: Thu, 26 Sep 2024 02:35:17 -0700
Subject: [PATCH 008/278] Remove m_cell_centered_data from multifab map (#5322)

* Remove m_cell_centered_data from multifab map

* Register cell_centered_data MultiFab

* Cleaning & Member Variable Convention

* Shorten: fields as variable

  Prepares to remove `WarpX` class altogether from here.

---------

Co-authored-by: Axel Huebl
---
 Source/Diagnostics/BTDiagnostics.H   |  4 +-
 Source/Diagnostics/BTDiagnostics.cpp | 75 ++++++++++++++++------------
 2 files changed, 45 insertions(+), 34 deletions(-)

diff --git a/Source/Diagnostics/BTDiagnostics.H b/Source/Diagnostics/BTDiagnostics.H
index d5dd67226b7..d11db98276b 100644
--- a/Source/Diagnostics/BTDiagnostics.H
+++ b/Source/Diagnostics/BTDiagnostics.H
@@ -241,7 +241,7 @@ private:
      * will be used by all snapshots to obtain lab-frame data at the respective
      * z slice location.
      */
-    amrex::Vector<std::unique_ptr<amrex::MultiFab> > m_cell_centered_data;
+    std::string const m_cell_centered_data_name;

     /** Vector of pointers to compute cell-centered data, per level, per component
      * using the coarsening-ratio provided by the user.
      */
@@ -346,7 +346,7 @@ private:
      * \param[in] i_buffer snapshot index
      */
     void SetSnapshotFullStatus (int i_buffer);
-    /** Vector of field-data stored in the cell-centered multifab, m_cell_centered_data.
+    /** Vector of field-data stored in the cell-centered MultiFab.
      * All the fields are stored regardless of the specific fields to plot selected
      * by the user.
     */
diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp
index e00c30aa78e..631de298861 100644
--- a/Source/Diagnostics/BTDiagnostics.cpp
+++ b/Source/Diagnostics/BTDiagnostics.cpp
@@ -56,7 +56,8 @@ namespace
 }

 BTDiagnostics::BTDiagnostics (int i, const std::string& name)
-    : Diagnostics{i, name}
+    : Diagnostics{i, name},
+      m_cell_centered_data_name("BTD_cell_centered_data_" + name)
 {
     ReadParameters();
 }
@@ -83,7 +84,6 @@ void BTDiagnostics::DerivedInitData ()
     m_old_z_boost.resize(m_num_buffers);
     m_buffer_counter.resize(m_num_buffers);
     m_snapshot_ncells_lab.resize(m_num_buffers);
-    m_cell_centered_data.resize(nmax_lev);
     m_cell_center_functors.resize(nmax_lev);
     m_max_buffer_multifabs.resize(m_num_buffers);
     m_buffer_flush_counter.resize(m_num_buffers);
@@ -519,7 +519,10 @@ BTDiagnostics::DefineCellCenteredMultiFab(int lev)
 #else
     const int ncomps = static_cast<int>(m_cellcenter_varnames.size());
 #endif
-    WarpX::AllocInitMultiFab(m_cell_centered_data[lev], ba, dmap, ncomps, amrex::IntVect(ngrow), lev, "cellcentered_BTD", 0._rt);
+    bool const remake = false;
+    bool const redistribute_on_remake = false;
+    warpx.m_fields.alloc_init(m_cell_centered_data_name, lev, ba, dmap, ncomps, amrex::IntVect(ngrow), 0.0_rt,
+                              remake, redistribute_on_remake);
 }

@@ -540,12 +543,14 @@ BTDiagnostics::InitializeFieldFunctors (int lev)
 #else

     auto & warpx = WarpX::GetInstance();
+    auto & fields = warpx.m_fields;
+
     // Clear any pre-existing vector to release stored data
     // This ensures that when domain is load-balanced, the functors point
     // to the correct field-data pointers
     m_all_field_functors[lev].clear();
     // For back-transformed data, all the components are cell-centered and stored
-    // in a single multifab, m_cell_centered_data.
+    // in a single multifab.
     // Therefore, size of functors at all levels is 1.
     const int num_BT_functors = 1;
     m_all_field_functors[lev].resize(num_BT_functors);

     // Create an object of class BackTransformFunctor
     for (int i = 0; i < num_BT_functors; ++i)
     {
-        // coarsening ratio is not provided since the source MultiFab, m_cell_centered_data
+        // coarsening ratio is not provided since the source MultiFab
         // is coarsened based on the user-defined m_crse_ratio
         const int nvars = static_cast<int>(m_varnames.size());
         m_all_field_functors[lev][i] = std::make_unique<BackTransformFunctor>(
-            m_cell_centered_data[lev].get(), lev,
+            fields.get(m_cell_centered_data_name, lev), lev,
             nvars, m_num_buffers, m_varnames, m_varnames_fields);
     }

@@ -570,23 +575,23 @@
         m_cell_center_functors.at(lev).size());
     for (int comp=0; comp<m_cell_center_functors_at_lev_size; comp++){
         if ( m_cellcenter_varnames[comp] == "Ex" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio);
         } else if ( m_cellcenter_varnames[comp] == "Ey" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio);
         } else if ( m_cellcenter_varnames[comp] == "Ez" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio);
         } else if ( m_cellcenter_varnames[comp] == "Bx" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio);
         } else if ( m_cellcenter_varnames[comp] == "By" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio);
         } else if ( m_cellcenter_varnames[comp] == "Bz" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio);
         } else if ( m_cellcenter_varnames[comp] == "jx" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::current_fp,Direction{0}, lev), lev, m_crse_ratio);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::current_fp,Direction{0}, lev), lev, m_crse_ratio);
         } else if ( m_cellcenter_varnames[comp] == "jy" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::current_fp,Direction{1}, lev), lev, m_crse_ratio);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::current_fp,Direction{1}, lev), lev, m_crse_ratio);
         } else if ( m_cellcenter_varnames[comp] == "jz" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::current_fp,Direction{2}, lev), lev, m_crse_ratio);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::current_fp,Direction{2}, lev), lev, m_crse_ratio);
         } else if ( m_cellcenter_varnames[comp] == "rho" ){
             m_cell_center_functors[lev][comp] = std::make_unique<RhoFunctor>(lev, m_crse_ratio);
         }
@@ -601,8 +606,9 @@ BTDiagnostics::UpdateVarnamesForRZopenPMD ()
 {
 #ifdef WARPX_DIM_RZ
     auto & warpx = WarpX::GetInstance();
+    auto & fields = warpx.m_fields;
     using ablastr::fields::Direction;
-    const int ncomp_multimodefab = warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp();
+    const int ncomp_multimodefab = fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp();

     const int ncomp = ncomp_multimodefab;

@@ -663,21 +669,22 @@ BTDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev)

     using ablastr::fields::Direction;
     auto & warpx = WarpX::GetInstance();
-    const int ncomp_multimodefab = warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp();
+    auto & fields = warpx.m_fields;
+    const int ncomp_multimodefab = fields.get(FieldType::Efield_aux, Direction{0}, 0)->nComp();
     const int ncomp = ncomp_multimodefab;
     // Clear any pre-existing vector to release stored data
     // This ensures that when domain is load-balanced, the functors point
     // to the correct field-data pointers
     m_all_field_functors[lev].clear();
     // For back-transformed data, all the components are cell-centered and stored
-    // in a single multifab, m_cell_centered_data.
+    // in a single MultiFab.
     // Therefore, size of functors at all levels is 1
     const int num_BT_functors = 1;
     m_all_field_functors[lev].resize(num_BT_functors);
     for (int i = 0; i < num_BT_functors; ++i) {
         const int nvars = static_cast<int>(m_varnames.size());
         m_all_field_functors[lev][i] = std::make_unique<BackTransformFunctor>(
-            m_cell_centered_data[lev].get(), lev,
+            fields.get(m_cell_centered_data_name, lev), lev,
             nvars, m_num_buffers, m_varnames, m_varnames_fields);
     }

@@ -689,23 +696,23 @@ BTDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev)
     const auto m_cell_center_functors_at_lev_size = static_cast<int>(m_cell_center_functors.at(lev).size());
     for (int comp=0; comp<m_cell_center_functors_at_lev_size; comp++){
         if ( m_cellcenter_varnames_fields[comp] == "Er" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp);
         } else if ( m_cellcenter_varnames_fields[comp] == "Et" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp);
         } else if ( m_cellcenter_varnames_fields[comp] == "Ez" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio, false, ncomp);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio, false, ncomp);
         } else if ( m_cellcenter_varnames_fields[comp] == "Br" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio, false, ncomp);
         } else if ( m_cellcenter_varnames_fields[comp] == "Bt" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio, false, ncomp);
         } else if ( m_cellcenter_varnames_fields[comp] == "Bz" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio, false, ncomp);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio, false, ncomp);
         } else if ( m_cellcenter_varnames_fields[comp] == "jr" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::current_fp, Direction{0}, lev), lev, m_crse_ratio, false, ncomp);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::current_fp, Direction{0}, lev), lev, m_crse_ratio, false, ncomp);
         } else if ( m_cellcenter_varnames_fields[comp] == "jt" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::current_fp, Direction{1}, lev), lev, m_crse_ratio, false, ncomp);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::current_fp, Direction{1}, lev), lev, m_crse_ratio, false, ncomp);
         } else if ( m_cellcenter_varnames_fields[comp] == "jz" ){
-            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::current_fp, Direction{2}, lev), lev, m_crse_ratio, false, ncomp);
+            m_cell_center_functors[lev][comp] = std::make_unique<CellCenterFunctor>(fields.get(FieldType::current_fp, Direction{2}, lev), lev, m_crse_ratio, false, ncomp);
         } else if ( m_cellcenter_varnames_fields[comp] == "rho" ){
             m_cell_center_functors[lev][comp] = std::make_unique<RhoFunctor>(lev, m_crse_ratio, false, -1, false, ncomp);
         }
@@ -795,6 +802,8 @@ BTDiagnostics::PrepareFieldDataForOutput ()
     if (!m_do_back_transformed_fields) { return; }

     auto & warpx = WarpX::GetInstance();
+    auto & fields = warpx.m_fields;
+
     // In this function, we will get cell-centered data for every level, lev,
     // using the cell-center functors and their respective operators()
     // Call m_cell_center_functors->operator
@@ -804,21 +813,23 @@ BTDiagnostics::PrepareFieldDataForOutput ()
         for (int icomp = 0; icomp<m_cellcenter_varnames.size(); ++icomp)
-            // stores it in cell-centered multifab, m_cell_centered_data.
-            m_cell_center_functors[lev][icomp]->operator()(*m_cell_centered_data[lev], icomp_dst);
+            // stores it in cell-centered MultiFab.
+            m_cell_center_functors[lev][icomp]->operator()(*fields.get(m_cell_centered_data_name, lev), icomp_dst);
             icomp_dst += m_cell_center_functors[lev][icomp]->nComp();
         }
         // Check that the proper number of user-requested components are cell-centered
         AMREX_ALWAYS_ASSERT( icomp_dst == m_cellcenter_varnames.size() );
         // fill boundary call is required to average_down (flatten) data to
         // the coarsest level.
-        ablastr::utils::communication::FillBoundary(*m_cell_centered_data[lev], WarpX::do_single_precision_comms,
+        ablastr::utils::communication::FillBoundary(*fields.get(m_cell_centered_data_name, lev),
+                                                    WarpX::do_single_precision_comms,
                                                     warpx.Geom(lev).periodicity());
     }
     // Flattening out MF over levels

     for (int lev = warpx.finestLevel(); lev > 0; --lev) {
-        ablastr::coarsen::sample::Coarsen(*m_cell_centered_data[lev - 1], *m_cell_centered_data[lev], 0, 0,
+        ablastr::coarsen::sample::Coarsen(*fields.get(m_cell_centered_data_name, lev - 1),
+                                          *fields.get(m_cell_centered_data_name, lev), 0, 0,
                                           static_cast<int>(m_cellcenter_varnames.size()), 0,
                                           WarpX::RefRatio(lev-1) );
     }

From 192e1675d1843d0a01a1c5447cdd64aac1542be3 Mon Sep 17 00:00:00 2001
From: Axel Huebl
Date: Thu, 26 Sep 2024 11:29:21 -0700
Subject: [PATCH 009/278] `FieldType`: Ensure Doxygen Works (#5330)

* `FieldType`: Top Doxygen Comment

  Add a general top level comment.
* Doxygen: Expand `AMREX_ENUM` * `FieldType`: Doxygen Oneline Use a comment form that will still work after it gets pasted and squashed into a single line after macro expansion. * Doc: Include in Sphinx * RTD: Doxygen 1.9.1 to latest --- .readthedocs.yml | 13 +++-- Docs/Doxyfile | 8 +-- Docs/README.md | 5 +- Docs/conda.yml | 12 +++++ Docs/requirements.txt | 2 +- Docs/source/developers/documentation.rst | 7 ++- Docs/source/developers/fields.rst | 7 +++ Source/Fields.H | 67 +++++++++++++----------- 8 files changed, 75 insertions(+), 46 deletions(-) create mode 100644 Docs/conda.yml diff --git a/.readthedocs.yml b/.readthedocs.yml index 3da9bc77140..95f86fe4ff2 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -9,14 +9,17 @@ version: 2 build: os: ubuntu-22.04 tools: - python: "3.11" + python: "mambaforge-latest" + # python: "3.11" sphinx: - configuration: Docs/source/conf.py + configuration: Docs/source/conf.py -python: - install: - - requirements: Docs/requirements.txt +conda: + environment: Docs/conda.yml +# python: +# install: +# - requirements: Docs/requirements.txt formats: - htmlzip diff --git a/Docs/Doxyfile b/Docs/Doxyfile index 5fbb7651b18..f7740bc0328 100644 --- a/Docs/Doxyfile +++ b/Docs/Doxyfile @@ -2245,7 +2245,7 @@ ENABLE_PREPROCESSING = YES # The default value is: NO. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. -MACRO_EXPANSION = NO +MACRO_EXPANSION = YES # If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then # the macro expansion is limited to the macros specified with the PREDEFINED and @@ -2253,7 +2253,7 @@ MACRO_EXPANSION = NO # The default value is: NO. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. -EXPAND_ONLY_PREDEF = NO +EXPAND_ONLY_PREDEF = YES # If the SEARCH_INCLUDES tag is set to YES, the include files in the # INCLUDE_PATH will be searched if a #include is found. @@ -2305,6 +2305,8 @@ PREDEFINED = AMREX_Linux=1 \ WARPX_QED=1 \ WARPX_QED_TABLE_GEN=1 +PREDEFINED += "AMREX_ENUM(CLASS,...)=\"enum class CLASS : int { __VA_ARGS__ };\"" + # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this # tag can be used to specify a list of macro names that should be expanded. The # macro definition that is found in the sources will be used. Use the PREDEFINED @@ -2312,7 +2314,7 @@ PREDEFINED = AMREX_Linux=1 \ # definition found in the source code. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. -EXPAND_AS_DEFINED = +EXPAND_AS_DEFINED = AMREX_ENUM # If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will # remove all references to function-like macros that are alone on a line, have diff --git a/Docs/README.md b/Docs/README.md index e6fac921b04..6d3903ab327 100644 --- a/Docs/README.md +++ b/Docs/README.md @@ -9,12 +9,13 @@ More information can be found in Docs/source/developers/documentation.rst. 
Install the Python requirements for compiling the documentation: ``` -python3 -m pip install -r Docs/requirements.txt +cd Docs/ +python3 -m pip install -r requirements.txt ``` ### Compiling the documentation -`cd` into the `Docs/` directory and type +Still in the `Docs/` directory, type ``` make html ``` diff --git a/Docs/conda.yml b/Docs/conda.yml new file mode 100644 index 00000000000..1e23c203b2b --- /dev/null +++ b/Docs/conda.yml @@ -0,0 +1,12 @@ +name: readthedocs + +channels: + - conda-forge + - nodefaults + +dependencies: + - python + - doxygen + - pip + - pip: + - -r requirements.txt diff --git a/Docs/requirements.txt b/Docs/requirements.txt index a8c2af0e474..bc34e69cd65 100644 --- a/Docs/requirements.txt +++ b/Docs/requirements.txt @@ -5,7 +5,7 @@ # License: BSD-3-Clause-LBNL # WarpX PICMI bindings w/o C++ component (used for autoclass docs) --e Python +-e ../Python breathe docutils>=0.17.1 diff --git a/Docs/source/developers/documentation.rst b/Docs/source/developers/documentation.rst index a5013299336..5d604bcf9b3 100644 --- a/Docs/source/developers/documentation.rst +++ b/Docs/source/developers/documentation.rst @@ -56,16 +56,15 @@ First, make sure you are in the root directory of WarpX's source and install the .. code-block:: sh - python3 -m pip install -r Docs/requirements.txt + cd Docs/ + python3 -m pip install -r requirements.txt You will also need Doxygen (macOS: ``brew install doxygen``; Ubuntu: ``sudo apt install doxygen``). -Then, to compile the documentation, use +Still in the ``Docs/`` directory, compile the documentation via .. code-block:: sh - cd Docs/ - make html # This will first compile the Doxygen documentation (execute doxygen) # and then build html pages from rst files using sphinx and breathe. diff --git a/Docs/source/developers/fields.rst b/Docs/source/developers/fields.rst index d0af160afef..9d980119814 100644 --- a/Docs/source/developers/fields.rst +++ b/Docs/source/developers/fields.rst @@ -37,6 +37,13 @@ The ``MultiFab`` constructor (for, e.g., ``Ex`` on level ``lev``) is called in ` By default, the ``MultiFab`` are set to ``0`` at initialization. They can be assigned a different value in ``WarpX::InitLevelData``. +Field Names +----------- + +The commonly used WarpX field names are defined in: + +.. doxygenenum:: warpx::fields::FieldType + Field solver ------------ diff --git a/Source/Fields.H b/Source/Fields.H index 0aa3cbdd0c0..f85b6c4584c 100644 --- a/Source/Fields.H +++ b/Source/Fields.H @@ -18,25 +18,30 @@ namespace warpx::fields { + /** Unique identifiers for WarpX scalar and vector fields. + * + * These are implemented as amrex::MultiFab (one or one per component "direction", + * respectively) and stored in the ablastr::fields::MultiFabRegister . + */ AMREX_ENUM(FieldType, None, - Efield_aux, //!< Field that the particles gather from. Obtained from Efield_fp (and Efield_cp when using MR); see UpdateAuxilaryData - Bfield_aux, //!< Field that the particles gather from. 
Obtained from Bfield_fp (and Bfield_cp when using MR); see UpdateAuxilaryData - Efield_fp, //!< The field that is updated by the field solver at each timestep - Bfield_fp, //!< The field that is updated by the field solver at each timestep - Efield_fp_external, //!< Stores grid particle fields provided by the user as through an openPMD file - Bfield_fp_external, //!< Stores grid particle fields provided by the user as through an openPMD file - current_fp, //!< The current that is used as a source for the field solver - current_fp_nodal, //!< Only used when using nodal current deposition - current_fp_vay, //!< Only used when using Vay current deposition - current_buf, //!< Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. - current_store, //!< Only used when doing subcycling with mesh refinement, for book-keeping of currents - rho_buf, //!< Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. - rho_fp, //!< The charge density that is used as a source for the field solver (mostly for labframe electrostatic and PSATD) - F_fp, //!< Used for divE cleaning - G_fp, //!< Used for divB cleaning - phi_fp, //!< Obtained by the Poisson solver, for labframe electrostatic - vector_potential_fp, //!< Obtained by the magnetostatic solver + Efield_aux, /**< Field that the particles gather from. Obtained from Efield_fp (and Efield_cp when using MR); see UpdateAuxilaryData */ + Bfield_aux, /**< Field that the particles gather from. Obtained from Bfield_fp (and Bfield_cp when using MR); see UpdateAuxilaryData */ + Efield_fp, /**< The field that is updated by the field solver at each timestep */ + Bfield_fp, /**< The field that is updated by the field solver at each timestep */ + Efield_fp_external, /**< Stores grid particle fields provided by the user as through an openPMD file */ + Bfield_fp_external, /**< Stores grid particle fields provided by the user as through an openPMD file */ + current_fp, /**< The current that is used as a source for the field solver */ + current_fp_nodal, /**< Only used when using nodal current deposition */ + current_fp_vay, /**< Only used when using Vay current deposition */ + current_buf, /**< Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. */ + current_store, /**< Only used when doing subcycling with mesh refinement, for book-keeping of currents */ + rho_buf, /**< Particles that are close to the edge of the MR patch (i.e. in the deposition buffer) deposit to this field. */ + rho_fp, /**< The charge density that is used as a source for the field solver (mostly for labframe electrostatic and PSATD) */ + F_fp, /**< Used for divE cleaning */ + G_fp, /**< Used for divB cleaning */ + phi_fp, /**< Obtained by the Poisson solver, for labframe electrostatic */ + vector_potential_fp, /**< Obtained by the magnetostatic solver */ vector_potential_fp_nodal, vector_potential_grad_buf_e_stag, vector_potential_grad_buf_b_stag, @@ -45,19 +50,19 @@ namespace warpx::fields hybrid_current_fp_temp, hybrid_current_fp_ampere, hybrid_current_fp_external, - Efield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level - Bfield_cp, //!< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level - current_cp, //!< Only used with MR. 
The current that is used as a source for the field solver, on the coarse patch of each level - rho_cp, //!< Only used with MR. The charge density that is used as a source for the field solver, on the coarse patch of each level - F_cp, //!< Only used with MR. Used for divE cleaning, on the coarse patch of each level - G_cp, //!< Only used with MR. Used for divB cleaning, on the coarse patch of each level - Efield_cax, //!< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field - Bfield_cax, //!< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field - E_external_particle_field, //!< Stores external particle fields provided by the user as through an openPMD file - B_external_particle_field, //!< Stores external particle fields provided by the user as through an openPMD file - distance_to_eb, //!< Only used with embedded boundaries (EB). Stores the distance to the nearest EB - edge_lengths, //!< Only used with embedded boundaries (EB). Indicates the length of the cell edge that is covered by the EB, in SI units - face_areas, //!< Only used with embedded boundaries (EB). Indicates the area of the cell face that is covered by the EB, in SI units + Efield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */ + Bfield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */ + current_cp, /**< Only used with MR. The current that is used as a source for the field solver, on the coarse patch of each level */ + rho_cp, /**< Only used with MR. The charge density that is used as a source for the field solver, on the coarse patch of each level */ + F_cp, /**< Only used with MR. Used for divE cleaning, on the coarse patch of each level */ + G_cp, /**< Only used with MR. Used for divB cleaning, on the coarse patch of each level */ + Efield_cax, /**< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field */ + Bfield_cax, /**< Only used with MR. Particles that are close to the edge of the MR patch (i.e. in the gather buffer) gather from this field */ + E_external_particle_field, /**< Stores external particle fields provided by the user as through an openPMD file */ + B_external_particle_field, /**< Stores external particle fields provided by the user as through an openPMD file */ + distance_to_eb, /**< Only used with embedded boundaries (EB). Stores the distance to the nearest EB */ + edge_lengths, /**< Only used with embedded boundaries (EB). Indicates the length of the cell edge that is covered by the EB, in SI units */ + face_areas, /**< Only used with embedded boundaries (EB). 
Indicates the area of the cell face that is covered by the EB, in SI units */ area_mod, pml_E_fp, pml_B_fp, @@ -74,7 +79,7 @@ namespace warpx::fields Bfield_avg_fp, Efield_avg_cp, Bfield_avg_cp, - B_old, //!< Stores the value of B at the beginning of the timestep, for the implicit solver + B_old, /**< Stores the value of B at the beginning of the timestep, for the implicit solver */ ECTRhofield, Venl ); From 36b55449dccb0a8bf60a431341c730c5b74cc5a1 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 26 Sep 2024 15:27:45 -0700 Subject: [PATCH 010/278] Skip Guard in IGF Solver (Revert #5284) (#5335) * Skip Guard in IGF Solver (Revert #5284) This reverts the inclusion of the guard cells when copying rho. We could not yet determine why, but with this change the FFT 3D solver in ImpactX does not converge to the analytical solutions anymore. * Reset Checksum --- .../test_3d_open_bc_poisson_solver.json | 24 +++++++++---------- .../fields/IntegratedGreenFunctionSolver.cpp | 2 +- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json b/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json index 0ca6bde570a..af9ab3a0bdd 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json +++ b/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json @@ -1,20 +1,20 @@ { "lev=0": { - "Bx": 100915975.15403552, - "By": 157610677.3147734, - "Bz": 1.2276713711194638e-13, - "Ex": 4.725066923359797e+16, - "Ey": 3.025396149317578e+16, - "Ez": 3276584.4383433824, - "rho": 10994013582437.197 + "Bx": 100915933.446046, + "By": 157610622.18548763, + "Bz": 2.76973993530483e-13, + "Ex": 4.725065270619211e+16, + "Ey": 3.0253948989388292e+16, + "Ez": 3276573.9514776673, + "rho": 10994013582437.193 }, "electron": { - "particle_momentum_x": 5.701279599509506e-19, - "particle_momentum_y": 3.650453172383178e-19, + "particle_momentum_x": 5.701277606055763e-19, + "particle_momentum_y": 3.6504516636842883e-19, "particle_momentum_z": 1.145432768297242e-10, - "particle_position_x": 17.31408691249785, - "particle_position_y": 0.25836912671878015, + "particle_position_x": 17.314086912497864, + "particle_position_y": 0.25836912671877965, "particle_position_z": 10066.329600000008, "particle_weight": 19969036501.910976 } -} \ No newline at end of file +} diff --git a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp index ae11ad5087d..40b36740ae5 100644 --- a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp +++ b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp @@ -121,7 +121,7 @@ computePhiIGF ( amrex::MultiFab const & rho, BL_PROFILE_VAR_START(timer_pcopies); // Copy from rho including its ghost cells to tmp_rho - tmp_rho.ParallelCopy( rho, 0, 0, 1, rho.nGrowVect(), amrex::IntVect::TheZeroVector() ); + tmp_rho.ParallelCopy( rho, 0, 0, 1, amrex::IntVect::TheZeroVector(), amrex::IntVect::TheZeroVector() ); BL_PROFILE_VAR_STOP(timer_pcopies); #if !defined(ABLASTR_USE_HEFFTE) From eef12e96149b80f92600654c52994a230658b199 Mon Sep 17 00:00:00 2001 From: Brian Naranjo Date: Sat, 28 Sep 2024 07:37:38 -0700 Subject: [PATCH 011/278] General moving-window transformations in boosted-frame simulations (#5226) * General moving-window transformations in boosted-frame simulations * Default speed of moving window to speed of boosted frame * Extend simulation volume enough so that particles don't exit * Include moving window speed in diag_lo 
and diag_hi transformations * Modify bounds so as to produce the same results as in previous versions --------- Co-authored-by: Remi Lehe --- ...uts_test_3d_hard_edged_quadrupoles_boosted | 4 +- .../inputs_test_3d_plasma_lens_boosted | 4 +- ...est_3d_hard_edged_quadrupoles_boosted.json | 34 +++++++------- .../test_3d_plasma_lens_boosted.json | 32 +++++++------- Source/Diagnostics/Diagnostics.cpp | 5 ++- Source/Utils/WarpXUtil.H | 4 ++ Source/Utils/WarpXUtil.cpp | 44 ++++++++++++++++++- Source/WarpX.cpp | 43 ++++-------------- 8 files changed, 94 insertions(+), 76 deletions(-) diff --git a/Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_boosted b/Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_boosted index 668ec73d2dd..c056ff1fc66 100644 --- a/Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_boosted +++ b/Examples/Tests/accelerator_lattice/inputs_test_3d_hard_edged_quadrupoles_boosted @@ -2,8 +2,8 @@ max_step = 50 amr.n_cell = 16 16 8 amr.max_level = 0 geometry.dims = 3 -geometry.prob_lo = -0.2 -0.2 -0.1 -geometry.prob_hi = +0.2 +0.2 +0.1 +geometry.prob_lo = -0.2 -0.2 -0.1866 +geometry.prob_hi = +0.2 +0.2 +0.1866 # Boundary condition boundary.field_lo = pec pec pec diff --git a/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_boosted b/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_boosted index fa18ac439c4..b00779bae65 100644 --- a/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_boosted +++ b/Examples/Tests/plasma_lens/inputs_test_3d_plasma_lens_boosted @@ -8,8 +8,8 @@ amr.max_level = 0 # Geometry geometry.dims = 3 -geometry.prob_lo = -1.0 -1.0 -1.0 # physical domain -geometry.prob_hi = 1.0 1.0 2.0 +geometry.prob_lo = -1.0 -1.0 -1.866 # physical domain +geometry.prob_hi = 1.0 1.0 3.732 boundary.field_lo = pec pec pec boundary.field_hi = pec pec pec diff --git a/Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_boosted.json b/Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_boosted.json index acec34286f7..0a601b7b437 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_boosted.json +++ b/Regression/Checksum/benchmarks_json/test_3d_hard_edged_quadrupoles_boosted.json @@ -1,22 +1,22 @@ { + "lev=0": { + "Bx": 3.254604354043409e-14, + "By": 3.2768679907552955e-14, + "Bz": 1.0615351421410278e-16, + "Ex": 2.3084916770539354e-05, + "Ey": 2.2657235922655432e-05, + "Ez": 1.9978004351148e-05, + "jx": 1.781971994166362e-10, + "jy": 4.2163624424546344e-20, + "jz": 1.0378980680353126e-07 + }, "electron": { - "particle_momentum_x": 5.955475926588059e-26, - "particle_momentum_y": 1.4612764777454504e-35, - "particle_momentum_z": 3.4687284535374423e-23, - "particle_position_x": 0.049960237123814574, - "particle_position_y": 8.397636119991403e-15, - "particle_position_z": 0.10931687737912647, + "particle_momentum_x": 5.955475927655105e-26, + "particle_momentum_y": 1.4613271542201658e-35, + "particle_momentum_z": 3.468728453537439e-23, + "particle_position_x": 0.04996023704063194, + "particle_position_y": 8.398113230295983e-15, + "particle_position_z": 0.10931682580470406, "particle_weight": 1.0 - }, - "lev=0": { - "Bx": 3.254531465641299e-14, - "By": 3.2768092409497234e-14, - "Bz": 1.0615286316115558e-16, - "Ex": 2.30845657253269e-05, - "Ey": 2.2656898931877975e-05, - "Ez": 1.997747654112569e-05, - "jx": 1.7819477343635878e-10, - "jy": 4.2163030523377745e-20, - "jz": 1.0378839382497739e-07 } } diff --git 
a/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_boosted.json b/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_boosted.json index 6d5eabb492e..e1fa54618ee 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_boosted.json +++ b/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_boosted.json @@ -1,21 +1,21 @@ { "lev=0": { - "Bx": 1.3073041371012706e-14, - "By": 1.3033038210840872e-14, - "Bz": 5.595105968291083e-17, - "Ex": 2.801134785671445e-06, - "Ey": 2.8088613469887243e-06, - "Ez": 3.343430731047825e-06, - "jx": 2.5155716299904363e-11, - "jy": 2.013718424043256e-11, - "jz": 6.00631499206418e-09 + "Bx": 1.307357220398482e-14, + "By": 1.3033571630685163e-14, + "Bz": 5.594998319468307e-17, + "Ex": 2.8010832905044288e-06, + "Ey": 2.8088096742407935e-06, + "Ez": 3.3433681277560495e-06, + "jx": 2.5151718871714067e-11, + "jy": 2.013398608921663e-11, + "jz": 6.0063967622563335e-09 }, "electrons": { - "particle_momentum_x": 7.437088723328491e-24, - "particle_momentum_y": 5.9495056615288754e-24, - "particle_momentum_z": 5.117548636687908e-22, - "particle_position_x": 0.036489969262013186, - "particle_position_y": 0.029201200231260247, - "particle_position_z": 6.9681085285694095 + "particle_momentum_x": 7.43708887164806e-24, + "particle_momentum_y": 5.949505779760011e-24, + "particle_momentum_z": 5.117548636790359e-22, + "particle_position_x": 0.03648994812700447, + "particle_position_y": 0.029201183320618985, + "particle_position_z": 6.968107021318396 } -} +} \ No newline at end of file diff --git a/Source/Diagnostics/Diagnostics.cpp b/Source/Diagnostics/Diagnostics.cpp index dc28aeda095..fd079479285 100644 --- a/Source/Diagnostics/Diagnostics.cpp +++ b/Source/Diagnostics/Diagnostics.cpp @@ -229,8 +229,9 @@ Diagnostics::BaseReadParameters () if (WarpX::boost_direction[ dim_map[WarpX::moving_window_dir] ] == 1) { // Convert user-defined lo and hi for diagnostics to account for boosted-frame // simulations with moving window - const amrex::Real convert_factor = 1._rt/(WarpX::gamma_boost * (1._rt - WarpX::beta_boost) ); - // Assuming that the window travels with speed c + const amrex::Real beta_window = WarpX::moving_window_v / PhysConst::c; + const amrex::Real convert_factor = 1._rt/( + WarpX::gamma_boost * (1._rt - WarpX::beta_boost * beta_window) ); m_lo[WarpX::moving_window_dir] *= convert_factor; m_hi[WarpX::moving_window_dir] *= convert_factor; } diff --git a/Source/Utils/WarpXUtil.H b/Source/Utils/WarpXUtil.H index e35b0cdb313..46399b439d6 100644 --- a/Source/Utils/WarpXUtil.H +++ b/Source/Utils/WarpXUtil.H @@ -33,6 +33,10 @@ void ParseGeometryInput(); void ReadBoostedFrameParameters(amrex::Real& gamma_boost, amrex::Real& beta_boost, amrex::Vector& boost_direction); +void ReadMovingWindowParameters( + int& do_moving_window, int& start_moving_window_step, int& end_moving_window_step, + int& moving_window_dir, amrex::Real& moving_window_v); + void ConvertLabParamsToBoost(); /** diff --git a/Source/Utils/WarpXUtil.cpp b/Source/Utils/WarpXUtil.cpp index 856e021abb3..d6f465fa901 100644 --- a/Source/Utils/WarpXUtil.cpp +++ b/Source/Utils/WarpXUtil.cpp @@ -140,6 +140,43 @@ void ReadBoostedFrameParameters(Real& gamma_boost, Real& beta_boost, } } +void ReadMovingWindowParameters( + int& do_moving_window, int& start_moving_window_step, int& end_moving_window_step, + int& moving_window_dir, amrex::Real& moving_window_v) +{ + const ParmParse pp_warpx("warpx"); + pp_warpx.query("do_moving_window", do_moving_window); + if (do_moving_window) { + 
utils::parser::queryWithParser( + pp_warpx, "start_moving_window_step", start_moving_window_step); + utils::parser::queryWithParser( + pp_warpx, "end_moving_window_step", end_moving_window_step); + std::string s; + pp_warpx.get("moving_window_dir", s); + + if (s == "z" || s == "Z") { + moving_window_dir = WARPX_ZINDEX; + } +#if defined(WARPX_DIM_3D) + else if (s == "y" || s == "Y") { + moving_window_dir = 1; + } +#endif +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_3D) + else if (s == "x" || s == "X") { + moving_window_dir = 0; + } +#endif + else { + WARPX_ABORT_WITH_MESSAGE("Unknown moving_window_dir: "+s); + } + + utils::parser::getWithParser( + pp_warpx, "moving_window_v", moving_window_v); + moving_window_v *= PhysConst::c; + } +} + void ConvertLabParamsToBoost() { Real gamma_boost = 1., beta_boost = 0.; @@ -196,8 +233,11 @@ void ConvertLabParamsToBoost() { if (boost_direction[dim_map[idim]]) { amrex::Real convert_factor; - // Assume that the window travels with speed +c - convert_factor = 1._rt/( gamma_boost * ( 1 - beta_boost ) ); + amrex::Real beta_window = beta_boost; + if (WarpX::do_moving_window && idim == WarpX::moving_window_dir) { + beta_window = WarpX::moving_window_v / PhysConst::c; + } + convert_factor = 1._rt/( gamma_boost * ( 1 - beta_boost * beta_window ) ); prob_lo[idim] *= convert_factor; prob_hi[idim] *= convert_factor; if (max_level > 0){ diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 7f9288debb7..89254e05c98 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -200,6 +200,10 @@ void WarpX::MakeWarpX () { ParseGeometryInput(); + ReadMovingWindowParameters( + do_moving_window, start_moving_window_step, end_moving_window_step, + moving_window_dir, moving_window_v); + ConvertLabParamsToBoost(); ReadBCParams(); @@ -623,42 +627,11 @@ WarpX::ReadParameters () pp_warpx.query("compute_max_step_from_btd", compute_max_step_from_btd); - pp_warpx.query("do_moving_window", do_moving_window); - if (do_moving_window) - { - utils::parser::queryWithParser( - pp_warpx, "start_moving_window_step", start_moving_window_step); - utils::parser::queryWithParser( - pp_warpx, "end_moving_window_step", end_moving_window_step); - std::string s; - pp_warpx.get("moving_window_dir", s); - - if (s == "z" || s == "Z") { - moving_window_dir = WARPX_ZINDEX; - } -#if defined(WARPX_DIM_3D) - else if (s == "y" || s == "Y") { - moving_window_dir = 1; - } -#endif -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_3D) - else if (s == "x" || s == "X") { - moving_window_dir = 0; - } -#endif - - else { - WARPX_ABORT_WITH_MESSAGE("Unknown moving_window_dir: "+s); - } - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(Geom(0).isPeriodic(moving_window_dir) == 0, - "The problem must be non-periodic in the moving window direction"); - + if (do_moving_window) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + Geom(0).isPeriodic(moving_window_dir) == 0, + "The problem must be non-periodic in the moving window direction"); moving_window_x = geom[0].ProbLo(moving_window_dir); - - utils::parser::getWithParser( - pp_warpx, "moving_window_v", moving_window_v); - moving_window_v *= PhysConst::c; } m_p_ext_field_params = std::make_unique(pp_warpx); From b1aa846a7968bb30c1f49d2129a46efb4891609c Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 30 Sep 2024 08:45:12 -0700 Subject: [PATCH 012/278] AMReX/pyAMReX/PICSAR: Weekly Update (#5310) * AMReX: Weekly Update * pyAMReX: Weekly Update --- .github/workflows/cuda.yml | 2 +- cmake/dependencies/AMReX.cmake | 2 +- cmake/dependencies/pyAMReX.cmake | 2 +- 3 files changed, 3 insertions(+), 3 
deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 28bfaaf57a7..ce1f4454345 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -131,7 +131,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 3734079379bb6b2a3850d197241f6b2c3b3bfa7d && cd - + cd ../amrex && git checkout --detach 467dd681af11043304757f11d761cf0661c97e56 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 72642b575e8..498d56f5f81 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -279,7 +279,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "3734079379bb6b2a3850d197241f6b2c3b3bfa7d" +set(WarpX_amrex_branch "467dd681af11043304757f11d761cf0661c97e56" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index e93851443c0..69711866f74 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "41c856b8a588c3c8b04bb35d2d05b56f6ce0dd7f" +set(WarpX_pyamrex_branch "1c66690f83244196c5655293f1381303a7d1589d" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") From 84d85ee07144379cce999a37cdef3346d1da40bc Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 30 Sep 2024 13:52:11 -0700 Subject: [PATCH 013/278] AMReX: Weekly Update (#5343) --- .github/workflows/cuda.yml | 2 +- cmake/dependencies/AMReX.cmake | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index ce1f4454345..2bc5d35bb4a 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -131,7 +131,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 467dd681af11043304757f11d761cf0661c97e56 && cd - + cd ../amrex && git checkout --detach 103d0e9f7c7e7fbedbeb61512d3f37f3e2b03770 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 498d56f5f81..7524d919c61 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -279,7 +279,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "467dd681af11043304757f11d761cf0661c97e56" +set(WarpX_amrex_branch "103d0e9f7c7e7fbedbeb61512d3f37f3e2b03770" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") From d1a338e90ed1ad7ac2f010f47409aa48a2265c88 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 30 Sep 2024 14:16:50 -0700 Subject: [PATCH 014/278] Poisson `computePhi`: Simplify Boundary Handler (#5346) Move the boundary handler to become an optional argument, which otherwise defaults to Dirichlet conditions, e.g., in non-EB cases. This simplifies the ImpactX implementation and fixes a linker issue with CUDA for ImpactX. --- .../ElectrostaticSolver.cpp | 2 +- Source/ablastr/fields/PoissonSolver.H | 39 +++++++++++++------ 2 files changed, 29 insertions(+), 12 deletions(-) diff --git a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp index 1ced0a07152..0b1dca675be 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp @@ -201,12 +201,12 @@ ElectrostaticSolver::computePhi ( warpx.DistributionMap(), warpx.boxArray(), WarpX::grid_type, - *m_poisson_boundary_handler, is_solver_igf_on_lev0, EB::enabled(), WarpX::do_single_precision_comms, warpx.refRatio(), post_phi_calculation, + *m_poisson_boundary_handler, warpx.gett_new(0), eb_farray_box_factory ); diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index 8b4f9cea9a1..d7eeecead1b 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -164,8 +164,8 @@ inline void interpolatePhiBetweenLevels ( * \vec{\nabla}^2 r \phi - (\vec{\beta}\cdot\vec{\nabla})^2 r \phi = -\frac{r \rho}{\epsilon_0} * \f] * - * \tparam T_BoundaryHandler handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler * \tparam T_PostPhiCalculationFunctor a calculation per level directly after phi was calculated + * \tparam T_BoundaryHandler handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler (EB ONLY) * \tparam T_FArrayBoxFactory usually nothing or an amrex::EBFArrayBoxFactory (EB ONLY) * \param[in] rho The charge density a given species * \param[out] phi The potential to be computed by this function @@ -188,8 +188,8 @@ inline void interpolatePhiBetweenLevels ( * \param[in] eb_farray_box_factory a factory for field data, @see amrex::EBFArrayBoxFactory; required for embedded boundaries (default: none) */ template< - typename T_BoundaryHandler, typename T_PostPhiCalculationFunctor = std::nullopt_t, + typename T_BoundaryHandler = std::nullopt_t, typename T_FArrayBoxFactory = void > void @@ -205,12 +205,12 @@ computePhi ( 
amrex::Vector const& dmap, amrex::Vector const& grids, utils::enums::GridType grid_type, - T_BoundaryHandler const boundary_handler, bool is_solver_igf_on_lev0, bool eb_enabled = false, bool do_single_precision_comms = false, std::optional > rel_ref_ratio = std::nullopt, [[maybe_unused]] T_PostPhiCalculationFunctor post_phi_calculation = std::nullopt, + [[maybe_unused]] T_BoundaryHandler const boundary_handler = std::nullopt, // only used for EB [[maybe_unused]] std::optional current_time = std::nullopt, // only used for EB [[maybe_unused]] std::optional > eb_farray_box_factory = std::nullopt // only used for EB ) @@ -349,12 +349,18 @@ computePhi ( #endif #if defined(AMREX_USE_EB) if (eb_enabled) { - // if the EB potential only depends on time, the potential can be passed - // as a float instead of a callable - if (boundary_handler.phi_EB_only_t) { - linop_nodelap->setEBDirichlet(boundary_handler.potential_eb_t(current_time.value())); - } else { - linop_nodelap->setEBDirichlet(boundary_handler.getPhiEB(current_time.value())); + if constexpr (!std::is_same_v) { + // if the EB potential only depends on time, the potential can be passed + // as a float instead of a callable + if (boundary_handler.phi_EB_only_t) { + linop_nodelap->setEBDirichlet(boundary_handler.potential_eb_t(current_time.value())); + } else { + linop_nodelap->setEBDirichlet(boundary_handler.getPhiEB(current_time.value())); + } + } else + { + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, + "EB Poisson solver enabled but no 'boundary_handler' passed!"); } } #endif @@ -372,9 +378,20 @@ computePhi ( linop = std::move(linop_tenslap); } - // Solve the Poisson equation - linop->setDomainBC(boundary_handler.lobc, boundary_handler.hibc); + // Level 0 domain boundary + if constexpr (std::is_same_v) { + amrex::Array const lobc = {AMREX_D_DECL( + amrex::LinOpBCType::Dirichlet, + amrex::LinOpBCType::Dirichlet, + amrex::LinOpBCType::Dirichlet + )}; + amrex::Array const hibc = lobc; + linop->setDomainBC(lobc, hibc); + } else { + linop->setDomainBC(boundary_handler.lobc, boundary_handler.hibc); + } + // Solve the Poisson equation amrex::MLMG mlmg(*linop); // actual solver defined here mlmg.setVerbose(verbosity); mlmg.setMaxIter(max_iters); From 2f2b66787fe1405f9cf6db43853832cdcbb673a4 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 30 Sep 2024 16:09:28 -0700 Subject: [PATCH 015/278] [pre-commit.ci] pre-commit autoupdate (#5347) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.7 → v0.6.8](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.7...v0.6.8) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ae8881150c9..d2b15b8af95 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.7 + rev: v0.6.8 hooks: # Run the linter - id: ruff From 3b447589c7eb167bc71fe06b1478bb39306750f4 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 30 Sep 2024 18:44:32 -0700 Subject: [PATCH 016/278] Doc: HPC no heFFTe yet (#5348) Do not default-advertise to enable heFFTe on Perlmutter and Lonestar yet. 
Introduce user-facing default for all HPC systems at a later point when it makes more sense. --- Docs/source/install/hpc/lonestar6.rst | 4 ++-- Docs/source/install/hpc/perlmutter.rst | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Docs/source/install/hpc/lonestar6.rst b/Docs/source/install/hpc/lonestar6.rst index 81795545da3..f1512e4a508 100644 --- a/Docs/source/install/hpc/lonestar6.rst +++ b/Docs/source/install/hpc/lonestar6.rst @@ -90,7 +90,7 @@ Use the following :ref:`cmake commands ` to compile the applicat cd $HOME/src/warpx rm -rf build_pm_gpu - cmake -S . -B build_gpu -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_gpu -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_gpu -j 16 The WarpX application executables are now in ``$HOME/src/warpx/build_gpu/bin/``. @@ -101,7 +101,7 @@ Additionally, the following commands will install WarpX as a Python module: cd $HOME/src/warpx rm -rf build_pm_gpu_py - cmake -S . -B build_gpu_py -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_gpu_py -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_gpu_py -j 16 --target pip_install Now, you can :ref:`submit Lonestar6 compute jobs ` for WarpX :ref:`Python (PICMI) scripts ` (:ref:`example scripts `). diff --git a/Docs/source/install/hpc/perlmutter.rst b/Docs/source/install/hpc/perlmutter.rst index dc5a985e99f..9612b64476d 100644 --- a/Docs/source/install/hpc/perlmutter.rst +++ b/Docs/source/install/hpc/perlmutter.rst @@ -153,7 +153,7 @@ Use the following :ref:`cmake commands ` to compile the applicat cd $HOME/src/warpx rm -rf build_pm_gpu - cmake -S . -B build_pm_gpu -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_pm_gpu -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_pm_gpu -j 16 The WarpX application executables are now in ``$HOME/src/warpx/build_pm_gpu/bin/``. @@ -164,7 +164,7 @@ Use the following :ref:`cmake commands ` to compile the applicat cd $HOME/src/warpx rm -rf build_pm_gpu_py - cmake -S . -B build_pm_gpu_py -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_pm_gpu_py -DWarpX_COMPUTE=CUDA -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_pm_gpu_py -j 16 --target pip_install .. tab-item:: CPU Nodes @@ -174,7 +174,7 @@ Use the following :ref:`cmake commands ` to compile the applicat cd $HOME/src/warpx rm -rf build_pm_cpu - cmake -S . -B build_pm_cpu -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_pm_cpu -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_pm_cpu -j 16 The WarpX application executables are now in ``$HOME/src/warpx/build_pm_cpu/bin/``. @@ -184,7 +184,7 @@ Use the following :ref:`cmake commands ` to compile the applicat rm -rf build_pm_cpu_py - cmake -S . 
-B build_pm_cpu_py -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_HEFFTE=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" + cmake -S . -B build_pm_cpu_py -DWarpX_COMPUTE=OMP -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_pm_cpu_py -j 16 --target pip_install Now, you can :ref:`submit Perlmutter compute jobs ` for WarpX :ref:`Python (PICMI) scripts ` (:ref:`example scripts `). From 617d7bafd1ed1740494cecba47c536c5fb0fae4a Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 30 Sep 2024 23:48:41 -0700 Subject: [PATCH 017/278] Doc: NVTX in Nvidia Conda (#5345) The package `cuda-nvtx-dev` does not seem to be part of the `cuda` package. https://anaconda.org/nvidia/repo This fixes: ``` CMake Error at build/_deps/fetchedamrex-src/Tools/CMake/AMReXParallelBackends.cmake:71 (target_link_libraries): Target "amrex_3d" links to: CUDA::nvToolsExt but the target was not found. Possible reasons include: * There is a typo in the target name. * A find_package call is missing for an IMPORTED target. * An ALIAS target is missing. Call Stack (most recent call first): build/_deps/fetchedamrex-src/Src/CMakeLists.txt:40 (include) ``` --- Docs/source/install/dependencies.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index 72c599ae2bd..71a607eae6a 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -117,7 +117,7 @@ For Nvidia CUDA GPU support, you will need to have `a recent CUDA driver install .. code-block:: bash - conda install -c nvidia -c conda-forge cuda cupy + conda install -c nvidia -c conda-forge cuda cuda-nvtx-dev cupy More info for `CUDA-enabled ML packages `__. From 2d61720395eaf520e51784eb49bec0e994477e6c Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Tue, 1 Oct 2024 00:29:12 -0700 Subject: [PATCH 018/278] Use only plasma current in `HybridPICSolveE` (#5273) * use plasma current rather than total current in `HybridPICSolveE` * remove logic to subtract J_ext from plasma current in `JdispFunctor` * add one ghost cell to the hybrid-pic external current since we interpolate to a nodal grid * Fix Doxygen Signed-off-by: roelof-groenewald --- ..._ohm_solver_magnetic_reconnection_picmi.py | 2 +- Python/pywarpx/fields.py | 14 ++-- .../ComputeDiagFunctors/JdispFunctor.cpp | 56 ++------------ .../FiniteDifferenceSolver.H | 22 +++--- .../HybridPICModel/HybridPICModel.H | 10 +-- .../HybridPICModel/HybridPICModel.cpp | 76 ++++++++++--------- .../HybridPICSolveE.cpp | 39 ++++------ .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 2 +- Source/Fields.H | 12 +-- 9 files changed, 92 insertions(+), 141 deletions(-) diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py b/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py index 4f13c76e208..f074c81cbb3 100644 --- a/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py @@ -303,7 +303,7 @@ def check_fields(self): rho = fields.RhoFPWrapper(include_ghosts=False)[:, :] Jiy = fields.JyFPWrapper(include_ghosts=False)[...] / self.J0 - Jy = fields.JyFPAmpereWrapper(include_ghosts=False)[...] 
/ self.J0 + Jy = fields.JyFPPlasmaWrapper(include_ghosts=False)[...] / self.J0 Bx = fields.BxFPWrapper(include_ghosts=False)[...] / self.B0 By = fields.ByFPWrapper(include_ghosts=False)[...] / self.B0 Bz = fields.BzFPWrapper(include_ghosts=False)[...] / self.B0 diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index 0100f64f261..5d3b892b543 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -33,7 +33,7 @@ ExFPPMLWrapper, EyFPPMLWrapper, EzFPPMLWrapper BxFPPMLWrapper, ByFPPMLWrapper, BzFPPMLWrapper JxFPPMLWrapper, JyFPPMLWrapper, JzFPPMLWrapper -JxFPAmpereWrapper, JyFPAmpereWrapper, JzFPAmpereWrapper +JxFPPlasmaWrapper, JyFPPlasmaWrapper, JzFPPlasmaWrapper FFPPMLWrapper, GFPPMLWrapper ExCPPMLWrapper, EyCPPMLWrapper, EzCPPMLWrapper @@ -873,27 +873,27 @@ def FaceAreaszWrapper(level=0, include_ghosts=False): ) -def JxFPAmpereWrapper(level=0, include_ghosts=False): +def JxFPPlasmaWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="hybrid_current_fp_ampere", + mf_name="hybrid_current_fp_plasma", idir=0, level=level, include_ghosts=include_ghosts, ) -def JyFPAmpereWrapper(level=0, include_ghosts=False): +def JyFPPlasmaWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="hybrid_current_fp_ampere", + mf_name="hybrid_current_fp_plasma", idir=1, level=level, include_ghosts=include_ghosts, ) -def JzFPAmpereWrapper(level=0, include_ghosts=False): +def JzFPPlasmaWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( - mf_name="hybrid_current_fp_ampere", + mf_name="hybrid_current_fp_plasma", idir=2, level=level, include_ghosts=include_ghosts, diff --git a/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp index b4f286506a8..e06f90b5f0c 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/JdispFunctor.cpp @@ -1,8 +1,11 @@ -/* This file is part of Warpx. +/* Copyright 2023-2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: Avigdor Veksler (TAE Technologies) * - * Authors: Avigdor Veksler * License: BSD-3-Clause-LBNL -*/ + */ #include "JdispFunctor.H" #include "WarpX.H" @@ -40,7 +43,7 @@ JdispFunctor::operator() (amrex::MultiFab& mf_dst, int dcomp, const int /*i_buff AMREX_ASSUME(hybrid_pic_model != nullptr); /** pointer to current calculated from Ampere's Law (Jamp) multifab */ - amrex::MultiFab* mf_curlB = warpx.m_fields.get(FieldType::hybrid_current_fp_ampere, Direction{m_dir}, m_lev); + amrex::MultiFab* mf_curlB = warpx.m_fields.get(FieldType::hybrid_current_fp_plasma, Direction{m_dir}, m_lev); //if (!hybrid_pic_model) { // To finish this implementation, we need to implement a method to @@ -63,51 +66,6 @@ JdispFunctor::operator() (amrex::MultiFab& mf_dst, int dcomp, const int /*i_buff -1, *mf_j, 0, 0, 1, Jdisp.nGrowVect() ); - if (hybrid_pic_model) { - // Subtract the interpolated j_external value from j_displacement. - /** pointer to external currents (Jext) multifab */ - amrex::MultiFab* mf_j_external = warpx.m_fields.get(FieldType::hybrid_current_fp_external, Direction{m_dir}, m_lev); - - // Index type required for interpolating Jext from their respective - // staggering (nodal) to the Jx_displacement, Jy_displacement, Jz_displacement - // locations. The staggering of J_displacement is the same as the - // staggering for J, so we use J_stag as the interpolation map. - // For interp to work below, the indices of the undefined dimensions - // must match. 
We set them as (1,1,1). - amrex::GpuArray Jext_IndexType = {1, 1, 1}; - amrex::GpuArray J_IndexType = {1, 1, 1}; - amrex::IntVect Jext_stag = mf_j_external->ixType().toIntVect(); - amrex::IntVect J_stag = mf_j->ixType().toIntVect(); - - // Index types for the dimensions simulated are overwritten. - for ( int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - Jext_IndexType[idim] = Jext_stag[idim]; - J_IndexType[idim] = J_stag[idim]; - } - - // Parameters for `interp` that maps from Jext to J. - // The "coarsening is just 1 i.e. no coarsening" - amrex::GpuArray const& coarsen = {1, 1, 1}; - - // Loop through the grids, and over the tiles within each grid to - // subtract the interpolated Jext from J_displacement. -#ifdef AMREX_USE_OMP -#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) -#endif - for ( MFIter mfi(Jdisp, TilingIfNotGPU()); mfi.isValid(); ++mfi ) { - - Array4 const& Jdisp_arr = Jdisp.array(mfi); - Array4 const& Jext = mf_j_external->const_array(mfi); - - // Loop over cells and update the Jdisp MultiFab - amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // Interpolate Jext to the staggering of J - auto const jext_interp = ablastr::coarsen::sample::Interp(Jext, Jext_IndexType, J_IndexType, coarsen, i, j, k, 0); - Jdisp_arr(i, j, k, 0) -= jext_interp; - }); - } - } - InterpolateMFForDiag(mf_dst, Jdisp, dcomp, warpx.DistributionMap(m_lev), m_convertRZmodes2cartesian); } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 03f51f7ba62..45c06584fda 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -141,9 +141,8 @@ class FiniteDifferenceSolver * https://link.springer.com/chapter/10.1007/3-540-36530-3_8 * * \param[out] Efield vector of electric field MultiFabs updated at a given level - * \param[in] Jfield vector of total current MultiFabs at a given level + * \param[in] Jfield vector of total plasma current MultiFabs at a given level * \param[in] Jifield vector of ion current density MultiFabs at a given level - * \param[in] Jextfield vector of external current density MultiFabs at a given level * \param[in] Bfield vector of magnetic field MultiFabs at a given level * \param[in] rhofield scalar ion charge density Multifab at a given level * \param[in] Pefield scalar electron pressure MultiFab at a given level @@ -153,15 +152,14 @@ class FiniteDifferenceSolver * \param[in] solve_for_Faraday boolean flag for whether the E-field is solved to be used in Faraday's equation */ void HybridPICSolveE ( ablastr::fields::VectorField const& Efield, - ablastr::fields::VectorField & Jfield, - ablastr::fields::VectorField const& Jifield, - ablastr::fields::VectorField const& Jextfield, - ablastr::fields::VectorField const& Bfield, - amrex::MultiFab const& rhofield, - amrex::MultiFab const& Pefield, - ablastr::fields::VectorField const& edge_lengths, - int lev, HybridPICModel const* hybrid_model, - bool solve_for_Faraday ); + ablastr::fields::VectorField & Jfield, + ablastr::fields::VectorField const& Jifield, + ablastr::fields::VectorField const& Bfield, + amrex::MultiFab const& rhofield, + amrex::MultiFab const& Pefield, + ablastr::fields::VectorField const& edge_lengths, + int lev, HybridPICModel const* hybrid_model, + bool solve_for_Faraday ); /** * \brief Calculation of total current using Ampere's law (without @@ -241,7 +239,6 @@ class FiniteDifferenceSolver 
ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Jifield, - ablastr::fields::VectorField const& Jextfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, @@ -346,7 +343,6 @@ class FiniteDifferenceSolver ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Jifield, - ablastr::fields::VectorField const& Jextfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index b0f63dd8018..ec4a53b2edd 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -63,18 +63,18 @@ public: /** * \brief - * Function to calculate the total current based on Ampere's law while - * neglecting displacement current (J = curl x B). Used in the Ohm's law - * solver (kinetic-fluid hybrid model). + * Function to calculate the total plasma current based on Ampere's law while + * neglecting displacement current (J = curl x B). Any external current is + * subtracted as well. Used in the Ohm's law solver (kinetic-fluid hybrid model). * * \param[in] Bfield Magnetic field from which the current is calculated. * \param[in] edge_lengths Length of cell edges taking embedded boundaries into account */ - void CalculateCurrentAmpere ( + void CalculatePlasmaCurrent ( ablastr::fields::MultiLevelVectorField const& Bfield, ablastr::fields::MultiLevelVectorField const& edge_lengths ); - void CalculateCurrentAmpere ( + void CalculatePlasmaCurrent ( ablastr::fields::VectorField const& Bfield, ablastr::fields::VectorField const& edge_lengths, int lev diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index dbf56a0e899..d7d6a43b4d5 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -67,18 +67,18 @@ void HybridPICModel::AllocateLevelMFs (ablastr::fields::MultiFabRegister & field // The "hybrid_electron_pressure_fp" multifab stores the electron pressure calculated // from the specified equation of state. - // The "hybrid_rho_fp_temp" multifab is used to store the ion charge density - // interpolated or extrapolated to appropriate timesteps. - // The "hybrid_current_fp_temp" multifab is used to store the ion current density - // interpolated or extrapolated to appropriate timesteps. - // The "hybrid_current_fp_ampere" multifab stores the total current calculated as - // the curl of B. fields.alloc_init(FieldType::hybrid_electron_pressure_fp, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngRho, 0.0_rt); + + // The "hybrid_rho_fp_temp" multifab is used to store the ion charge density + // interpolated or extrapolated to appropriate timesteps. fields.alloc_init(FieldType::hybrid_rho_fp_temp, lev, amrex::convert(ba, rho_nodal_flag), dm, ncomps, ngRho, 0.0_rt); + + // The "hybrid_current_fp_temp" multifab is used to store the ion current density + // interpolated or extrapolated to appropriate timesteps. 
fields.alloc_init(FieldType::hybrid_current_fp_temp, Direction{0}, lev, amrex::convert(ba, jx_nodal_flag), dm, ncomps, ngJ, 0.0_rt); @@ -89,28 +89,29 @@ void HybridPICModel::AllocateLevelMFs (ablastr::fields::MultiFabRegister & field lev, amrex::convert(ba, jz_nodal_flag), dm, ncomps, ngJ, 0.0_rt); - fields.alloc_init(FieldType::hybrid_current_fp_ampere, Direction{0}, + // The "hybrid_current_fp_plasma" multifab stores the total plasma current calculated + // as the curl of B minus any external current. + fields.alloc_init(FieldType::hybrid_current_fp_plasma, Direction{0}, lev, amrex::convert(ba, jx_nodal_flag), dm, ncomps, ngJ, 0.0_rt); - fields.alloc_init(FieldType::hybrid_current_fp_ampere, Direction{1}, + fields.alloc_init(FieldType::hybrid_current_fp_plasma, Direction{1}, lev, amrex::convert(ba, jy_nodal_flag), dm, ncomps, ngJ, 0.0_rt); - fields.alloc_init(FieldType::hybrid_current_fp_ampere, Direction{2}, + fields.alloc_init(FieldType::hybrid_current_fp_plasma, Direction{2}, lev, amrex::convert(ba, jz_nodal_flag), dm, ncomps, ngJ, 0.0_rt); - // the external current density multifab is made nodal to avoid needing to interpolate - // to a nodal grid as has to be done for the ion and total current density multifabs - // this also allows the external current multifab to not have any ghost cells + // the external current density multifab matches the current staggering and + // one ghost cell is used since we interpolate the current to a nodal grid fields.alloc_init(FieldType::hybrid_current_fp_external, Direction{0}, - lev, amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))), - dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), 0.0_rt); + lev, amrex::convert(ba, jx_nodal_flag), + dm, ncomps, IntVect(1), 0.0_rt); fields.alloc_init(FieldType::hybrid_current_fp_external, Direction{1}, - lev, amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))), - dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), 0.0_rt); + lev, amrex::convert(ba, jy_nodal_flag), + dm, ncomps, IntVect(1), 0.0_rt); fields.alloc_init(FieldType::hybrid_current_fp_external, Direction{2}, - lev, amrex::convert(ba, IntVect(AMREX_D_DECL(1,1,1))), - dm, ncomps, IntVect(AMREX_D_DECL(0,0,0)), 0.0_rt); + lev, amrex::convert(ba, jz_nodal_flag), + dm, ncomps, IntVect(1), 0.0_rt); #ifdef WARPX_DIM_RZ WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -352,7 +353,7 @@ void HybridPICModel::GetCurrentExternal ( const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; #endif // Initialize the y-component of the field. 
- mfyfab(i,j,k) = Jy_external(x,y,z,t); + mfyfab(i,j,k) = Jy_external(x,y,z,t); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { // skip if node is covered by an embedded boundary @@ -384,35 +385,44 @@ void HybridPICModel::GetCurrentExternal ( } } -void HybridPICModel::CalculateCurrentAmpere ( +void HybridPICModel::CalculatePlasmaCurrent ( ablastr::fields::MultiLevelVectorField const& Bfield, ablastr::fields::MultiLevelVectorField const& edge_lengths) { auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - CalculateCurrentAmpere(Bfield[lev], edge_lengths[lev], lev); + CalculatePlasmaCurrent(Bfield[lev], edge_lengths[lev], lev); } } -void HybridPICModel::CalculateCurrentAmpere ( +void HybridPICModel::CalculatePlasmaCurrent ( ablastr::fields::VectorField const& Bfield, ablastr::fields::VectorField const& edge_lengths, const int lev) { - WARPX_PROFILE("WarpX::CalculateCurrentAmpere()"); + WARPX_PROFILE("HybridPICModel::CalculatePlasmaCurrent()"); auto& warpx = WarpX::GetInstance(); - ablastr::fields::VectorField current_fp_ampere = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_ampere, lev); + ablastr::fields::VectorField current_fp_plasma = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_plasma, lev); warpx.get_pointer_fdtd_solver_fp(lev)->CalculateCurrentAmpere( - current_fp_ampere, Bfield, edge_lengths, lev + current_fp_plasma, Bfield, edge_lengths, lev ); // we shouldn't apply the boundary condition to J since J = J_i - J_e but // the boundary correction was already applied to J_i and the B-field // boundary ensures that J itself complies with the boundary conditions, right? // ApplyJfieldBoundary(lev, Jfield[0].get(), Jfield[1].get(), Jfield[2].get()); - for (int i=0; i<3; i++) { current_fp_ampere[i]->FillBoundary(warpx.Geom(lev).periodicity()); } + for (int i=0; i<3; i++) { current_fp_plasma[i]->FillBoundary(warpx.Geom(lev).periodicity()); } + + // Subtract external current from "Ampere" current calculated above. Note + // we need to include 1 ghost cell since later we will interpolate the + // plasma current to a nodal grid. 
+ ablastr::fields::VectorField current_fp_external = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_external, lev); + for (int i=0; i<3; i++) { + current_fp_plasma[i]->minus(*current_fp_external[i], 0, 1, 1); + } + } void HybridPICModel::HybridPICSolveE ( @@ -463,19 +473,15 @@ void HybridPICModel::HybridPICSolveE ( const int lev, PatchType patch_type, const bool solve_for_Faraday) const { - auto& warpx = WarpX::GetInstance(); - ablastr::fields::VectorField current_fp_ampere = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_ampere, lev); - const ablastr::fields::VectorField current_fp_external = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_external, lev); + ablastr::fields::VectorField current_fp_plasma = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_plasma, lev); const ablastr::fields::ScalarField electron_pressure_fp = warpx.m_fields.get(FieldType::hybrid_electron_pressure_fp, lev); // Solve E field in regular cells warpx.get_pointer_fdtd_solver_fp(lev)->HybridPICSolveE( - Efield, current_fp_ampere, Jfield, current_fp_external, - Bfield, rhofield, - *electron_pressure_fp, - edge_lengths, lev, this, solve_for_Faraday + Efield, current_fp_plasma, Jfield, Bfield, rhofield, + *electron_pressure_fp, edge_lengths, lev, this, solve_for_Faraday ); warpx.ApplyEfieldBoundary(lev, patch_type); } @@ -679,8 +685,8 @@ void HybridPICModel::FieldPush ( { auto& warpx = WarpX::GetInstance(); - // Calculate J = curl x B / mu0 - CalculateCurrentAmpere(Bfield, edge_lengths); + // Calculate J = curl x B / mu0 - J_ext + CalculatePlasmaCurrent(Bfield, edge_lengths); // Calculate the E-field from Ohm's law HybridPICSolveE(Efield, Jfield, Bfield, rhofield, edge_lengths, true); warpx.FillBoundaryE(ng, nodal_sync); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 34a84756203..76fedbf4dea 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -354,7 +354,6 @@ void FiniteDifferenceSolver::HybridPICSolveE ( ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField& Jfield, ablastr::fields::VectorField const& Jifield, - ablastr::fields::VectorField const& Jextfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, @@ -368,14 +367,14 @@ void FiniteDifferenceSolver::HybridPICSolveE ( #ifdef WARPX_DIM_RZ HybridPICSolveECylindrical ( - Efield, Jfield, Jifield, Jextfield, Bfield, rhofield, Pefield, + Efield, Jfield, Jifield, Bfield, rhofield, Pefield, edge_lengths, lev, hybrid_model, solve_for_Faraday ); #else HybridPICSolveECartesian ( - Efield, Jfield, Jifield, Jextfield, Bfield, rhofield, Pefield, + Efield, Jfield, Jifield, Bfield, rhofield, Pefield, edge_lengths, lev, hybrid_model, solve_for_Faraday ); @@ -392,7 +391,6 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Jifield, - ablastr::fields::VectorField const& Jextfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, @@ -471,9 +469,6 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& Jir = Jifield[0]->const_array(mfi); Array4 const& Jit = Jifield[1]->const_array(mfi); Array4 const& Jiz = Jifield[2]->const_array(mfi); - Array4 const& Jextr = 
Jextfield[0]->const_array(mfi); - Array4 const& Jextt = Jextfield[1]->const_array(mfi); - Array4 const& Jextz = Jextfield[2]->const_array(mfi); Array4 const& Br = Bfield[0]->const_array(mfi); Array4 const& Bt = Bfield[1]->const_array(mfi); Array4 const& Bz = Bfield[2]->const_array(mfi); @@ -498,16 +493,16 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // calculate enE = (J - Ji) x B enE_nodal(i, j, 0, 0) = ( - (jt_interp - jit_interp - Jextt(i, j, 0)) * Bz_interp - - (jz_interp - jiz_interp - Jextz(i, j, 0)) * Bt_interp + (jt_interp - jit_interp) * Bz_interp + - (jz_interp - jiz_interp) * Bt_interp ); enE_nodal(i, j, 0, 1) = ( - (jz_interp - jiz_interp - Jextz(i, j, 0)) * Br_interp - - (jr_interp - jir_interp - Jextr(i, j, 0)) * Bz_interp + (jz_interp - jiz_interp) * Br_interp + - (jr_interp - jir_interp) * Bz_interp ); enE_nodal(i, j, 0, 2) = ( - (jr_interp - jir_interp - Jextr(i, j, 0)) * Bt_interp - - (jt_interp - jit_interp - Jextt(i, j, 0)) * Br_interp + (jr_interp - jir_interp) * Bt_interp + - (jt_interp - jit_interp) * Br_interp ); }); @@ -707,7 +702,6 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Jifield, - ablastr::fields::VectorField const& Jextfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, @@ -780,9 +774,6 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 const& Jix = Jifield[0]->const_array(mfi); Array4 const& Jiy = Jifield[1]->const_array(mfi); Array4 const& Jiz = Jifield[2]->const_array(mfi); - Array4 const& Jextx = Jextfield[0]->const_array(mfi); - Array4 const& Jexty = Jextfield[1]->const_array(mfi); - Array4 const& Jextz = Jextfield[2]->const_array(mfi); Array4 const& Bx = Bfield[0]->const_array(mfi); Array4 const& By = Bfield[1]->const_array(mfi); Array4 const& Bz = Bfield[2]->const_array(mfi); @@ -790,7 +781,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Loop over the cells and update the nodal E field amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // interpolate the total current to a nodal grid + // interpolate the total plasma current to a nodal grid auto const jx_interp = Interp(Jx, Jx_stag, nodal, coarsen, i, j, k, 0); auto const jy_interp = Interp(Jy, Jy_stag, nodal, coarsen, i, j, k, 0); auto const jz_interp = Interp(Jz, Jz_stag, nodal, coarsen, i, j, k, 0); @@ -807,16 +798,16 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // calculate enE = (J - Ji) x B enE_nodal(i, j, k, 0) = ( - (jy_interp - jiy_interp - Jexty(i, j, k)) * Bz_interp - - (jz_interp - jiz_interp - Jextz(i, j, k)) * By_interp + (jy_interp - jiy_interp) * Bz_interp + - (jz_interp - jiz_interp) * By_interp ); enE_nodal(i, j, k, 1) = ( - (jz_interp - jiz_interp - Jextz(i, j, k)) * Bx_interp - - (jx_interp - jix_interp - Jextx(i, j, k)) * Bz_interp + (jz_interp - jiz_interp) * Bx_interp + - (jx_interp - jix_interp) * Bz_interp ); enE_nodal(i, j, k, 2) = ( - (jx_interp - jix_interp - Jextx(i, j, k)) * By_interp - - (jy_interp - jiy_interp - Jexty(i, j, k)) * Bx_interp + (jx_interp - jix_interp) * By_interp + - (jy_interp - jiy_interp) * Bx_interp ); }); diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 556b8f8fca4..be2d40459ac 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ 
-165,7 +165,7 @@ void WarpX::HybridPICEvolveFields ()
     m_hybrid_pic_model->CalculateElectronPressure();
 
     // Update the E field to t=n+1 using the extrapolated J_i^n+1 value
-    m_hybrid_pic_model->CalculateCurrentAmpere(
+    m_hybrid_pic_model->CalculatePlasmaCurrent(
         m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level),
         m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level));
     m_hybrid_pic_model->HybridPICSolveE(
diff --git a/Source/Fields.H b/Source/Fields.H
index f85b6c4584c..b07661254c4 100644
--- a/Source/Fields.H
+++ b/Source/Fields.H
@@ -45,11 +45,11 @@ namespace warpx::fields
         vector_potential_fp_nodal,
         vector_potential_grad_buf_e_stag,
         vector_potential_grad_buf_b_stag,
-        hybrid_electron_pressure_fp,
-        hybrid_rho_fp_temp,
-        hybrid_current_fp_temp,
-        hybrid_current_fp_ampere,
-        hybrid_current_fp_external,
+        hybrid_electron_pressure_fp, /**< Used with Ohm's law solver. Stores the electron pressure */
+        hybrid_rho_fp_temp, /**< Used with Ohm's law solver. Stores the time interpolated/extrapolated charge density */
+        hybrid_current_fp_temp, /**< Used with Ohm's law solver. Stores the time interpolated/extrapolated current density */
+        hybrid_current_fp_plasma, /**< Used with Ohm's law solver. Stores plasma current calculated as J_plasma = curl x B / mu0 - J_ext */
+        hybrid_current_fp_external, /**< Used with Ohm's law solver. Stores external current */
         Efield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */
         Bfield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */
         current_cp, /**< Only used with MR. The current that is used as a source for the field solver, on the coarse patch of each level */
@@ -100,7 +100,7 @@ namespace warpx::fields
         FieldType::vector_potential_grad_buf_e_stag,
         FieldType::vector_potential_grad_buf_b_stag,
         FieldType::hybrid_current_fp_temp,
-        FieldType::hybrid_current_fp_ampere,
+        FieldType::hybrid_current_fp_plasma,
         FieldType::hybrid_current_fp_external,
         FieldType::Efield_cp,
         FieldType::Bfield_cp,

From 025f70ec1cfea5e687517b0516bfeb794250b722 Mon Sep 17 00:00:00 2001
From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>
Date: Tue, 1 Oct 2024 17:34:34 -0700
Subject: [PATCH 019/278] CI: avoid duplicate runs for secondary branches on
 main fork (#5308)

When `pre-commit` gets auto-updated, we typically see a PR from the
branch `pre-commit-ci-update-config`. This branch is created directly
in the main fork (unlike the individual branches that WarpX
contributors create from their own forks) and all CI checks run twice:

- once for the activity on the PR that `pre-commit` automatically opens
  (these CI checks are labeled "PR automated")
- once for the activity on the branch `pre-commit-ci-update-config`
  (these CI checks are labeled "individual CI")

Here's an example:

![Screenshot from 2024-09-23 15-54-57](https://github.com/user-attachments/assets/836476f4-e6f6-4ca8-92a6-128050727ed5)

On top of this, once the PR is merged, CI runs a third time, because
the merge is counted as activity on the branch `development` (again
"individual CI").

We should be able to safely skip "individual CI" for the activity on
the branch `pre-commit-ci-update-config`. This PR should do the trick,
although it's good to double check the syntax for GitHub Actions and
Azure pipelines. My understanding is that the cleanup-cache and post-PR
workflows don't need to be updated, though that should also be
double-checked.
--------- Co-authored-by: Axel Huebl --- .azure-pipelines.yml | 3 +++ .github/workflows/clang_sanitizers.yml | 6 +++++- .github/workflows/clang_tidy.yml | 6 +++++- .github/workflows/cuda.yml | 6 +++++- .github/workflows/hip.yml | 6 +++++- .github/workflows/insitu.yml | 6 +++++- .github/workflows/intel.yml | 6 +++++- .github/workflows/macos.yml | 6 +++++- .github/workflows/source.yml | 6 +++++- .github/workflows/ubuntu.yml | 6 +++++- .github/workflows/windows.yml | 6 +++++- 11 files changed, 53 insertions(+), 10 deletions(-) diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index 1355dc2f647..a32ecb8fa24 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -6,6 +6,9 @@ pool: pr: autoCancel: true drafts: false + branches: + include: + - development jobs: - job: diff --git a/.github/workflows/clang_sanitizers.yml b/.github/workflows/clang_sanitizers.yml index 119a893eb72..48d8068c80c 100644 --- a/.github/workflows/clang_sanitizers.yml +++ b/.github/workflows/clang_sanitizers.yml @@ -1,6 +1,10 @@ name: 🧴 clang sanitizers -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-clangsanitizers diff --git a/.github/workflows/clang_tidy.yml b/.github/workflows/clang_tidy.yml index 5a4f83f01f1..9088e3af134 100644 --- a/.github/workflows/clang_tidy.yml +++ b/.github/workflows/clang_tidy.yml @@ -1,6 +1,10 @@ name: 🧹 clang-tidy -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-clangtidy diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 2bc5d35bb4a..8d6cc45d714 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -1,6 +1,10 @@ name: 🐧 CUDA -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-cuda diff --git a/.github/workflows/hip.yml b/.github/workflows/hip.yml index ba537e776d4..12513caa19a 100644 --- a/.github/workflows/hip.yml +++ b/.github/workflows/hip.yml @@ -1,6 +1,10 @@ name: 🐧 HIP -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-hip diff --git a/.github/workflows/insitu.yml b/.github/workflows/insitu.yml index 35f16842935..0cc6a1ced5e 100644 --- a/.github/workflows/insitu.yml +++ b/.github/workflows/insitu.yml @@ -1,6 +1,10 @@ name: 🐧 In Situ Vis -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-insituvis diff --git a/.github/workflows/intel.yml b/.github/workflows/intel.yml index 4d0b9ebe9c6..1365fa76865 100644 --- a/.github/workflows/intel.yml +++ b/.github/workflows/intel.yml @@ -1,6 +1,10 @@ name: 🐧 Intel -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-intel diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 596920a3911..463b2dc2501 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -1,6 +1,10 @@ name: 🍏 macOS -on: [push, pull_request] +on: + push: + branches: + - "development" + pull_request: concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-macos diff --git a/.github/workflows/source.yml b/.github/workflows/source.yml index 7a2086cfdff..b97afe016c0 100644 --- a/.github/workflows/source.yml +++ 
b/.github/workflows/source.yml
@@ -6,7 +6,11 @@
 
 name: 📜 Source
 
-on: [push, pull_request]
+on:
+  push:
+    branches:
+      - "development"
+  pull_request:
 
 concurrency:
   group: ${{ github.ref }}-${{ github.head_ref }}-source
diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml
index 68d2b2156e9..a14f66b8cd9 100644
--- a/.github/workflows/ubuntu.yml
+++ b/.github/workflows/ubuntu.yml
@@ -1,6 +1,10 @@
 name: 🐧 OpenMP
 
-on: [push, pull_request]
+on:
+  push:
+    branches:
+      - "development"
+  pull_request:
 
 concurrency:
   group: ${{ github.ref }}-${{ github.head_ref }}-ubuntu
diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml
index fc75ccb0141..1d8b0fd0495 100644
--- a/.github/workflows/windows.yml
+++ b/.github/workflows/windows.yml
@@ -1,6 +1,10 @@
 name: 🪟 Windows
 
-on: [push, pull_request]
+on:
+  push:
+    branches:
+      - "development"
+  pull_request:
 
 concurrency:
   group: ${{ github.ref }}-${{ github.head_ref }}-windows

From 6757b4cdce07dd1d62d9ee872d36d59de07f7b99 Mon Sep 17 00:00:00 2001
From: Brian Jensen <127121969+budjensen@users.noreply.github.com>
Date: Wed, 2 Oct 2024 14:04:52 -0400
Subject: [PATCH 020/278] Expose Synchronize to picmi (#5355)

Added the ability to synchronize particle velocities and positions
inside a Python script via simulation.extension.warpx.synchronize().
Initiated after discussion with @roelof-groenewald in
https://github.com/ECP-WarpX/WarpX/discussions/5331. Implemented and
tested by comparing to Turner's benchmark results
(https://doi.org/10.1063/1.4775084).
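
For reference, a minimal usage sketch from a PICMI run script (not part of
this patch itself; it assumes a fully configured `picmi.Simulation` object
named `sim`, and only the `synchronize()` call comes from this PR):

```python
from pywarpx import picmi

# ... configure sim = picmi.Simulation(...) with a grid, solver, and species ...

sim.step(10)

# During the run, the leapfrog pusher keeps particle velocities staggered
# half a time step behind positions; synchronize them to the same time
# level before sampling particle data.
sim.extension.warpx.synchronize()
```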

---
 Source/Python/WarpX.cpp | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp
index 0aab95f78f8..39baf0289b2 100644
--- a/Source/Python/WarpX.cpp
+++ b/Source/Python/WarpX.cpp
@@ -253,6 +253,10 @@ The physical fields in WarpX have the following naming:
         [] () { WarpX::ProjectionCleanDivB(); },
         "Executes projection based divergence cleaner on loaded Bfield_fp_external."
     )
+    .def("synchronize",
+        [] (WarpX& wx) { wx.Synchronize(); },
+        "Synchronize particle velocities and positions."
+    )
     ;
 
     py::class_(m, "Config")

From 841d7ee45a4d9c4ac0b5438658ca1c760e72201c Mon Sep 17 00:00:00 2001
From: kli-jfp <107398873+kli-jfp@users.noreply.github.com>
Date: Wed, 2 Oct 2024 20:30:37 +0200
Subject: [PATCH 021/278] General parser function for external fields (#5349)

This feature replaces the function InitializeExternalFieldsOnGridUsingParser
with a more general function, ComputeExternalFieldOnGridUsingParser. The new
function takes parsed functions with four dimensions (x,y,z,t), so that it
can be used to evaluate functions throughout the simulation, for example,
time-dependent external fields. The function
ComputeExternalFieldOnGridUsingParser takes optional edge lengths and face
areas.

---------

Co-authored-by: Kristoffer Lindvall
---
 .../HybridPICModel/HybridPICModel.H           |   8 +-
 .../HybridPICModel/HybridPICModel.cpp         | 169 ++--------------
 .../FieldSolver/WarpXPushFieldsHybridPIC.cpp  |   3 +-
 Source/Fluids/WarpXFluidContainer.cpp         |  17 +-
 Source/Initialization/ExternalField.cpp       |  12 +-
 Source/Initialization/WarpXInitData.cpp       | 188 +++++++++---------
 Source/Particles/Gather/GetExternalFields.cpp |  14 +-
 Source/WarpX.H                                |  31 ++-
 8 files changed, 144 insertions(+), 298 deletions(-)

diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H
index ec4a53b2edd..7e8dd260a6e 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H
+++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H
@@ -53,13 +53,7 @@ public:
      * external current multifab. Note the external current can be a function
      * of time and therefore this should be re-evaluated at every step.
      */
-    void GetCurrentExternal (
-        ablastr::fields::MultiLevelVectorField const& edge_lengths
-    );
-    void GetCurrentExternal (
-        ablastr::fields::VectorField const& edge_lengths,
-        int lev
-    );
+    void GetCurrentExternal ();
 
     /**
      * \brief
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp
index d7d6a43b4d5..424f194ff37 100644
--- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp
+++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp
@@ -221,167 +221,32 @@ void HybridPICModel::InitData ()
     // if the current is time dependent which is what needs to be done to
     // write time independent fields on the first step.
for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - auto edge_lengths = std::array, 3>(); -#ifdef AMREX_USE_EB - if (EB::enabled()) { - using ablastr::fields::Direction; - auto const & edge_lengths_x = *warpx.m_fields.get(FieldType::edge_lengths, Direction{0}, lev); - auto const & edge_lengths_y = *warpx.m_fields.get(FieldType::edge_lengths, Direction{1}, lev); - auto const & edge_lengths_z = *warpx.m_fields.get(FieldType::edge_lengths, Direction{2}, lev); - - edge_lengths = std::array< std::unique_ptr, 3 >{ - std::make_unique( - edge_lengths_x, amrex::make_alias, 0, edge_lengths_x.nComp()), - std::make_unique( - edge_lengths_y, amrex::make_alias, 0, edge_lengths_y.nComp()), - std::make_unique( - edge_lengths_z, amrex::make_alias, 0, edge_lengths_z.nComp()) - }; - } -#endif - GetCurrentExternal(ablastr::fields::a2m(edge_lengths), lev); + warpx.ComputeExternalFieldOnGridUsingParser( + FieldType::hybrid_current_fp_external, + m_J_external[0], + m_J_external[1], + m_J_external[2], + lev, PatchType::fine, 'e', + warpx.m_fields.get_alldirs(FieldType::edge_lengths, lev), + warpx.m_fields.get_alldirs(FieldType::face_areas, lev)); } } -void HybridPICModel::GetCurrentExternal ( - ablastr::fields::MultiLevelVectorField const& edge_lengths) +void HybridPICModel::GetCurrentExternal () { if (!m_external_field_has_time_dependence) { return; } auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - GetCurrentExternal(edge_lengths[lev], lev); - } -} - - -void HybridPICModel::GetCurrentExternal ( - ablastr::fields::VectorField const& edge_lengths, - int lev) -{ - // This logic matches closely to WarpX::InitializeExternalFieldsOnGridUsingParser - // except that the parsers include time dependence. - auto & warpx = WarpX::GetInstance(); - - auto t = warpx.gett_new(lev); - - auto dx_lev = warpx.Geom(lev).CellSizeArray(); - const RealBox& real_box = warpx.Geom(lev).ProbDomain(); - - using ablastr::fields::Direction; - amrex::MultiFab * mfx = warpx.m_fields.get(FieldType::hybrid_current_fp_external, Direction{0}, lev); - amrex::MultiFab * mfy = warpx.m_fields.get(FieldType::hybrid_current_fp_external, Direction{1}, lev); - amrex::MultiFab * mfz = warpx.m_fields.get(FieldType::hybrid_current_fp_external, Direction{2}, lev); - - const amrex::IntVect x_nodal_flag = mfx->ixType().toIntVect(); - const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); - const amrex::IntVect z_nodal_flag = mfz->ixType().toIntVect(); - - // avoid implicit lambda capture - auto Jx_external = m_J_external[0]; - auto Jy_external = m_J_external[1]; - auto Jz_external = m_J_external[2]; - - for ( MFIter mfi(*mfx, TilingIfNotGPU()); mfi.isValid(); ++mfi) - { - const amrex::Box& tbx = mfi.tilebox( x_nodal_flag, mfx->nGrowVect() ); - const amrex::Box& tby = mfi.tilebox( y_nodal_flag, mfy->nGrowVect() ); - const amrex::Box& tbz = mfi.tilebox( z_nodal_flag, mfz->nGrowVect() ); - - auto const& mfxfab = mfx->array(mfi); - auto const& mfyfab = mfy->array(mfi); - auto const& mfzfab = mfz->array(mfi); - - amrex::Array4 lx, ly, lz; - if (EB::enabled()) { - lx = edge_lengths[0]->array(mfi); - ly = edge_lengths[1]->array(mfi); - lz = edge_lengths[2]->array(mfi); - } - - amrex::ParallelFor (tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - // skip if node is covered by an embedded boundary - if (lx && lx(i, j, k) <= 0) { return; } - - // Shift required in the x-, y-, or z- position - // depending on the index type of the multifab -#if defined(WARPX_DIM_1D_Z) - const amrex::Real x = 0._rt; - 
const amrex::Real y = 0._rt; - const amrex::Real fac_z = (1._rt - x_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real z = j*dx_lev[0] + real_box.lo(0) + fac_z; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const amrex::Real fac_x = (1._rt - x_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real y = 0._rt; - const amrex::Real fac_z = (1._rt - x_nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_z; -#else - const amrex::Real fac_x = (1._rt - x_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real fac_y = (1._rt - x_nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real y = j*dx_lev[1] + real_box.lo(1) + fac_y; - const amrex::Real fac_z = (1._rt - x_nodal_flag[2]) * dx_lev[2] * 0.5_rt; - const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; -#endif - // Initialize the x-component of the field. - mfxfab(i,j,k) = Jx_external(x,y,z,t); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - // skip if node is covered by an embedded boundary - if (ly && ly(i, j, k) <= 0) { return; } - -#if defined(WARPX_DIM_1D_Z) - const amrex::Real x = 0._rt; - const amrex::Real y = 0._rt; - const amrex::Real fac_z = (1._rt - y_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real z = j*dx_lev[0] + real_box.lo(0) + fac_z; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const amrex::Real fac_x = (1._rt - y_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real y = 0._rt; - const amrex::Real fac_z = (1._rt - y_nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_z; -#elif defined(WARPX_DIM_3D) - const amrex::Real fac_x = (1._rt - y_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real fac_y = (1._rt - y_nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real y = j*dx_lev[1] + real_box.lo(1) + fac_y; - const amrex::Real fac_z = (1._rt - y_nodal_flag[2]) * dx_lev[2] * 0.5_rt; - const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; -#endif - // Initialize the y-component of the field. - mfyfab(i,j,k) = Jy_external(x,y,z,t); - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - // skip if node is covered by an embedded boundary - if (lz && lz(i, j, k) <= 0) { return; } - -#if defined(WARPX_DIM_1D_Z) - const amrex::Real x = 0._rt; - const amrex::Real y = 0._rt; - const amrex::Real fac_z = (1._rt - z_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real z = j*dx_lev[0] + real_box.lo(0) + fac_z; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const amrex::Real fac_x = (1._rt - z_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real y = 0._rt; - const amrex::Real fac_z = (1._rt - z_nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real z = j*dx_lev[1] + real_box.lo(1) + fac_z; -#elif defined(WARPX_DIM_3D) - const amrex::Real fac_x = (1._rt - z_nodal_flag[0]) * dx_lev[0] * 0.5_rt; - const amrex::Real x = i*dx_lev[0] + real_box.lo(0) + fac_x; - const amrex::Real fac_y = (1._rt - z_nodal_flag[1]) * dx_lev[1] * 0.5_rt; - const amrex::Real y = j*dx_lev[1] + real_box.lo(1) + fac_y; - const amrex::Real fac_z = (1._rt - z_nodal_flag[2]) * dx_lev[2] * 0.5_rt; - const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; -#endif - // Initialize the z-component of the field. 
- mfzfab(i,j,k) = Jz_external(x,y,z,t); - } - ); + warpx.ComputeExternalFieldOnGridUsingParser( + FieldType::hybrid_current_fp_external, + m_J_external[0], + m_J_external[1], + m_J_external[2], + lev, PatchType::fine, 'e', + warpx.m_fields.get_alldirs(FieldType::edge_lengths, lev), + warpx.m_fields.get_alldirs(FieldType::face_areas, lev)); } } diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index be2d40459ac..5220419f822 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -68,8 +68,7 @@ void WarpX::HybridPICEvolveFields () const int sub_steps = m_hybrid_pic_model->m_substeps; // Get the external current - m_hybrid_pic_model->GetCurrentExternal( - m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level)); + m_hybrid_pic_model->GetCurrentExternal(); // Reference hybrid-PIC multifabs ablastr::fields::MultiLevelScalarField rho_fp_temp = m_fields.get_mr_levels(FieldType::hybrid_rho_fp_temp, finest_level); diff --git a/Source/Fluids/WarpXFluidContainer.cpp b/Source/Fluids/WarpXFluidContainer.cpp index 326ce30c844..0a0ca4b8818 100644 --- a/Source/Fluids/WarpXFluidContainer.cpp +++ b/Source/Fluids/WarpXFluidContainer.cpp @@ -1010,24 +1010,23 @@ void WarpXFluidContainer::GatherAndPush ( // External field parsers external_e_fields = (m_E_ext_s == "parse_e_ext_function"); external_b_fields = (m_B_ext_s == "parse_b_ext_function"); + amrex::ParserExecutor<4> Exfield_parser; amrex::ParserExecutor<4> Eyfield_parser; amrex::ParserExecutor<4> Ezfield_parser; amrex::ParserExecutor<4> Bxfield_parser; amrex::ParserExecutor<4> Byfield_parser; amrex::ParserExecutor<4> Bzfield_parser; + if (external_e_fields){ - constexpr int num_arguments = 4; //x,y,z,t - Exfield_parser = m_Ex_parser->compile(); - Eyfield_parser = m_Ey_parser->compile(); - Ezfield_parser = m_Ez_parser->compile(); + Exfield_parser = m_Ex_parser->compile<4>(); + Eyfield_parser = m_Ey_parser->compile<4>(); + Ezfield_parser = m_Ez_parser->compile<4>(); } - if (external_b_fields){ - constexpr int num_arguments = 4; //x,y,z,t - Bxfield_parser = m_Bx_parser->compile(); - Byfield_parser = m_By_parser->compile(); - Bzfield_parser = m_Bz_parser->compile(); + Bxfield_parser = m_Bx_parser->compile<4>(); + Byfield_parser = m_By_parser->compile<4>(); + Bzfield_parser = m_Bz_parser->compile<4>(); } diff --git a/Source/Initialization/ExternalField.cpp b/Source/Initialization/ExternalField.cpp index d86c0a484bf..504fb1ce7a5 100644 --- a/Source/Initialization/ExternalField.cpp +++ b/Source/Initialization/ExternalField.cpp @@ -127,11 +127,11 @@ ExternalFieldParams::ExternalFieldParams(const amrex::ParmParse& pp_warpx) str_Bz_ext_grid_function); Bxfield_parser = std::make_unique( - utils::parser::makeParser(str_Bx_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_Bx_ext_grid_function,{"x","y","z","t"})); Byfield_parser = std::make_unique( - utils::parser::makeParser(str_By_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_By_ext_grid_function,{"x","y","z","t"})); Bzfield_parser = std::make_unique( - utils::parser::makeParser(str_Bz_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_Bz_ext_grid_function,{"x","y","z","t"})); } //___________________________________________________________________________ @@ -163,11 +163,11 @@ ExternalFieldParams::ExternalFieldParams(const amrex::ParmParse& pp_warpx) str_Ez_ext_grid_function); Exfield_parser = std::make_unique( - 
utils::parser::makeParser(str_Ex_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_Ex_ext_grid_function,{"x","y","z","t"})); Eyfield_parser = std::make_unique( - utils::parser::makeParser(str_Ey_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_Ey_ext_grid_function,{"x","y","z","t"})); Ezfield_parser = std::make_unique( - utils::parser::makeParser(str_Ez_ext_grid_function,{"x","y","z"})); + utils::parser::makeParser(str_Ez_ext_grid_function,{"x","y","z","t"})); } //___________________________________________________________________________ diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 70bf20d0905..ce9c3d50a1e 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -972,30 +972,23 @@ WarpX::InitLevelData (int lev, Real /*time*/) // The default maxlevel_extEMfield_init value is the total number of levels in the simulation if ((m_p_ext_field_params->B_ext_grid_type == ExternalFieldType::parse_ext_grid_function) && (lev > 0) && (lev <= maxlevel_extEMfield_init)) { - - InitializeExternalFieldsOnGridUsingParser( - m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), - m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), - m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), - m_p_ext_field_params->Bxfield_parser->compile<3>(), - m_p_ext_field_params->Byfield_parser->compile<3>(), - m_p_ext_field_params->Bzfield_parser->compile<3>(), + ComputeExternalFieldOnGridUsingParser( + FieldType::Bfield_aux, + m_p_ext_field_params->Bxfield_parser->compile<4>(), + m_p_ext_field_params->Byfield_parser->compile<4>(), + m_p_ext_field_params->Bzfield_parser->compile<4>(), + lev, PatchType::fine, 'f', m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev), - 'B', - lev, PatchType::fine); - - InitializeExternalFieldsOnGridUsingParser( - m_fields.get(FieldType::Bfield_cp, Direction{0}, lev), - m_fields.get(FieldType::Bfield_cp, Direction{1}, lev), - m_fields.get(FieldType::Bfield_cp, Direction{2}, lev), - m_p_ext_field_params->Bxfield_parser->compile<3>(), - m_p_ext_field_params->Byfield_parser->compile<3>(), - m_p_ext_field_params->Bzfield_parser->compile<3>(), + m_fields.get_alldirs(FieldType::face_areas, lev)); + + ComputeExternalFieldOnGridUsingParser( + FieldType::Bfield_cp, + m_p_ext_field_params->Bxfield_parser->compile<4>(), + m_p_ext_field_params->Byfield_parser->compile<4>(), + m_p_ext_field_params->Bzfield_parser->compile<4>(), + lev, PatchType::coarse, 'f', m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_mr_levels_alldirs(FieldType::face_areas, max_level)[lev], - 'B', - lev, PatchType::coarse); + m_fields.get_mr_levels_alldirs(FieldType::face_areas, max_level)[lev]); } // if the input string for the E-field is "parse_e_ext_grid_function", @@ -1021,29 +1014,23 @@ WarpX::InitLevelData (int lev, Real /*time*/) #endif if (lev > 0) { - InitializeExternalFieldsOnGridUsingParser( - m_fields.get(FieldType::Efield_aux, Direction{0}, lev), - m_fields.get(FieldType::Efield_aux, Direction{1}, lev), - m_fields.get(FieldType::Efield_aux, Direction{2}, lev), - m_p_ext_field_params->Exfield_parser->compile<3>(), - m_p_ext_field_params->Eyfield_parser->compile<3>(), - m_p_ext_field_params->Ezfield_parser->compile<3>(), + ComputeExternalFieldOnGridUsingParser( + FieldType::Efield_aux, + m_p_ext_field_params->Exfield_parser->compile<4>(), + m_p_ext_field_params->Eyfield_parser->compile<4>(), + 
m_p_ext_field_params->Ezfield_parser->compile<4>(), + lev, PatchType::fine, 'e', m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev), - 'E', - lev, PatchType::fine); - - InitializeExternalFieldsOnGridUsingParser( - m_fields.get(FieldType::Efield_cp, Direction{0}, lev), - m_fields.get(FieldType::Efield_cp, Direction{1}, lev), - m_fields.get(FieldType::Efield_cp, Direction{2}, lev), - m_p_ext_field_params->Exfield_parser->compile<3>(), - m_p_ext_field_params->Eyfield_parser->compile<3>(), - m_p_ext_field_params->Ezfield_parser->compile<3>(), + m_fields.get_alldirs(FieldType::face_areas, lev)); + + ComputeExternalFieldOnGridUsingParser( + FieldType::Efield_cp, + m_p_ext_field_params->Exfield_parser->compile<4>(), + m_p_ext_field_params->Eyfield_parser->compile<4>(), + m_p_ext_field_params->Ezfield_parser->compile<4>(), + lev, PatchType::coarse, 'e', m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev), - 'E', - lev, PatchType::coarse); + m_fields.get_alldirs(FieldType::face_areas, lev)); #ifdef AMREX_USE_EB if (eb_enabled) { if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { @@ -1072,48 +1059,61 @@ WarpX::InitLevelData (int lev, Real /*time*/) } } -void -WarpX::InitializeExternalFieldsOnGridUsingParser ( - MultiFab *mfx, MultiFab *mfy, MultiFab *mfz, - ParserExecutor<3> const& xfield_parser, ParserExecutor<3> const& yfield_parser, - ParserExecutor<3> const& zfield_parser, - ablastr::fields::VectorField const& edge_lengths, - ablastr::fields::VectorField const& face_areas, - [[maybe_unused]] const char field, - const int lev, PatchType patch_type) +void WarpX::ComputeExternalFieldOnGridUsingParser ( + warpx::fields::FieldType field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, [[maybe_unused]] const char topology, + std::optional const& edge_lengths, + std::optional const& face_areas) { + auto t = gett_new(lev); auto dx_lev = geom[lev].CellSizeArray(); - amrex::IntVect refratio = (lev > 0 ) ? WarpX::RefRatio(lev-1) : amrex::IntVect(1); + const RealBox& real_box = geom[lev].ProbDomain(); + + amrex::IntVect refratio = (lev > 0 ) ? 
RefRatio(lev-1) : amrex::IntVect(1); if (patch_type == PatchType::coarse) { for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { dx_lev[idim] = dx_lev[idim] * refratio[idim]; } } - const RealBox& real_box = geom[lev].ProbDomain(); + + using ablastr::fields::Direction; + amrex::MultiFab* mfx = m_fields.get(field, Direction{0}, lev); + amrex::MultiFab* mfy = m_fields.get(field, Direction{1}, lev); + amrex::MultiFab* mfz = m_fields.get(field, Direction{2}, lev); + const amrex::IntVect x_nodal_flag = mfx->ixType().toIntVect(); const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); const amrex::IntVect z_nodal_flag = mfz->ixType().toIntVect(); - bool const eb_enabled = EB::enabled(); + const bool eb_enabled = EB::enabled(); for ( MFIter mfi(*mfx, TilingIfNotGPU()); mfi.isValid(); ++mfi) { - const amrex::Box &tbx = mfi.tilebox(x_nodal_flag, mfx->nGrowVect()); - const amrex::Box &tby = mfi.tilebox(y_nodal_flag, mfy->nGrowVect()); - const amrex::Box &tbz = mfi.tilebox(z_nodal_flag, mfz->nGrowVect()); + const amrex::Box& tbx = mfi.tilebox( x_nodal_flag, mfx->nGrowVect() ); + const amrex::Box& tby = mfi.tilebox( y_nodal_flag, mfy->nGrowVect() ); + const amrex::Box& tbz = mfi.tilebox( z_nodal_flag, mfz->nGrowVect() ); - auto const &mfxfab = mfx->array(mfi); - auto const &mfyfab = mfy->array(mfi); - auto const &mfzfab = mfz->array(mfi); + auto const& mfxfab = mfx->array(mfi); + auto const& mfyfab = mfy->array(mfi); + auto const& mfzfab = mfz->array(mfi); amrex::Array4 lx, ly, lz, Sx, Sy, Sz; if (eb_enabled) { - lx = edge_lengths[0]->array(mfi); - ly = edge_lengths[1]->array(mfi); - lz = edge_lengths[2]->array(mfi); - Sx = face_areas[0]->array(mfi); - Sy = face_areas[1]->array(mfi); - Sz = face_areas[2]->array(mfi); + if (edge_lengths.has_value()) { + const auto& edge_lengths_array = edge_lengths.value(); + lx = edge_lengths_array[0]->array(mfi); + ly = edge_lengths_array[1]->array(mfi); + lz = edge_lengths_array[2]->array(mfi); + } + if (face_areas.has_value()) { + const auto& face_areas_array = face_areas.value(); + Sx = face_areas_array[0]->array(mfi); + Sy = face_areas_array[1]->array(mfi); + Sz = face_areas_array[2]->array(mfi); + } } #if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) @@ -1132,10 +1132,10 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( [=] AMREX_GPU_DEVICE (int i, int j, int k) { #ifdef AMREX_USE_EB #ifdef WARPX_DIM_3D - if(lx && ((field=='E' and lx(i, j, k)<=0) or (field=='B' and Sx(i, j, k)<=0))) { return; } + if(lx && ((topology=='e' and lx(i, j, k)<=0) or (topology=='f' and Sx(i, j, k)<=0))) { return; } #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) //In XZ and RZ Ex is associated with a x-edge, while Bx is associated with a z-edge - if(lx && ((field=='E' and lx(i, j, k)<=0) or (field=='B' and lz(i, j, k)<=0))) { return; } + if(lx && ((topology=='e' and lx(i, j, k)<=0) or (topology=='f' and lz(i, j, k)<=0))) { return; } #endif #endif // Shift required in the x-, y-, or z- position @@ -1160,20 +1160,20 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; #endif // Initialize the x-component of the field. 
- mfxfab(i,j,k) = xfield_parser(x,y,z); + mfxfab(i,j,k) = fx_parser(x,y,z,t); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { #ifdef AMREX_USE_EB #ifdef WARPX_DIM_3D - if(ly && ((field=='E' and ly(i, j, k)<=0) or (field=='B' and Sy(i, j, k)<=0))) { return; } + if(ly && ((topology=='e' and ly(i, j, k)<=0) or (topology=='f' and Sy(i, j, k)<=0))) { return; } #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) //In XZ and RZ Ey is associated with a mesh node, so we need to check if the mesh node is covered if(lx && - ((field=='E' and (lx(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)<=0 + ((topology=='e' and (lx(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)<=0 || lx(std::max(i-1, lx_lo.x), std::min(j , lx_hi.y), k)<=0 || lz(std::min(i , lz_hi.x), std::min(j , lz_hi.y), k)<=0 || lz(std::min(i , lz_hi.x), std::max(j-1, lz_lo.y), k)<=0)) or - (field=='B' and Sy(i,j,k)<=0))) { return; } + (topology=='f' and Sy(i,j,k)<=0))) { return; } #endif #endif #if defined(WARPX_DIM_1D_Z) @@ -1196,15 +1196,15 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; #endif // Initialize the y-component of the field. - mfyfab(i,j,k) = yfield_parser(x,y,z); + mfyfab(i,j,k) = fy_parser(x,y,z,t); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { #ifdef AMREX_USE_EB #ifdef WARPX_DIM_3D - if(lz && ((field=='E' and lz(i, j, k)<=0) or (field=='B' and Sz(i, j, k)<=0))) { return; } + if(lz && ((topology=='e' and lz(i, j, k)<=0) or (topology=='f' and Sz(i, j, k)<=0))) { return; } #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) //In XZ and RZ Ez is associated with a z-edge, while Bz is associated with a x-edge - if(lz && ((field=='E' and lz(i, j, k)<=0) or (field=='B' and lx(i, j, k)<=0))) { return; } + if(lz && ((topology=='e' and lz(i, j, k)<=0) or (topology=='f' and lx(i, j, k)<=0))) { return; } #endif #endif #if defined(WARPX_DIM_1D_Z) @@ -1227,7 +1227,7 @@ WarpX::InitializeExternalFieldsOnGridUsingParser ( const amrex::Real z = k*dx_lev[2] + real_box.lo(2) + fac_z; #endif // Initialize the z-component of the field. 
- mfzfab(i,j,k) = zfield_parser(x,y,z); + mfzfab(i,j,k) = fz_parser(x,y,z,t); } ); } @@ -1386,17 +1386,14 @@ WarpX::LoadExternalFields (int const lev) // External grid fields if (m_p_ext_field_params->B_ext_grid_type == ExternalFieldType::parse_ext_grid_function) { // Initialize Bfield_fp_external with external function - InitializeExternalFieldsOnGridUsingParser( - m_fields.get(FieldType::Bfield_fp_external, Direction{0}, lev), - m_fields.get(FieldType::Bfield_fp_external, Direction{1}, lev), - m_fields.get(FieldType::Bfield_fp_external, Direction{2}, lev), - m_p_ext_field_params->Bxfield_parser->compile<3>(), - m_p_ext_field_params->Byfield_parser->compile<3>(), - m_p_ext_field_params->Bzfield_parser->compile<3>(), + ComputeExternalFieldOnGridUsingParser( + FieldType::Bfield_fp_external, + m_p_ext_field_params->Bxfield_parser->compile<4>(), + m_p_ext_field_params->Byfield_parser->compile<4>(), + m_p_ext_field_params->Bzfield_parser->compile<4>(), + lev, PatchType::fine, 'f', m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev), - 'B', - lev, PatchType::fine); + m_fields.get_alldirs(FieldType::face_areas, lev)); } else if (m_p_ext_field_params->B_ext_grid_type == ExternalFieldType::read_from_file) { #if defined(WARPX_DIM_RZ) @@ -1414,17 +1411,14 @@ WarpX::LoadExternalFields (int const lev) if (m_p_ext_field_params->E_ext_grid_type == ExternalFieldType::parse_ext_grid_function) { // Initialize Efield_fp_external with external function - InitializeExternalFieldsOnGridUsingParser( - m_fields.get(FieldType::Efield_fp_external, Direction{0}, lev), - m_fields.get(FieldType::Efield_fp_external, Direction{1}, lev), - m_fields.get(FieldType::Efield_fp_external, Direction{2}, lev), - m_p_ext_field_params->Exfield_parser->compile<3>(), - m_p_ext_field_params->Eyfield_parser->compile<3>(), - m_p_ext_field_params->Ezfield_parser->compile<3>(), + ComputeExternalFieldOnGridUsingParser( + FieldType::Efield_fp_external, + m_p_ext_field_params->Exfield_parser->compile<4>(), + m_p_ext_field_params->Eyfield_parser->compile<4>(), + m_p_ext_field_params->Ezfield_parser->compile<4>(), + lev, PatchType::fine, 'e', m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev), - 'E', - lev, PatchType::fine); + m_fields.get_alldirs(FieldType::face_areas, lev)); } else if (m_p_ext_field_params->E_ext_grid_type == ExternalFieldType::read_from_file) { #if defined(WARPX_DIM_RZ) diff --git a/Source/Particles/Gather/GetExternalFields.cpp b/Source/Particles/Gather/GetExternalFields.cpp index bb55f79f394..207ef4a5a8b 100644 --- a/Source/Particles/Gather/GetExternalFields.cpp +++ b/Source/Particles/Gather/GetExternalFields.cpp @@ -50,19 +50,17 @@ GetExternalEBField::GetExternalEBField (const WarpXParIter& a_pti, long a_offset if (mypc.m_E_ext_particle_s == "parse_e_ext_particle_function") { m_Etype = ExternalFieldInitType::Parser; - constexpr auto num_arguments = 4; //x,y,z,t - m_Exfield_partparser = mypc.m_Ex_particle_parser->compile(); - m_Eyfield_partparser = mypc.m_Ey_particle_parser->compile(); - m_Ezfield_partparser = mypc.m_Ez_particle_parser->compile(); + m_Exfield_partparser = mypc.m_Ex_particle_parser->compile<4>(); + m_Eyfield_partparser = mypc.m_Ey_particle_parser->compile<4>(); + m_Ezfield_partparser = mypc.m_Ez_particle_parser->compile<4>(); } if (mypc.m_B_ext_particle_s == "parse_b_ext_particle_function") { m_Btype = ExternalFieldInitType::Parser; - constexpr auto num_arguments = 4; //x,y,z,t - m_Bxfield_partparser = 
mypc.m_Bx_particle_parser->compile(); - m_Byfield_partparser = mypc.m_By_particle_parser->compile(); - m_Bzfield_partparser = mypc.m_Bz_particle_parser->compile(); + m_Bxfield_partparser = mypc.m_Bx_particle_parser->compile<4>(); + m_Byfield_partparser = mypc.m_By_particle_parser->compile<4>(); + m_Bzfield_partparser = mypc.m_Bz_particle_parser->compile<4>(); } if (mypc.m_E_ext_particle_s == "repeated_plasma_lens" || diff --git a/Source/WarpX.H b/Source/WarpX.H index 83b1880f2b1..c284a72dbfa 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -950,34 +950,31 @@ public: /** * \brief - * This function initializes the E and B fields on each level + * This function computes the E, B, and J fields on each level * using the parser and the user-defined function for the external fields. * The subroutine will parse the x_/y_z_external_grid_function and * then, the field multifab is initialized based on the (x,y,z) position * on the staggered yee-grid or cell-centered grid, in the interior cells * and guard cells. * - * \param[in] mfx x-component of the field to be initialized - * \param[in] mfy y-component of the field to be initialized - * \param[in] mfz z-component of the field to be initialized - * \param[in] xfield_parser parser function to initialize x-field - * \param[in] yfield_parser parser function to initialize y-field - * \param[in] zfield_parser parser function to initialize z-field + * \param[in] field FieldType + * \param[in] fx_parser parser function to initialize x-field + * \param[in] fy_parser parser function to initialize y-field + * \param[in] fz_parser parser function to initialize z-field * \param[in] edge_lengths edge lengths information * \param[in] face_areas face areas information - * \param[in] field flag indicating which field is being initialized ('E' for electric, 'B' for magnetic) + * \param[in] topology flag indicating if field is edge-based or face-based * \param[in] lev level of the Multifabs that is initialized * \param[in] patch_type PatchType on which the field is initialized (fine or coarse) */ - void InitializeExternalFieldsOnGridUsingParser ( - amrex::MultiFab *mfx, amrex::MultiFab *mfy, amrex::MultiFab *mfz, - amrex::ParserExecutor<3> const& xfield_parser, - amrex::ParserExecutor<3> const& yfield_parser, - amrex::ParserExecutor<3> const& zfield_parser, - ablastr::fields::VectorField const& edge_lengths, - ablastr::fields::VectorField const& face_areas, - [[maybe_unused]] char field, - int lev, PatchType patch_type); + void ComputeExternalFieldOnGridUsingParser ( + warpx::fields::FieldType field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, [[maybe_unused]] char topology, + std::optional const& edge_lengths = std::nullopt, + std::optional const& face_areas = std::nullopt); /** * \brief Load field values from a user-specified openPMD file, From 57bda85b9b0d197abfc13e531a1c3a496252de49 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 2 Oct 2024 11:59:42 -0700 Subject: [PATCH 022/278] Release 24.10 (#5357) Prepare the October release of WarpX: ```bash # update dependencies ./Tools/Release/updateAMReX.py ./Tools/Release/updatePICSAR.py # no changes, still 24.09 ./Tools/Release/updatepyAMReX.py # bump version number ./Tools/Release/newVersion.sh ``` Following this workflow: https://warpx.readthedocs.io/en/latest/maintenance/release.html --- .github/workflows/cuda.yml | 2 +- CMakeLists.txt | 2 +- cmake/dependencies/AMReX.cmake | 4 ++-- 
cmake/dependencies/pyAMReX.cmake | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 8d6cc45d714..4a38872a5f4 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -135,7 +135,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 103d0e9f7c7e7fbedbeb61512d3f37f3e2b03770 && cd - + cd ../amrex && git checkout --detach 24.10 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/CMakeLists.txt b/CMakeLists.txt index 3b4e9199f53..5065baa0b6d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ # Preamble #################################################################### # cmake_minimum_required(VERSION 3.24.0) -project(WarpX VERSION 24.09) +project(WarpX VERSION 24.10) include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake) diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 7524d919c61..91340066803 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -256,7 +256,7 @@ macro(find_amrex) endif() set(COMPONENT_PRECISION ${WarpX_PRECISION} P${WarpX_PARTICLE_PRECISION}) - find_package(AMReX 24.09 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) + find_package(AMReX 24.10 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) # note: TINYP skipped because user-configured and optional # AMReX CMake helper scripts @@ -279,7 +279,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "103d0e9f7c7e7fbedbeb61512d3f37f3e2b03770" +set(WarpX_amrex_branch "24.10" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index 69711866f74..f7b905c32c3 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -59,7 +59,7 @@ function(find_pyamrex) endif() elseif(NOT WarpX_pyamrex_internal) # TODO: MPI control - find_package(pyAMReX 24.09 CONFIG REQUIRED) + find_package(pyAMReX 24.10 CONFIG REQUIRED) message(STATUS "pyAMReX: Found version '${pyAMReX_VERSION}'") endif() endfunction() @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "1c66690f83244196c5655293f1381303a7d1589d" +set(WarpX_pyamrex_branch "24.10" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") From cf619b3c39c845f5110547887ccb1f95a355adc6 Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Wed, 2 Oct 2024 14:30:59 -0500 Subject: [PATCH 023/278] Update ALCF Polaris profile (#5358) * Explicitly use gcc-12, because gcc points to gcc/7.5. * Load the non-cray version of hdf5 because the cray version has some cmake issues. 
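After sourcing the updated profile, the compiler hints can be sanity-checked; a quick sketch (the profile location and version output are illustrative):

```bash
source $HOME/polaris_gpu_warpx.profile

$CC --version    # should now report gcc 12.x rather than the default gcc/7.5
$CXX --version   # likewise g++-12
which nvcc       # CUDA compiler from the loaded cuda module
```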
--- .../machines/polaris-alcf/polaris_gpu_warpx.profile.example | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example b/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example index d5cb1ec7a07..e1bd4e0fdd3 100644 --- a/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example +++ b/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example @@ -20,7 +20,7 @@ module load cmake/3.27.7 module load boost # optional: for openPMD and PSATD+RZ support -module load cray-hdf5-parallel/1.12.2.9 +module load hdf5/1.14.3 export CMAKE_PREFIX_PATH=/home/${USER}/sw/polaris/gpu/c-blosc-1.21.1:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=/home/${USER}/sw/polaris/gpu/adios2-2.8.3:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=/home/${USER}/sw/polaris/gpu/blaspp-2024.05.31:$CMAKE_PREFIX_PATH @@ -53,7 +53,7 @@ export CXXFLAGS="-march=znver3" export CFLAGS="-march=znver3" # compiler environment hints -export CC=$(which gcc) -export CXX=$(which g++) +export CC=$(which gcc-12) +export CXX=$(which g++-12) export CUDACXX=$(which nvcc) export CUDAHOSTCXX=${CXX} From e8584946dc0ae6960c0387e4f359ee3e46264005 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 3 Oct 2024 00:11:11 -0700 Subject: [PATCH 024/278] Fix: newVersion.sh Python (#5359) The script was not robust enough for the black/ruff formatting updates we applied. This affected the 24.09 and 24.10 release tags. This fixes it. --- Docs/source/conf.py | 4 ++-- Python/setup.py | 2 +- Tools/Release/newVersion.sh | 12 ++++++------ setup.py | 2 +- 4 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Docs/source/conf.py b/Docs/source/conf.py index 9dfda6346f9..c1ad43197c5 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -107,9 +107,9 @@ def __init__(self, *args, **kwargs): # built documents. # # The short X.Y version. -version = "24.08" +version = "24.10" # The full version, including alpha/beta/rc tags. -release = "24.08" +release = "24.10" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/Python/setup.py b/Python/setup.py index 86585bf8886..d57ebc65223 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -65,7 +65,7 @@ setup( name="pywarpx", - version="24.08", + version="24.10", packages=["pywarpx"], package_dir={"pywarpx": "pywarpx"}, description="""Wrapper of WarpX""", diff --git a/Tools/Release/newVersion.sh b/Tools/Release/newVersion.sh index b1d2a6aad27..9491401b120 100755 --- a/Tools/Release/newVersion.sh +++ b/Tools/Release/newVersion.sh @@ -104,25 +104,25 @@ sed -i -E "s/"\ # setup.py: version = '21.02', sed -i -E "s/"\ -"([[:blank:]]*version[[:blank:]]*=[[:blank:]]*')(.*)('.+)/"\ +"([[:blank:]]*version[[:blank:]]*=[[:blank:]]*\")(.*)(\".+)/"\ "\1${VERSION_STR}\3/g" \ ${REPO_DIR}/setup.py # Python/setup.py: version = '21.02', sed -i -E "s/"\ -"([[:blank:]]*version[[:blank:]]*=[[:blank:]]*')(.*)('.+)/"\ +"([[:blank:]]*version[[:blank:]]*=[[:blank:]]*\")(.*)(\".+)/"\ "\1${VERSION_STR}\3/g" \ ${REPO_DIR}/Python/setup.py # sphinx / RTD # docs/source/conf.py sed -i "s/"\ -"[[:blank:]]*version[[:blank:]]*=[[:blank:]]*u.*/"\ -"version = u'${VERSION_STR_NOSUFFIX}'/g" \ +"[[:blank:]]*version[[:blank:]]*=[[:blank:]]*.*/"\ +"version = \"${VERSION_STR_NOSUFFIX}\"/g" \ ${REPO_DIR}/Docs/source/conf.py sed -i "s/"\ -"[[:blank:]]*release[[:blank:]]*=[[:blank:]]*u.*/"\ -"release = u'${VERSION_STR}'/g" \ +"[[:blank:]]*release[[:blank:]]*=[[:blank:]]*.*/"\ +"release = \"${VERSION_STR}\"/g" \ ${REPO_DIR}/Docs/source/conf.py diff --git a/setup.py b/setup.py index efc18d900cf..9683c8ab675 100644 --- a/setup.py +++ b/setup.py @@ -274,7 +274,7 @@ def build_extension(self, ext): setup( name="pywarpx", # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version="24.08", + version="24.10", packages=["pywarpx"], package_dir={"pywarpx": "Python/pywarpx"}, author="Jean-Luc Vay, David P. Grote, Maxence Thévenet, Rémi Lehe, Andrew Myers, Weiqun Zhang, Axel Huebl, et al.", From d0690e23402c28d624791ab033b5f0d3be289dbd Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 3 Oct 2024 00:15:57 -0700 Subject: [PATCH 025/278] Doc: Update Lawrencium (LBNL) (#5360) Update installation instructions for the Lawrencium Cluster (LBNL). 
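Like the other machine profiles, this file is meant to be sourced before configuring a build; a typical session might look like the following sketch (the profile path and build flags are illustrative and should be checked against the WarpX machine documentation):

```bash
# copy the example into place once, set the project/account variable inside,
# then per session:
source $HOME/lawrencium_warpx.profile

# configure and build WarpX for the V100 GPU nodes
cmake -S . -B build -DWarpX_COMPUTE=CUDA -DWarpX_DIMS="1;2;3"
cmake --build build -j 8
```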
--- .../lawrencium_warpx.profile.example | 17 +++++++---------- 1 file changed, 7 insertions(+), 10 deletions(-) diff --git a/Tools/machines/lawrencium-lbnl/lawrencium_warpx.profile.example b/Tools/machines/lawrencium-lbnl/lawrencium_warpx.profile.example index 8db2b44b8a7..62f80433233 100644 --- a/Tools/machines/lawrencium-lbnl/lawrencium_warpx.profile.example +++ b/Tools/machines/lawrencium-lbnl/lawrencium_warpx.profile.example @@ -2,19 +2,16 @@ #export proj="" # change me, e.g., ac_blast # required dependencies -module load cmake/3.24.1 -module load cuda/11.4 -module load gcc/7.4.0 -module load openmpi/4.0.1-gcc +module load cmake/3.27.7 +module load gcc/11.4.0 +module load cuda/12.2.1 +module load openmpi/4.1.6 # optional: for QED support with detailed tables -module load boost/1.70.0-gcc +module load boost/1.83.0 # optional: for openPMD and PSATD+RZ support -module load hdf5/1.10.5-gcc-p -module load lapack/3.8.0-gcc -# CPU only: -#module load fftw/3.3.8-gcc +module load hdf5/1.14.3 export CMAKE_PREFIX_PATH=$HOME/sw/v100/c-blosc-1.21.1:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=$HOME/sw/v100/adios2-2.8.3:$CMAKE_PREFIX_PATH @@ -27,7 +24,7 @@ export PATH=$HOME/sw/v100/adios2-2.8.3/bin:$PATH #module load ccache # missing # optional: for Python bindings or libEnsemble -module load python/3.8.8 +module load python/3.11.6-gcc-11.4.0 if [ -d "$HOME/sw/v100/venvs/warpx" ] then From ab91803ad00ee3962daabb9322ab0406e8e01cdc Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Thu, 3 Oct 2024 03:26:37 -0700 Subject: [PATCH 026/278] CI: add base input file for convenience (#5362) Having a consistent set of base input files can make default testing easier for package managers (e.g., Spack, Conda). Example pointed out by @ax3l: https://github.com/spack/spack/blob/d21577803f7acb4cc1a5b8144762ea052f67ecab/var/spack/repos/builtin/packages/warpx/package.py#L469 --- .github/workflows/clang_sanitizers.yml | 4 +- .github/workflows/ubuntu.yml | 2 +- .../laser_acceleration/inputs_base_1d | 84 ++++++++++++++++++ .../inputs_test_1d_laser_acceleration | 86 +------------------ 4 files changed, 89 insertions(+), 87 deletions(-) create mode 100644 Examples/Physics_applications/laser_acceleration/inputs_base_1d diff --git a/.github/workflows/clang_sanitizers.yml b/.github/workflows/clang_sanitizers.yml index 48d8068c80c..067488911bb 100644 --- a/.github/workflows/clang_sanitizers.yml +++ b/.github/workflows/clang_sanitizers.yml @@ -73,7 +73,7 @@ jobs: export "ASAN_OPTIONS=detect_leaks=0" mpirun -n 2 ./build/bin/warpx.rz Examples/Physics_applications/laser_acceleration/inputs_base_rz - mpirun -n 2 ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration + mpirun -n 2 ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_base_1d mpirun -n 2 ./build/bin/warpx.2d Examples/Physics_applications/laser_acceleration/inputs_base_2d mpirun -n 2 ./build/bin/warpx.3d Examples/Physics_applications/laser_acceleration/inputs_base_3d @@ -154,7 +154,7 @@ jobs: export OMP_NUM_THREADS=2 mpirun -n 2 ./build/bin/warpx.rz Examples/Physics_applications/laser_acceleration/inputs_base_rz warpx.serialize_initial_conditions = 0 - mpirun -n 2 ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration warpx.serialize_initial_conditions = 0 + mpirun -n 2 ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_base_1d warpx.serialize_initial_conditions = 0 mpirun -n 2 
./build/bin/warpx.2d Examples/Physics_applications/laser_acceleration/inputs_base_2d warpx.serialize_initial_conditions = 0 mpirun -n 2 ./build/bin/warpx.3d Examples/Physics_applications/laser_acceleration/inputs_base_3d warpx.serialize_initial_conditions = 0 diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index a14f66b8cd9..6435ed7e66a 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -86,7 +86,7 @@ jobs: -DWarpX_QED_TOOLS=ON cmake --build build -j 4 - ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration + ./build/bin/warpx.1d Examples/Physics_applications/laser_acceleration/inputs_base_1d ./build/bin/warpx.2d Examples/Physics_applications/laser_acceleration/inputs_base_2d ccache -s diff --git a/Examples/Physics_applications/laser_acceleration/inputs_base_1d b/Examples/Physics_applications/laser_acceleration/inputs_base_1d new file mode 100644 index 00000000000..95e54c7d43e --- /dev/null +++ b/Examples/Physics_applications/laser_acceleration/inputs_base_1d @@ -0,0 +1,84 @@ +################################# +####### GENERAL PARAMETERS ###### +################################# +max_step = 100 +amr.n_cell = 256 +amr.max_grid_size = 64 # maximum size of each AMReX box, used to decompose the domain +amr.blocking_factor = 32 # minimum size of each AMReX box, used to decompose the domain +geometry.dims = 1 +geometry.prob_lo = -56.e-6 # physical domain +geometry.prob_hi = 12.e-6 +amr.max_level = 0 # Maximum level in hierarchy (1 might be unstable, >1 is not supported) + +################################# +####### Boundary condition ###### +################################# +boundary.field_lo = pec +boundary.field_hi = pec + +################################# +############ NUMERICS ########### +################################# +warpx.verbose = 1 +warpx.do_dive_cleaning = 0 +warpx.use_filter = 1 +warpx.cfl = 0.9 # if 1., the time step is set to its CFL limit +warpx.do_moving_window = 1 +warpx.moving_window_dir = z +warpx.moving_window_v = 1.0 # units of speed of light +warpx.do_dynamic_scheduling = 0 +warpx.serialize_initial_conditions = 1 + +# Order of particle shape factors +algo.particle_shape = 3 + +################################# +############ PLASMA ############# +################################# +particles.species_names = electrons + +electrons.species_type = electron +electrons.injection_style = "NUniformPerCell" +electrons.num_particles_per_cell_each_dim = 10 +electrons.zmin = 10.e-6 +electrons.profile = constant +electrons.density = 2.e23 # number of electrons per m^3 +electrons.momentum_distribution_type = "at_rest" +electrons.do_continuous_injection = 1 +electrons.addRealAttributes = orig_z +electrons.attribute.orig_z(x,y,z,ux,uy,uz,t) = "z" +electrons.addIntegerAttributes = regionofinterest +electrons.attribute.regionofinterest(x,y,z,ux,uy,uz,t) = " (z>12.0e-6) * (z<13.0e-6)" + +################################# +############ LASER ############## +################################# +lasers.names = laser1 +laser1.profile = Gaussian +laser1.position = 0. 0. 9.e-6 # This point is on the laser plane +laser1.direction = 0. 0. 1. # The plane normal direction +laser1.polarization = 0. 1. 0. 
# The main polarization vector +laser1.e_max = 16.e12 # Maximum amplitude of the laser field (in V/m) +laser1.profile_waist = 5.e-6 # The waist of the laser (in m) +laser1.profile_duration = 15.e-15 # The duration of the laser (in s) +laser1.profile_t_peak = 30.e-15 # Time at which the laser reaches its peak (in s) +laser1.profile_focal_distance = 100.e-6 # Focal distance from the antenna (in m) +laser1.wavelength = 0.8e-6 # The wavelength of the laser (in m) + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 100 +diag1.diag_type = Full +diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho + +# Reduced Diagnostics +warpx.reduced_diags_names = FP + +FP.type = FieldProbe +FP.intervals = 10 +FP.integrate = 0 +FP.probe_geometry = Line +FP.z_probe = -56e-6 +FP.z1_probe = 12e-6 +FP.resolution = 100 +FP.do_moving_window_FP = 1 diff --git a/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration b/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration index 95e54c7d43e..190b458b397 100644 --- a/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration +++ b/Examples/Physics_applications/laser_acceleration/inputs_test_1d_laser_acceleration @@ -1,84 +1,2 @@ -################################# -####### GENERAL PARAMETERS ###### -################################# -max_step = 100 -amr.n_cell = 256 -amr.max_grid_size = 64 # maximum size of each AMReX box, used to decompose the domain -amr.blocking_factor = 32 # minimum size of each AMReX box, used to decompose the domain -geometry.dims = 1 -geometry.prob_lo = -56.e-6 # physical domain -geometry.prob_hi = 12.e-6 -amr.max_level = 0 # Maximum level in hierarchy (1 might be unstable, >1 is not supported) - -################################# -####### Boundary condition ###### -################################# -boundary.field_lo = pec -boundary.field_hi = pec - -################################# -############ NUMERICS ########### -################################# -warpx.verbose = 1 -warpx.do_dive_cleaning = 0 -warpx.use_filter = 1 -warpx.cfl = 0.9 # if 1., the time step is set to its CFL limit -warpx.do_moving_window = 1 -warpx.moving_window_dir = z -warpx.moving_window_v = 1.0 # units of speed of light -warpx.do_dynamic_scheduling = 0 -warpx.serialize_initial_conditions = 1 - -# Order of particle shape factors -algo.particle_shape = 3 - -################################# -############ PLASMA ############# -################################# -particles.species_names = electrons - -electrons.species_type = electron -electrons.injection_style = "NUniformPerCell" -electrons.num_particles_per_cell_each_dim = 10 -electrons.zmin = 10.e-6 -electrons.profile = constant -electrons.density = 2.e23 # number of electrons per m^3 -electrons.momentum_distribution_type = "at_rest" -electrons.do_continuous_injection = 1 -electrons.addRealAttributes = orig_z -electrons.attribute.orig_z(x,y,z,ux,uy,uz,t) = "z" -electrons.addIntegerAttributes = regionofinterest -electrons.attribute.regionofinterest(x,y,z,ux,uy,uz,t) = " (z>12.0e-6) * (z<13.0e-6)" - -################################# -############ LASER ############## -################################# -lasers.names = laser1 -laser1.profile = Gaussian -laser1.position = 0. 0. 9.e-6 # This point is on the laser plane -laser1.direction = 0. 0. 1. # The plane normal direction -laser1.polarization = 0. 1. 0. 
# The main polarization vector -laser1.e_max = 16.e12 # Maximum amplitude of the laser field (in V/m) -laser1.profile_waist = 5.e-6 # The waist of the laser (in m) -laser1.profile_duration = 15.e-15 # The duration of the laser (in s) -laser1.profile_t_peak = 30.e-15 # Time at which the laser reaches its peak (in s) -laser1.profile_focal_distance = 100.e-6 # Focal distance from the antenna (in m) -laser1.wavelength = 0.8e-6 # The wavelength of the laser (in m) - -# Diagnostics -diagnostics.diags_names = diag1 -diag1.intervals = 100 -diag1.diag_type = Full -diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho - -# Reduced Diagnostics -warpx.reduced_diags_names = FP - -FP.type = FieldProbe -FP.intervals = 10 -FP.integrate = 0 -FP.probe_geometry = Line -FP.z_probe = -56e-6 -FP.z1_probe = 12e-6 -FP.resolution = 100 -FP.do_moving_window_FP = 1 +# base input parameters +FILE = inputs_base_1d From 6302c39a446e93dc9dffdb3fc98fd555da0ef0ab Mon Sep 17 00:00:00 2001 From: David Grote Date: Thu, 3 Oct 2024 09:26:19 -0700 Subject: [PATCH 027/278] Remove multifab_map and associated functions (#5351) Now that all MultiFabs are registered in the new system, the old `multifab_map` can be removed. Note that the `imultifab_map` is still needed since the new system does not yet work with `iMultiFabs`. This breaks the Python API since it removes `warpx.field` which was returning the old map. --- Source/Python/WarpX.cpp | 7 ------ Source/WarpX.H | 50 ++--------------------------------------- Source/WarpX.cpp | 39 -------------------------------- 3 files changed, 2 insertions(+), 94 deletions(-) diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index 39baf0289b2..0b1ae49dfbc 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -112,13 +112,6 @@ void init_WarpX (py::module& m) //py::overload_cast< int >(&WarpX::boxArray, py::const_), py::arg("lev") ) - .def("field", - [](WarpX const & wx) { - return wx.multifab_map; - }, - py::return_value_policy::reference_internal, - R"doc(Registry to all WarpX MultiFab (fields).)doc" - ) .def("multifab", [](WarpX & wx, std::string internal_name) { if (wx.m_fields.internal_has(internal_name)) { diff --git a/Source/WarpX.H b/Source/WarpX.H index c284a72dbfa..c61fb92315f 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -403,31 +403,6 @@ public: // Global rho nodal flag to know about rho index type when rho MultiFab is not allocated amrex::IntVect m_rho_nodal_flag; - /** - * \brief - * Allocate and optionally initialize the MultiFab. This also adds the MultiFab - * to the map of MultiFabs (used to ease the access to MultiFabs from the Python - * interface - * - * \param[out] mf The MultiFab unique pointer to be allocated - * \param[in] ba The BoxArray describing the MultiFab - * \param[in] dm The DistributionMapping describing the MultiFab - * \param[in] ncomp The number of components in the MultiFab - * \param[in] ngrow The number of guard cells in the MultiFab - * \param[in] level The refinement level - * \param[in] name The name of the MultiFab to use in the map - * \param[in] initial_value The optional initial value - */ - static void AllocInitMultiFab ( - std::unique_ptr& mf, - const amrex::BoxArray& ba, - const amrex::DistributionMapping& dm, - int ncomp, - const amrex::IntVect& ngrow, - int level, - const std::string& name, - std::optional initial_value = {}); - /** * \brief * Allocate and optionally initialize the iMultiFab. 
This also adds the iMultiFab @@ -453,30 +428,9 @@ public: const std::string& name, std::optional initial_value = {}); - /** - * \brief - * Create an alias of a MultiFab, adding the alias to the MultiFab map - * \param[out] mf The MultiFab to create - * \param[in] mf_to_alias The MultiFab to alias - * \param[in] scomp The starting component to be aliased - * \param[in] ncomp The number of components to alias - * \param[in] level The refinement level - * \param[in] name The name of the MultiFab to use in the map - * \param[in] initial_value optional initial value for MultiFab - */ - static void AliasInitMultiFab ( - std::unique_ptr& mf, - const amrex::MultiFab& mf_to_alias, - int scomp, - int ncomp, - int level, - const std::string& name, - std::optional initial_value); - - // Maps of all of the MultiFabs and iMultiFabs used (this can include MFs from other classes) - // This is a convenience for the Python interface, allowing all MultiFabs + // Maps of all of the iMultiFabs used (this can include MFs from other classes) + // This is a convenience for the Python interface, allowing all iMultiFabs // to be easily referenced from Python. - static std::map multifab_map; static std::map imultifab_map; /** diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 89254e05c98..250bab273d0 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -184,7 +184,6 @@ bool WarpX::safe_guard_cells = false; utils::parser::IntervalsParser WarpX::dt_update_interval; -std::map WarpX::multifab_map; std::map WarpX::imultifab_map; IntVect WarpX::filter_npass_each_dir(1); @@ -3284,26 +3283,6 @@ TagWithLevelSuffix (std::string name, int const level) return name; } -void -WarpX::AllocInitMultiFab ( - std::unique_ptr& mf, - const amrex::BoxArray& ba, - const amrex::DistributionMapping& dm, - const int ncomp, - const amrex::IntVect& ngrow, - const int level, - const std::string& name, - std::optional initial_value) -{ - const auto name_with_suffix = TagWithLevelSuffix(name, level); - const auto tag = amrex::MFInfo().SetTag(name_with_suffix); - mf = std::make_unique(ba, dm, ncomp, ngrow, tag); - if (initial_value) { - mf->setVal(*initial_value); - } - multifab_map[name_with_suffix] = mf.get(); -} - void WarpX::AllocInitMultiFab ( std::unique_ptr& mf, @@ -3324,24 +3303,6 @@ WarpX::AllocInitMultiFab ( imultifab_map[name_with_suffix] = mf.get(); } -void -WarpX::AliasInitMultiFab ( - std::unique_ptr& mf, - const amrex::MultiFab& mf_to_alias, - const int scomp, - const int ncomp, - const int level, - const std::string& name, - std::optional initial_value) -{ - const auto name_with_suffix = TagWithLevelSuffix(name, level); - mf = std::make_unique(mf_to_alias, amrex::make_alias, scomp, ncomp); - if (initial_value) { - mf->setVal(*initial_value); - } - multifab_map[name_with_suffix] = mf.get(); -} - amrex::DistributionMapping WarpX::MakeDistributionMap (int lev, amrex::BoxArray const& ba) { From a7d3935de168cd66a103bc81049d76718e90858f Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Thu, 3 Oct 2024 18:28:00 -0700 Subject: [PATCH 028/278] CI: checksums cleanup (#5297) Fix #5206 and prepare for #5256: - [x] Add missing checksums - [x] `test_1d_background_mcc_picmi` - [x] `test_2d_background_mcc_picmi` - [x] `test_3d_particle_absorption` - [x] `test_2d_dirichlet_bc` - [x] `test_2d_dirichlet_bc_picmi` - [x] `test_2d_field_probe` - [x] `test_3d_embedded_boundary_picmi` - [x] `test_2d_particle_reflection_picmi` - [x] `test_rz_laser_acceleration_opmd` - [x] `test_2d_runtime_components_picmi` - 
[x] Clean up calls to `evaluate_checksum` Calls to `evaluate_checksum` are now uniform across the analysis scripts, making it easier to automatize further changes. Hopefully the uniformity will be preserved by copy/pasting the necessary code: - ```py import os import sys ``` - ```py sys.path.insert(1, "../../../../warpx/Regression/Checksum/") from checksumAPI import evaluate_checksum ``` - ```py # compare checksums evaluate_checksum( test_name=os.path.split(os.getcwd())[1], output_file=sys.argv[1], ) ``` with this last one possibly modified by adding other arguments, e.g., `output_format`, `rtol`, `do_particles`, etc. --- .../capacitive_discharge/analysis_1d.py | 12 +++++++ .../capacitive_discharge/analysis_2d.py | 13 ++++--- .../capacitive_discharge/analysis_dsmc.py | 17 +++++---- .../analysis_1d_fluid_boosted.py | 9 +++-- .../laser_acceleration/analysis_openpmd_rz.py | 11 ++++++ .../analysis_refined_injection.py | 9 +++-- .../spacecraft_charging/analysis.py | 13 ++++--- .../Tests/accelerator_lattice/analysis.py | 9 +++-- Examples/Tests/boosted_diags/analysis.py | 9 +++-- Examples/Tests/boundaries/analysis.py | 9 +++-- Examples/Tests/btd_rz/analysis.py | 10 +++--- .../Tests/collider_relevant_diags/analysis.py | 13 +++---- .../Tests/collision/analysis_collision_1d.py | 9 +++-- .../Tests/collision/analysis_collision_2d.py | 8 +++-- .../Tests/collision/analysis_collision_3d.py | 9 +++-- .../analysis_collision_3d_isotropization.py | 9 +++-- .../Tests/collision/analysis_collision_rz.py | 10 ++++-- Examples/Tests/diff_lumi_diag/analysis.py | 14 ++++---- Examples/Tests/divb_cleaning/analysis.py | 14 ++++---- .../electrostatic_dirichlet_bc/analysis.py | 14 ++++++++ .../analysis_electrostatic_sphere.py | 9 +++-- .../Tests/electrostatic_sphere_eb/analysis.py | 11 +++--- .../electrostatic_sphere_eb/analysis_rz.py | 10 ++++-- .../electrostatic_sphere_eb/analysis_rz_mr.py | 10 ++++-- .../embedded_boundary_cube/analysis_fields.py | 8 +++-- .../analysis_fields_2d.py | 10 +++--- .../analysis_fields.py | 11 +++--- .../embedded_boundary_python_api/analysis.py | 11 +++++- .../analysis_fields_2d.py | 10 +++--- .../analysis_fields_3d.py | 10 +++--- Examples/Tests/embedded_circle/analysis.py | 18 +++++----- .../analysis.py | 13 ++++--- Examples/Tests/field_ionization/analysis.py | 9 +++-- Examples/Tests/field_probe/analysis.py | 12 +++++++ .../analysis_flux_injection_3d.py | 14 ++++---- .../analysis_flux_injection_rz.py | 14 ++++---- Examples/Tests/gaussian_beam/analysis.py | 12 +++---- Examples/Tests/implicit/analysis_1d.py | 12 +++---- .../Tests/implicit/analysis_vandb_jfnk_2d.py | 9 +++-- .../Tests/initial_distribution/analysis.py | 12 +++---- .../Tests/initial_plasma_profile/analysis.py | 21 +++++------ Examples/Tests/ion_stopping/analysis.py | 9 +++-- Examples/Tests/langmuir/analysis_1d.py | 8 +++-- Examples/Tests/langmuir/analysis_2d.py | 8 +++-- Examples/Tests/langmuir/analysis_3d.py | 11 +++--- Examples/Tests/langmuir/analysis_rz.py | 8 +++-- Examples/Tests/langmuir_fluids/analysis_1d.py | 9 +++-- Examples/Tests/langmuir_fluids/analysis_2d.py | 9 +++-- Examples/Tests/langmuir_fluids/analysis_3d.py | 14 ++++---- Examples/Tests/langmuir_fluids/analysis_rz.py | 8 +++-- Examples/Tests/laser_injection/analysis_1d.py | 9 +++-- Examples/Tests/laser_injection/analysis_2d.py | 9 +++-- Examples/Tests/laser_injection/analysis_3d.py | 12 +++---- .../laser_injection_from_file/analysis_1d.py | 9 +++-- .../analysis_1d_boost.py | 10 +++--- .../laser_injection_from_file/analysis_2d.py | 9 +++-- 
.../analysis_2d_binary.py | 9 +++-- .../laser_injection_from_file/analysis_3d.py | 9 +++-- .../analysis_from_RZ_file.py | 9 +++-- .../laser_injection_from_file/analysis_rz.py | 9 +++-- .../Tests/load_external_field/analysis_3d.py | 9 +++-- .../Tests/load_external_field/analysis_rz.py | 9 +++-- .../Tests/magnetostatic_eb/analysis_rz.py | 22 +++++------- .../nci_fdtd_stability/analysis_ncicorr.py | 9 +++-- .../nci_psatd_stability/analysis_galilean.py | 10 ++++-- .../nci_psatd_stability/analysis_multiJ.py | 9 +++-- .../Tests/nodal_electrostatic/analysis.py | 13 ++++--- ...sis_deuterium_deuterium_3d_intraspecies.py | 13 ++++--- .../analysis_proton_boron_fusion.py | 9 +++-- .../analysis_two_product_fusion.py | 9 +++-- .../Tests/ohm_solver_em_modes/analysis.py | 11 +++--- .../Tests/ohm_solver_em_modes/analysis_rz.py | 12 ++++--- .../ohm_solver_ion_Landau_damping/analysis.py | 11 +++--- .../analysis.py | 11 +++--- .../analysis.py | 11 +++--- .../Tests/open_bc_poisson_solver/analysis.py | 15 ++++---- .../particle_boundary_interaction/analysis.py | 12 ++++--- .../particle_boundary_process/CMakeLists.txt | 2 +- .../analysis_absorption.py | 18 +++++++--- .../analysis_default_regression.py | 1 + .../analysis_reflection.py | 15 -------- ...nputs_test_2d_particle_reflection_picmi.py | 4 --- .../analysis_particle_diags_impl.py | 10 ++++-- Examples/Tests/particle_pusher/analysis.py | 9 +++-- .../particle_thermal_boundary/analysis.py | 11 +++--- .../analysis_particles_in_pml.py | 9 +++-- Examples/Tests/pec/analysis_pec.py | 14 ++++---- Examples/Tests/pec/analysis_pec_mr.py | 14 ++++---- Examples/Tests/photon_pusher/analysis.py | 9 +++-- Examples/Tests/plasma_lens/analysis.py | 12 ++++--- Examples/Tests/pml/analysis_pml_ckc.py | 9 +++-- Examples/Tests/pml/analysis_pml_psatd.py | 9 +++-- Examples/Tests/pml/analysis_pml_psatd_rz.py | 9 +++-- Examples/Tests/pml/analysis_pml_yee.py | 9 +++-- .../Tests/point_of_contact_eb/analysis.py | 14 ++++---- .../Tests/projection_divb_cleaner/analysis.py | 9 +++-- .../Tests/qed/analysis_breit_wheeler_opmd.py | 10 ++++-- .../Tests/qed/analysis_breit_wheeler_yt.py | 9 +++-- Examples/Tests/qed/analysis_quantum_sync.py | 9 +++-- Examples/Tests/qed/analysis_schwinger.py | 9 +++-- Examples/Tests/radiation_reaction/analysis.py | 9 +++-- .../analysis_reduced_diags_impl.py | 12 ++++--- ...alysis_reduced_diags_load_balance_costs.py | 12 ++++--- .../analysis.py | 10 ++++-- .../Tests/repelling_particles/analysis.py | 14 ++++---- Examples/Tests/resampling/analysis.py | 9 +++-- Examples/Tests/restart/CMakeLists.txt | 15 +++----- Examples/Tests/restart/analysis_restart.py | 11 +++--- .../analysis_rigid_injection_btd.py | 9 +++-- .../analysis_rigid_injection_lab.py | 9 +++-- Examples/Tests/scraping/analysis_rz.py | 11 +++--- Examples/Tests/silver_mueller/analysis.py | 9 +++-- Examples/Tests/single_particle/analysis.py | 9 +++-- .../space_charge_initialization/analysis.py | 10 ++++-- Examples/Tests/vay_deposition/analysis.py | 10 +++--- .../analysis_default_openpmd_regression.py | 22 +++++++----- Examples/analysis_default_regression.py | 20 ++++++----- Examples/analysis_default_restart.py | 17 +++++---- .../test_1d_background_mcc_picmi.json | 20 +++++++++++ .../test_2d_background_mcc_picmi.json | 22 ++++++++++++ .../benchmarks_json/test_2d_dirichlet_bc.json | 5 +++ .../benchmarks_json/test_2d_field_probe.json | 10 ++++++ .../test_2d_particle_reflection_picmi.json | 7 ++++ .../test_2d_runtime_components_picmi.json | 14 ++++++++ .../test_3d_embedded_boundary_picmi.json | 5 +++ 
.../test_3d_particle_absorption.json | 10 ++++++ .../test_rz_laser_acceleration_opmd.json | 35 +++++++++++++++++++ Regression/Checksum/checksum.py | 9 ++--- 128 files changed, 909 insertions(+), 510 deletions(-) create mode 120000 Examples/Tests/particle_boundary_process/analysis_default_regression.py delete mode 100755 Examples/Tests/particle_boundary_process/analysis_reflection.py create mode 100644 Regression/Checksum/benchmarks_json/test_1d_background_mcc_picmi.json create mode 100644 Regression/Checksum/benchmarks_json/test_2d_background_mcc_picmi.json create mode 100644 Regression/Checksum/benchmarks_json/test_2d_dirichlet_bc.json create mode 100644 Regression/Checksum/benchmarks_json/test_2d_field_probe.json create mode 100644 Regression/Checksum/benchmarks_json/test_2d_particle_reflection_picmi.json create mode 100644 Regression/Checksum/benchmarks_json/test_2d_runtime_components_picmi.json create mode 100644 Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_picmi.json create mode 100644 Regression/Checksum/benchmarks_json/test_3d_particle_absorption.json create mode 100644 Regression/Checksum/benchmarks_json/test_rz_laser_acceleration_opmd.json diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_1d.py b/Examples/Physics_applications/capacitive_discharge/analysis_1d.py index 82d98c38210..e9043e5dc01 100755 --- a/Examples/Physics_applications/capacitive_discharge/analysis_1d.py +++ b/Examples/Physics_applications/capacitive_discharge/analysis_1d.py @@ -2,8 +2,14 @@ # Copyright 2022 Modern Electron, David Grote +import os +import sys + import numpy as np +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum + # fmt: off ref_density = np.array([ 1.27989677e+14, 2.23601330e+14, 2.55400265e+14, 2.55664972e+14, @@ -45,3 +51,9 @@ density_data = np.load("ion_density_case_1.npy") print(repr(density_data)) assert np.allclose(density_data, ref_density) + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_2d.py b/Examples/Physics_applications/capacitive_discharge/analysis_2d.py index f52f69f4bf4..d4845ffb718 100755 --- a/Examples/Physics_applications/capacitive_discharge/analysis_2d.py +++ b/Examples/Physics_applications/capacitive_discharge/analysis_2d.py @@ -7,12 +7,15 @@ # solver that directly solves the Poisson equation using matrix inversion # rather than the iterative approach from the MLMG solver. 
+import os import sys -sys.path.append("../../../../warpx/Regression/Checksum/") +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum -import checksumAPI - -my_check = checksumAPI.evaluate_checksum( - "test_2d_background_mcc", "diags/diag1000050", do_particles=True, rtol=5e-3 +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=5e-3, ) diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py b/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py index 505521fc1ca..1458924b35c 100755 --- a/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py +++ b/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py @@ -7,15 +7,8 @@ import numpy as np -sys.path.append("../../../../warpx/Regression/Checksum/") - -import checksumAPI - -# this will be the name of the plot file -fn = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] - -my_check = checksumAPI.evaluate_checksum(test_name, fn, do_particles=True) +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum # fmt: off ref_density = np.array([ @@ -58,3 +51,9 @@ density_data = np.load("ion_density_case_1.npy") print(repr(density_data)) assert np.allclose(density_data, ref_density) + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid_boosted.py b/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid_boosted.py index 934d298c6b7..03369d48adf 100755 --- a/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid_boosted.py +++ b/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid_boosted.py @@ -25,7 +25,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -188,5 +188,8 @@ def odefcn(phi, xi, kp, a0, c, tau, xi_0, lambda_laser): assert error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py b/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py index f136ffeb1d4..0e07ddf914c 100755 --- a/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py +++ b/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py @@ -1,10 +1,14 @@ #!/usr/bin/env python3 +import os import sys import numpy as np import openpmd_api as io +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum + filename = sys.argv[1] series = io.Series(f"{filename}/openpmd_%T.h5", io.Access.read_only) @@ -63,3 +67,10 @@ assert ( (electron_meanz > 0) and (beam_meanz < 0) ), "problem with openPMD+RZ. Maybe openPMD+RZ mixed up the order of rho_ diagnostics?" 
+ +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + output_format="openpmd", +) diff --git a/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py b/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py index bc7fac15247..d481075c112 100755 --- a/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py +++ b/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py @@ -17,7 +17,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -59,5 +59,8 @@ # Test uniformity up to 0.5% relative variation assert rho_slice.std() < 0.005 * abs(rho_slice.mean()) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Physics_applications/spacecraft_charging/analysis.py b/Examples/Physics_applications/spacecraft_charging/analysis.py index 8819c435fb7..1795f5dfb6e 100755 --- a/Examples/Physics_applications/spacecraft_charging/analysis.py +++ b/Examples/Physics_applications/spacecraft_charging/analysis.py @@ -23,13 +23,9 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum -# Open plotfile specified in command line filename = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd") - ts = OpenPMDTimeSeries(filename) dt = 1.27e-8 t = [] @@ -78,3 +74,10 @@ def func(x, v0, tau): assert (diff_v0 < tolerance_v0) and ( diff_tau < tolerance_tau ), "Test spacecraft_charging did not pass" + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + output_format="openpmd", +) diff --git a/Examples/Tests/accelerator_lattice/analysis.py b/Examples/Tests/accelerator_lattice/analysis.py index 6f76fd86855..b208d086d8c 100755 --- a/Examples/Tests/accelerator_lattice/analysis.py +++ b/Examples/Tests/accelerator_lattice/analysis.py @@ -24,7 +24,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] ds = yt.load(filename) @@ -131,5 +131,8 @@ def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): "error in x particle velocity" ) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/boosted_diags/analysis.py b/Examples/Tests/boosted_diags/analysis.py index 62956133af6..0d4794a8894 100755 --- a/Examples/Tests/boosted_diags/analysis.py +++ b/Examples/Tests/boosted_diags/analysis.py @@ -27,7 +27,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -56,5 +56,8 @@ (w,) = ts.get_particle(["w"], species="beam", iteration=3) assert (400 < len(w)) & (len(w) < 600) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums 
+evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/boundaries/analysis.py b/Examples/Tests/boundaries/analysis.py index be76a728a1f..ce3251ea406 100755 --- a/Examples/Tests/boundaries/analysis.py +++ b/Examples/Tests/boundaries/analysis.py @@ -23,7 +23,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # The min and max size of the box along the three axis. dmin = -1.0 @@ -111,5 +111,8 @@ def do_periodic(x): np.abs((zz - zza) / zz) < 1.0e-15 ), "Periodic particle position not correct" -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/btd_rz/analysis.py b/Examples/Tests/btd_rz/analysis.py index 5002b4c80b3..87f74599105 100755 --- a/Examples/Tests/btd_rz/analysis.py +++ b/Examples/Tests/btd_rz/analysis.py @@ -17,7 +17,7 @@ from scipy.optimize import curve_fit sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum def gaussian_laser(z, a0, z0_phase, z0_prop, ctau, lambda0): @@ -58,6 +58,8 @@ def fit_function(z, z0_phase): ## Check that the a0 agrees within 5% of the predicted value assert np.allclose(Ex, Ex_fit, atol=0.18 * Ex.max()) -# Checksum regression analysis -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, plotfile) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/collider_relevant_diags/analysis.py b/Examples/Tests/collider_relevant_diags/analysis.py index f6eb9de124f..232bc47af21 100755 --- a/Examples/Tests/collider_relevant_diags/analysis.py +++ b/Examples/Tests/collider_relevant_diags/analysis.py @@ -8,8 +8,8 @@ import pandas as pd from scipy.constants import c, e, hbar, m_e -sys.path.append("../../../../warpx/Regression/Checksum/") -import checksumAPI +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum sys.path.append("../../../../warpx/Tools/Parser/") from input_file_parser import parse_input_file @@ -180,7 +180,8 @@ def dL_dt(): dL_dt_cr = df[[col for col in df.columns if "dL_dt" in col]].to_numpy() assert np.allclose(dL_dt_cr, dL_dt(), rtol=1e-8) -# Checksum analysis -plotfile = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, plotfile) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/collision/analysis_collision_1d.py b/Examples/Tests/collision/analysis_collision_1d.py index 1888696953e..97ddee0591d 100755 --- a/Examples/Tests/collision/analysis_collision_1d.py +++ b/Examples/Tests/collision/analysis_collision_1d.py @@ -23,7 +23,7 @@ from scipy.constants import e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file last_fn = sys.argv[1] @@ -124,5 +124,8 @@ print("tolerance = ", tolerance) assert error < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, last_fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git 
a/Examples/Tests/collision/analysis_collision_2d.py b/Examples/Tests/collision/analysis_collision_2d.py index 7e1d74001a3..7ce3e4cdf2e 100755 --- a/Examples/Tests/collision/analysis_collision_2d.py +++ b/Examples/Tests/collision/analysis_collision_2d.py @@ -33,7 +33,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum test_name = os.path.split(os.getcwd())[1] @@ -121,4 +121,8 @@ last_fn, random_filter_fn, random_fraction, dim, species_name ) -checksumAPI.evaluate_checksum(test_name, last_fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/collision/analysis_collision_3d.py b/Examples/Tests/collision/analysis_collision_3d.py index 0a1b016a227..59c625d3cb8 100755 --- a/Examples/Tests/collision/analysis_collision_3d.py +++ b/Examples/Tests/collision/analysis_collision_3d.py @@ -33,7 +33,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 0.001 @@ -111,5 +111,8 @@ last_fn, random_filter_fn, random_fraction, dim, species_name ) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, last_fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/collision/analysis_collision_3d_isotropization.py b/Examples/Tests/collision/analysis_collision_3d_isotropization.py index 6386ce74812..2cfe7f9fffd 100755 --- a/Examples/Tests/collision/analysis_collision_3d_isotropization.py +++ b/Examples/Tests/collision/analysis_collision_3d_isotropization.py @@ -19,7 +19,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum e = sc.e pi = sc.pi @@ -64,5 +64,8 @@ print(f"tolerance = {tolerance}") assert error < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/collision/analysis_collision_rz.py b/Examples/Tests/collision/analysis_collision_rz.py index 168d8a8a7cf..2df2f6500d2 100755 --- a/Examples/Tests/collision/analysis_collision_rz.py +++ b/Examples/Tests/collision/analysis_collision_rz.py @@ -24,7 +24,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 1.0e-15 @@ -55,5 +55,9 @@ print("tolerance = ", tolerance) assert error < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, last_fn, do_particles=False) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + do_particles=False, +) diff --git a/Examples/Tests/diff_lumi_diag/analysis.py b/Examples/Tests/diff_lumi_diag/analysis.py index ef573fc4863..8f2061ff1dc 100755 --- a/Examples/Tests/diff_lumi_diag/analysis.py +++ b/Examples/Tests/diff_lumi_diag/analysis.py @@ -11,7 +11,7 @@ from read_raw_data import read_reduced_diags_histogram sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Extract the differential luminosity from the file _, _, E_bin, bin_data = read_reduced_diags_histogram( @@ -44,9 +44,9 @@ print("Tolerance: ", tol) assert 
error < tol -# Get name of the test -fn = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] - -# Run checksum regression test -checksumAPI.evaluate_checksum(test_name, fn, rtol=1e-2) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=1e-2, +) diff --git a/Examples/Tests/divb_cleaning/analysis.py b/Examples/Tests/divb_cleaning/analysis.py index e534e5b0d59..d72226a01cc 100755 --- a/Examples/Tests/divb_cleaning/analysis.py +++ b/Examples/Tests/divb_cleaning/analysis.py @@ -15,9 +15,8 @@ import yt yt.funcs.mylog.setLevel(50) -import re -import checksumAPI +from checksumAPI import evaluate_checksum from scipy.constants import c # Name of the last plotfile @@ -53,9 +52,8 @@ assert rel_error < tolerance -test_name = os.path.split(os.getcwd())[1] - -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) -else: - checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/electrostatic_dirichlet_bc/analysis.py b/Examples/Tests/electrostatic_dirichlet_bc/analysis.py index 82fe061c3a8..1b8f6923c1c 100755 --- a/Examples/Tests/electrostatic_dirichlet_bc/analysis.py +++ b/Examples/Tests/electrostatic_dirichlet_bc/analysis.py @@ -14,10 +14,16 @@ # Possible running time: ~ 19 s import glob +import os +import re +import sys import numpy as np import yt +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum + files = sorted(glob.glob("diags/diag1*"))[1:] assert len(files) > 0 @@ -39,3 +45,11 @@ assert np.allclose(potentials_lo, expected_potentials_lo, rtol=0.1) assert np.allclose(potentials_hi, expected_potentials_hi, rtol=0.1) + +# compare checksums +test_name = os.path.split(os.getcwd())[1] +test_name = re.sub("_picmi", "", test_name) # same checksums for PICMI test +evaluate_checksum( + test_name=test_name, + output_file=sys.argv[1], +) diff --git a/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py b/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py index 33842058b0b..dd15a6492f1 100755 --- a/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py +++ b/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py @@ -29,7 +29,7 @@ from scipy.optimize import fsolve sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum yt.funcs.mylog.setLevel(0) @@ -193,5 +193,8 @@ def return_energies(iteration): Ek_i + Ep_i ) # Check conservation of energy -# Checksum regression analysis -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/electrostatic_sphere_eb/analysis.py b/Examples/Tests/electrostatic_sphere_eb/analysis.py index 71b3bfa3aa5..e12070119ac 100755 --- a/Examples/Tests/electrostatic_sphere_eb/analysis.py +++ b/Examples/Tests/electrostatic_sphere_eb/analysis.py @@ -8,10 +8,9 @@ import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - # Check reduced diagnostics for charge on EB import numpy as np +from checksumAPI import evaluate_checksum from scipy.constants import epsilon_0 # Theoretical charge on the embedded boundary, for sphere at potential phi_0 @@ -29,6 +28,8 @@ q_sim_eighth = data_eighth[1, 2] assert 
abs((q_sim_eighth - q_th / 8) / (q_th / 8)) < 0.06 -filename = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py b/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py index b33f19488d0..e3976c95e68 100755 --- a/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py +++ b/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py @@ -24,7 +24,7 @@ from unyt import m sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 0.0041 @@ -67,5 +67,9 @@ print("tolerance = ", tolerance) assert errmax_phi < tolerance and errmax_Er < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn, do_particles=False) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + do_particles=False, +) diff --git a/Examples/Tests/electrostatic_sphere_eb/analysis_rz_mr.py b/Examples/Tests/electrostatic_sphere_eb/analysis_rz_mr.py index 77710ca5f72..586b35fc7a4 100755 --- a/Examples/Tests/electrostatic_sphere_eb/analysis_rz_mr.py +++ b/Examples/Tests/electrostatic_sphere_eb/analysis_rz_mr.py @@ -19,7 +19,7 @@ from openpmd_viewer import OpenPMDTimeSeries sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 0.004 print(f"tolerance = {tolerance}") @@ -110,5 +110,9 @@ def get_error_per_lev(ts, level): for level in range(nlevels + 1): get_error_per_lev(ts, level) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn, output_format="openpmd") +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + output_format="openpmd", +) diff --git a/Examples/Tests/embedded_boundary_cube/analysis_fields.py b/Examples/Tests/embedded_boundary_cube/analysis_fields.py index 49da1a76edd..3202ccfaca2 100755 --- a/Examples/Tests/embedded_boundary_cube/analysis_fields.py +++ b/Examples/Tests/embedded_boundary_cube/analysis_fields.py @@ -9,7 +9,7 @@ from scipy.constants import c, mu_0, pi sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # This is a script that analyses the simulation results from # the script `inputs_3d`. This simulates a TMmnp mode in a PEC cubic resonator. @@ -110,4 +110,8 @@ rel_err_z = np.sqrt(np.sum(np.square(Bz_sim - Bz_th)) / np.sum(np.square(Bz_th))) assert rel_err_z < rel_tol_err -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py b/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py index 70a5b7d46c5..454d78169b7 100755 --- a/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py +++ b/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py @@ -8,7 +8,7 @@ from scipy.constants import c, mu_0, pi sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # This is a script that analyses the simulation results from # the script `inputs_3d`. This simulates a TMmnp mode in a PEC cubic resonator. 
@@ -63,6 +63,8 @@ Ey_sim = data["Ey"].to_ndarray() rel_err_y = np.sqrt(np.sum(np.square(Ey_sim / c - By_th)) / np.sum(np.square(By_th))) -test_name = os.path.split(os.getcwd())[1] - -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py b/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py index 84dfacbb505..8f0b7818516 100755 --- a/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py +++ b/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py @@ -15,7 +15,7 @@ from scipy.ndimage import gaussian_filter1d sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] ts = OpenPMDTimeSeries(filename) @@ -42,6 +42,9 @@ def r_first_minimum(iz): theta_diffraction = np.arcsin(1.22 * 0.1 / 0.4) / 2 assert np.all(abs(r[50:] - theta_diffraction * info.z[50:]) < 0.03) -# Open the right plot file -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd") +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + output_format="openpmd", +) diff --git a/Examples/Tests/embedded_boundary_python_api/analysis.py b/Examples/Tests/embedded_boundary_python_api/analysis.py index 09cc2accfea..7fda682f618 100755 --- a/Examples/Tests/embedded_boundary_python_api/analysis.py +++ b/Examples/Tests/embedded_boundary_python_api/analysis.py @@ -3,8 +3,17 @@ # This script just checks that the PICMI file executed successfully. # If it did there will be a plotfile for the final step. +import os import sys -step = int(sys.argv[1][-5:]) +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum +step = int(sys.argv[1][-5:]) assert step == 2 + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py index 6f3904e8764..451913fd54d 100755 --- a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py +++ b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py @@ -8,7 +8,7 @@ from scipy.constants import c, mu_0, pi sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # This is a script that analyses the simulation results from # the script `inputs_3d`. This simulates a TMmnp mode in a PEC cubic resonator. 
@@ -68,6 +68,8 @@ rel_err_y = np.sqrt(np.sum(np.square(By_sim - By_th)) / np.sum(np.square(By_th))) assert rel_err_y < rel_tol_err -test_name = os.path.split(os.getcwd())[1] - -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_3d.py b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_3d.py index 968ebe395a5..838c9c82479 100755 --- a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_3d.py +++ b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_3d.py @@ -15,7 +15,7 @@ from scipy.constants import c, mu_0, pi sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # This is a script that analyses the simulation results from # the script `inputs_3d`. This simulates a TMmnp mode in a PEC cubic resonator rotated by pi/8. @@ -144,6 +144,8 @@ ) assert rel_err_z < rel_tol_err -test_name = os.path.split(os.getcwd())[1] - -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/embedded_circle/analysis.py b/Examples/Tests/embedded_circle/analysis.py index 569ca40dce4..d1bb04fedb6 100755 --- a/Examples/Tests/embedded_circle/analysis.py +++ b/Examples/Tests/embedded_circle/analysis.py @@ -4,13 +4,11 @@ import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# this will be the name of the plot file -fn = sys.argv[1] - -# Get name of the test -test_name = os.path.split(os.getcwd())[1] - -# Run checksum regression test -checksumAPI.evaluate_checksum(test_name, fn, rtol=1e-2) +from checksumAPI import evaluate_checksum + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=1e-2, +) diff --git a/Examples/Tests/energy_conserving_thermal_plasma/analysis.py b/Examples/Tests/energy_conserving_thermal_plasma/analysis.py index 4cf7b4ff4e6..0d29f85e7eb 100755 --- a/Examples/Tests/energy_conserving_thermal_plasma/analysis.py +++ b/Examples/Tests/energy_conserving_thermal_plasma/analysis.py @@ -18,10 +18,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# this will be the name of the plot file -fn = sys.argv[1] +from checksumAPI import evaluate_checksum # Get energy as a function of time, from reduced diagnostics EFdata = np.genfromtxt("./diags/reducedfiles/EF.txt") # Field energy @@ -33,6 +30,8 @@ # Check that the energy is conserved to 0.3% assert np.all(abs(E - E[0]) / E[0] < 0.003) -# Checksum test -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/field_ionization/analysis.py b/Examples/Tests/field_ionization/analysis.py index 62d3f839941..a02c293601b 100755 --- a/Examples/Tests/field_ionization/analysis.py +++ b/Examples/Tests/field_ionization/analysis.py @@ -26,7 +26,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Open plotfile specified in command line, and get ion's ionization level. 
filename = sys.argv[1] @@ -107,5 +107,8 @@ except yt.utilities.exceptions.YTFieldNotFound: pass # The backtransformed diagnostic version of the test does not have orig_z -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/field_probe/analysis.py b/Examples/Tests/field_probe/analysis.py index 57085fb7cdc..e974e284b65 100755 --- a/Examples/Tests/field_probe/analysis.py +++ b/Examples/Tests/field_probe/analysis.py @@ -18,9 +18,15 @@ which can be solved analytically. """ +import os +import sys + import numpy as np import pandas as pd +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum + filename = "diags/reducedfiles/FP_line.txt" # Open data file @@ -59,3 +65,9 @@ def I_envelope(x, lam=0.2e-6, a=0.3e-6, D=1.7e-6): print("Average error greater than 2.5%") assert averror < 2.5 + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/flux_injection/analysis_flux_injection_3d.py b/Examples/Tests/flux_injection/analysis_flux_injection_3d.py index 3840bb72e74..dc89780703d 100755 --- a/Examples/Tests/flux_injection/analysis_flux_injection_3d.py +++ b/Examples/Tests/flux_injection/analysis_flux_injection_3d.py @@ -22,7 +22,6 @@ """ import os -import re import sys import matplotlib.pyplot as plt @@ -32,7 +31,7 @@ from scipy.special import erf sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum yt.funcs.mylog.setLevel(0) @@ -148,9 +147,8 @@ def compare_gaussian_flux(u, w, u_th, u_m, label=""): plt.tight_layout() plt.savefig("Distribution.png") -# Verify checksum -test_name = os.path.split(os.getcwd())[1] -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) -else: - checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/flux_injection/analysis_flux_injection_rz.py b/Examples/Tests/flux_injection/analysis_flux_injection_rz.py index ad73fdb47af..33b487cc36b 100755 --- a/Examples/Tests/flux_injection/analysis_flux_injection_rz.py +++ b/Examples/Tests/flux_injection/analysis_flux_injection_rz.py @@ -26,14 +26,13 @@ """ import os -import re import sys import numpy as np import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum yt.funcs.mylog.setLevel(0) @@ -58,9 +57,8 @@ # Check that the particles are at the right radius assert np.all((r >= 1.48) & (r <= 1.92)) -test_name = os.path.split(os.getcwd())[1] - -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) -else: - checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/gaussian_beam/analysis.py b/Examples/Tests/gaussian_beam/analysis.py index c2318d0cb7d..a2278b2cf7a 100755 --- a/Examples/Tests/gaussian_beam/analysis.py +++ b/Examples/Tests/gaussian_beam/analysis.py @@ -14,8 +14,7 @@ from scipy.constants import c, eV, m_e, micro, nano sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - -import checksumAPI +from checksumAPI import 
evaluate_checksum from openpmd_viewer import OpenPMDTimeSeries GeV = 1e9 * eV @@ -39,8 +38,6 @@ def s(z, sigma0, emit): return np.sqrt(sigma0**2 + emit**2 * (z - focal_distance) ** 2 / sigma0**2) -filename = sys.argv[1] - ts = OpenPMDTimeSeries("./diags/openpmd/") ( @@ -71,5 +68,8 @@ def s(z, sigma0, emit): assert np.allclose(sx, sx_theory, rtol=0.051, atol=0) assert np.allclose(sy, sy_theory, rtol=0.038, atol=0) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/implicit/analysis_1d.py b/Examples/Tests/implicit/analysis_1d.py index bbbbb8db9b2..665fcaac951 100755 --- a/Examples/Tests/implicit/analysis_1d.py +++ b/Examples/Tests/implicit/analysis_1d.py @@ -16,10 +16,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# this will be the name of the plot file -fn = sys.argv[1] +from checksumAPI import evaluate_checksum field_energy = np.loadtxt("diags/reducedfiles/field_energy.txt", skiprows=1) particle_energy = np.loadtxt("diags/reducedfiles/particle_energy.txt", skiprows=1) @@ -41,5 +38,8 @@ assert max_delta_E < tolerance_rel -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/implicit/analysis_vandb_jfnk_2d.py b/Examples/Tests/implicit/analysis_vandb_jfnk_2d.py index 3c962eb91ea..29a2c870574 100755 --- a/Examples/Tests/implicit/analysis_vandb_jfnk_2d.py +++ b/Examples/Tests/implicit/analysis_vandb_jfnk_2d.py @@ -18,7 +18,7 @@ from scipy.constants import e, epsilon_0 sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -66,5 +66,8 @@ assert drho_rms < tolerance_rel_charge -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/initial_distribution/analysis.py b/Examples/Tests/initial_distribution/analysis.py index 6d23c5da1e4..834934df255 100755 --- a/Examples/Tests/initial_distribution/analysis.py +++ b/Examples/Tests/initial_distribution/analysis.py @@ -27,9 +27,7 @@ from read_raw_data import read_reduced_diags, read_reduced_diags_histogram sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -filename = sys.argv[1] +from checksumAPI import evaluate_checksum # print tolerance tolerance = 0.02 @@ -451,6 +449,8 @@ def Gaussian(mean, sigma, u): assert f9_error < tolerance - -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/initial_plasma_profile/analysis.py b/Examples/Tests/initial_plasma_profile/analysis.py index f5fc75ee578..d372bd30a93 100755 --- a/Examples/Tests/initial_plasma_profile/analysis.py +++ b/Examples/Tests/initial_plasma_profile/analysis.py @@ -9,16 +9,13 @@ import os import sys -import yt - -yt.funcs.mylog.setLevel(50) - sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# Name of the plotfile -fn = sys.argv[1] - 
-test_name = os.path.split(os.getcwd())[1] - -checksumAPI.evaluate_checksum(test_name, fn, rtol=1e-4, do_particles=False) +from checksumAPI import evaluate_checksum + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=1e-4, + do_particles=False, +) diff --git a/Examples/Tests/ion_stopping/analysis.py b/Examples/Tests/ion_stopping/analysis.py index e343bd23fdd..45983538025 100755 --- a/Examples/Tests/ion_stopping/analysis.py +++ b/Examples/Tests/ion_stopping/analysis.py @@ -19,7 +19,7 @@ from scipy.constants import e, epsilon_0, k, m_e, m_p sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Define constants using the WarpX names for the evals below q_e = e @@ -194,5 +194,8 @@ def stopping_from_ions(dt, ni, Ti, mi, Zi, Zb, ion_mass, ion_energy): assert np.all(error3 < tolerance) assert np.all(error4 < tolerance) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, last_filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/langmuir/analysis_1d.py b/Examples/Tests/langmuir/analysis_1d.py index d041ca03b36..8eefd95b4f7 100755 --- a/Examples/Tests/langmuir/analysis_1d.py +++ b/Examples/Tests/langmuir/analysis_1d.py @@ -27,7 +27,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # test name test_name = os.path.split(os.getcwd())[1] @@ -126,4 +126,8 @@ def get_theoretical_field(field, t): print("tolerance = {}".format(tolerance)) assert error_rel < tolerance -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/langmuir/analysis_2d.py b/Examples/Tests/langmuir/analysis_2d.py index ac98354c73b..31995e896a5 100755 --- a/Examples/Tests/langmuir/analysis_2d.py +++ b/Examples/Tests/langmuir/analysis_2d.py @@ -27,7 +27,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # test name test_name = os.path.split(os.getcwd())[1] @@ -163,4 +163,8 @@ def get_theoretical_field(field, t): print("tolerance = {}".format(tolerance)) assert error_rel < tolerance -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/langmuir/analysis_3d.py b/Examples/Tests/langmuir/analysis_3d.py index 9f4b2cc1f93..05f1c585ec0 100755 --- a/Examples/Tests/langmuir/analysis_3d.py +++ b/Examples/Tests/langmuir/analysis_3d.py @@ -27,7 +27,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # test name test_name = os.path.split(os.getcwd())[1] @@ -212,7 +212,8 @@ def get_theoretical_field(field, t): print("tolerance = {}".format(tolerance)) assert error_rel < tolerance -if re.search("single_precision", test_name): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) -else: - checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) 
diff --git a/Examples/Tests/langmuir/analysis_rz.py b/Examples/Tests/langmuir/analysis_rz.py index dd26fd29db7..64f8cfb6313 100755 --- a/Examples/Tests/langmuir/analysis_rz.py +++ b/Examples/Tests/langmuir/analysis_rz.py @@ -30,7 +30,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -183,4 +183,8 @@ def Ez(z, r, epsilon, k0, w0, wp, t): fn, random_filter_fn, random_fraction, dim, species_name ) -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/langmuir_fluids/analysis_1d.py b/Examples/Tests/langmuir_fluids/analysis_1d.py index fa4566b6173..c448303783f 100755 --- a/Examples/Tests/langmuir_fluids/analysis_1d.py +++ b/Examples/Tests/langmuir_fluids/analysis_1d.py @@ -26,7 +26,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -148,5 +148,8 @@ def get_theoretical_rho_field(field, t): assert error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/langmuir_fluids/analysis_2d.py b/Examples/Tests/langmuir_fluids/analysis_2d.py index d7ecca986e4..d8ba50a9df1 100755 --- a/Examples/Tests/langmuir_fluids/analysis_2d.py +++ b/Examples/Tests/langmuir_fluids/analysis_2d.py @@ -26,7 +26,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -178,5 +178,8 @@ def get_theoretical_rho_field(field, t): assert error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/langmuir_fluids/analysis_3d.py b/Examples/Tests/langmuir_fluids/analysis_3d.py index 321b528b6cb..899dc72424b 100755 --- a/Examples/Tests/langmuir_fluids/analysis_3d.py +++ b/Examples/Tests/langmuir_fluids/analysis_3d.py @@ -14,7 +14,6 @@ # $$ E_y = \epsilon \,\frac{m_e c^2 k_y}{q_e}\cos(k_x x)\sin(k_y y)\cos(k_z z)\sin( \omega_p t)$$ # $$ E_z = \epsilon \,\frac{m_e c^2 k_z}{q_e}\cos(k_x x)\cos(k_y y)\sin(k_z z)\sin( \omega_p t)$$ import os -import re import sys import matplotlib.pyplot as plt @@ -27,7 +26,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -213,9 +212,8 @@ def get_theoretical_rho_field(field, t): assert error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] - -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) -else: - checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git 
a/Examples/Tests/langmuir_fluids/analysis_rz.py b/Examples/Tests/langmuir_fluids/analysis_rz.py index f629ddc6626..0e918a6ab31 100755 --- a/Examples/Tests/langmuir_fluids/analysis_rz.py +++ b/Examples/Tests/langmuir_fluids/analysis_rz.py @@ -29,7 +29,7 @@ from scipy.constants import c, e, epsilon_0, m_e sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -226,4 +226,8 @@ def rho(z, r, epsilon, k0, w0, wp, t): assert error_rel < tolerance_rel -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/laser_injection/analysis_1d.py b/Examples/Tests/laser_injection/analysis_1d.py index 9215125427d..5ce7065c967 100755 --- a/Examples/Tests/laser_injection/analysis_1d.py +++ b/Examples/Tests/laser_injection/analysis_1d.py @@ -24,7 +24,7 @@ from scipy.signal import hilbert sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.05 @@ -192,8 +192,11 @@ def main(): check_laser(filename_end) - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename_end) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) if __name__ == "__main__": diff --git a/Examples/Tests/laser_injection/analysis_2d.py b/Examples/Tests/laser_injection/analysis_2d.py index c6548e8be1d..5e2d9ebf280 100755 --- a/Examples/Tests/laser_injection/analysis_2d.py +++ b/Examples/Tests/laser_injection/analysis_2d.py @@ -30,7 +30,7 @@ from scipy.signal import hilbert sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.05 @@ -225,8 +225,11 @@ def main(): check_laser(filename_end) - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename_end) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) if __name__ == "__main__": diff --git a/Examples/Tests/laser_injection/analysis_3d.py b/Examples/Tests/laser_injection/analysis_3d.py index bf2a03e342c..153b721b526 100755 --- a/Examples/Tests/laser_injection/analysis_3d.py +++ b/Examples/Tests/laser_injection/analysis_3d.py @@ -18,10 +18,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# this will be the name of the plot file -fn = sys.argv[1] +from checksumAPI import evaluate_checksum # you can save an image to be displayed on the website t = np.arange(0.0, 2.0, 0.01) @@ -29,5 +26,8 @@ plt.plot(t, s) plt.savefig("laser_analysis.png") -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/laser_injection_from_file/analysis_1d.py b/Examples/Tests/laser_injection_from_file/analysis_1d.py index 1b5f209cb91..c6542ed1ac8 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_1d.py +++ b/Examples/Tests/laser_injection_from_file/analysis_1d.py @@ -28,7 +28,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, 
"../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.065 @@ -113,5 +113,8 @@ def gauss_env(T, Z): print("Relative error frequency: ", relative_error_freq) assert relative_error_freq < relative_error_threshold -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py b/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py index 89c0ea3c57c..e410369cb45 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py +++ b/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py @@ -28,7 +28,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.065 @@ -113,6 +113,8 @@ def gauss_env(T, Z): print("Relative error frequency: ", relative_error_freq) assert relative_error_freq < relative_error_threshold -# Do the checksum test -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/laser_injection_from_file/analysis_2d.py b/Examples/Tests/laser_injection_from_file/analysis_2d.py index ab5649e968f..1e6704f55a5 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_2d.py +++ b/Examples/Tests/laser_injection_from_file/analysis_2d.py @@ -28,7 +28,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.065 @@ -139,5 +139,8 @@ def gauss_env(T, X, Y, Z): print("Relative error frequency: ", relative_error_freq) assert relative_error_freq < relative_error_threshold -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py b/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py index bcb13bba410..7fc14824471 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py +++ b/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py @@ -27,7 +27,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.065 @@ -146,5 +146,8 @@ def gauss_env(T, XX, ZZ): print("Relative error frequency: ", relative_error_freq) assert relative_error_freq < relative_error_threshold -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/laser_injection_from_file/analysis_3d.py b/Examples/Tests/laser_injection_from_file/analysis_3d.py index 7d30af28639..3921e3d5930 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_3d.py +++ 
b/Examples/Tests/laser_injection_from_file/analysis_3d.py @@ -28,7 +28,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.065 @@ -145,5 +145,8 @@ def gauss_env(T, X, Y, Z): print("Relative error frequency: ", relative_error_freq) assert relative_error_freq < relative_error_threshold -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py b/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py index 72575da96b4..f797ddb5d90 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py +++ b/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py @@ -29,7 +29,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.065 @@ -147,5 +147,8 @@ def laguerre_env(T, X, Y, Z, p, m): print("Relative error frequency: ", relative_error_freq) assert relative_error_freq < relative_error_threshold -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/laser_injection_from_file/analysis_rz.py b/Examples/Tests/laser_injection_from_file/analysis_rz.py index 90e392bcf25..c37c6d8b3c2 100755 --- a/Examples/Tests/laser_injection_from_file/analysis_rz.py +++ b/Examples/Tests/laser_injection_from_file/analysis_rz.py @@ -28,7 +28,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Maximum acceptable error for this test relative_error_threshold = 0.065 @@ -140,5 +140,8 @@ def gauss_env(T, X, Y, Z): print("Relative error frequency: ", relative_error_freq) assert relative_error_freq < relative_error_threshold -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/load_external_field/analysis_3d.py b/Examples/Tests/load_external_field/analysis_3d.py index 0865584d683..05cba3ea7bd 100755 --- a/Examples/Tests/load_external_field/analysis_3d.py +++ b/Examples/Tests/load_external_field/analysis_3d.py @@ -23,7 +23,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 1.0e-8 x0 = 0.12238072 @@ -44,5 +44,8 @@ print("tolerance = ", tolerance) assert error < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/load_external_field/analysis_rz.py b/Examples/Tests/load_external_field/analysis_rz.py index 75d9c084718..7de160cdd50 100755 --- a/Examples/Tests/load_external_field/analysis_rz.py +++ b/Examples/Tests/load_external_field/analysis_rz.py @@ -23,7 +23,7 @@ import yt 
sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 1.0e-8 r0 = 0.12402005 @@ -41,5 +41,8 @@ print("tolerance = ", tolerance) assert error < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/magnetostatic_eb/analysis_rz.py b/Examples/Tests/magnetostatic_eb/analysis_rz.py index 05aa4a3fe47..f31069ad230 100755 --- a/Examples/Tests/magnetostatic_eb/analysis_rz.py +++ b/Examples/Tests/magnetostatic_eb/analysis_rz.py @@ -1,20 +1,14 @@ #!/usr/bin/env python3 import os -import re import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# this will be the name of the plot file -fn = sys.argv[1] - -# Get name of the test -test_name = os.path.split(os.getcwd())[1] - -# Run checksum regression test -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, rtol=2.0e-6, do_particles=False) -else: - checksumAPI.evaluate_checksum(test_name, fn, do_particles=False) +from checksumAPI import evaluate_checksum + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + do_particles=False, +) diff --git a/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py b/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py index 7bfa47f3164..ad635bf0fbe 100755 --- a/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py +++ b/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py @@ -19,7 +19,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum fn = sys.argv[1] use_MR = re.search("nci_correctorMR", fn) is not None @@ -50,5 +50,8 @@ assert energy < energy_threshold -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/nci_psatd_stability/analysis_galilean.py b/Examples/Tests/nci_psatd_stability/analysis_galilean.py index 40c74ecc5bf..99f14d91371 100755 --- a/Examples/Tests/nci_psatd_stability/analysis_galilean.py +++ b/Examples/Tests/nci_psatd_stability/analysis_galilean.py @@ -23,7 +23,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -118,5 +118,9 @@ print(f"tol_charge = {tol_charge}") assert err_charge < tol_charge -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, rtol=1.0e-8) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=1e-8, +) diff --git a/Examples/Tests/nci_psatd_stability/analysis_multiJ.py b/Examples/Tests/nci_psatd_stability/analysis_multiJ.py index 2a438d5d22e..6dcfb6565fe 100755 --- a/Examples/Tests/nci_psatd_stability/analysis_multiJ.py +++ b/Examples/Tests/nci_psatd_stability/analysis_multiJ.py @@ -19,7 +19,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -50,5 +50,8 @@ print(f"tol_energy = {tol_energy}") assert err_energy < tol_energy -test_name = 
os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/nodal_electrostatic/analysis.py b/Examples/Tests/nodal_electrostatic/analysis.py index c8725ce5d95..f015d525280 100755 --- a/Examples/Tests/nodal_electrostatic/analysis.py +++ b/Examples/Tests/nodal_electrostatic/analysis.py @@ -6,10 +6,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# this will be the name of the plot file -fn = sys.argv[1] +from checksumAPI import evaluate_checksum # check that the maximum chi value is small fname = "diags/reducedfiles/ParticleExtrema_beam_p.txt" @@ -21,6 +18,8 @@ pho_num = np.loadtxt(fname)[:, 7] assert pho_num.all() == 0.0 -# Checksum regression analysis -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py b/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py index 22de371090c..8ae0e768815 100755 --- a/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py +++ b/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py @@ -29,10 +29,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# Name of the plotfile -fn = sys.argv[1] +from checksumAPI import evaluate_checksum # Load data from reduced diagnostics (physical time and neutron weights) time = np.loadtxt("./reduced_diags/particle_number.txt", usecols=1) @@ -52,6 +49,8 @@ print("tolerance = ", tolerance) assert error < tolerance -# Compare checksums with benchmark -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py b/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py index 25e898c05be..c69080ac726 100755 --- a/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py +++ b/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py @@ -12,9 +12,9 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI import numpy as np import scipy.constants as scc +from checksumAPI import evaluate_checksum ## This script performs various checks for the proton boron nuclear fusion module. The simulation ## that we check is made of 5 different tests, each with different proton, boron and alpha species. 
@@ -880,8 +880,11 @@ def main(): rho_end = field_data_end["rho"].to_ndarray() check_charge_conservation(rho_start, rho_end) - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename_end) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) if __name__ == "__main__": diff --git a/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py b/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py index be1fbb0702a..1a458a25e4a 100755 --- a/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py +++ b/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py @@ -12,9 +12,9 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI import numpy as np import scipy.constants as scc +from checksumAPI import evaluate_checksum ## This script performs various checks for the fusion module. The simulation ## that we check is made of 2 different tests, each with different reactant and product species. @@ -555,8 +555,11 @@ def main(): rho_end = field_data_end["rho"].to_ndarray() check_charge_conservation(rho_start, rho_end) - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename_end) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) if __name__ == "__main__": diff --git a/Examples/Tests/ohm_solver_em_modes/analysis.py b/Examples/Tests/ohm_solver_em_modes/analysis.py index 36869623ac4..bee634415d9 100755 --- a/Examples/Tests/ohm_solver_em_modes/analysis.py +++ b/Examples/Tests/ohm_solver_em_modes/analysis.py @@ -353,9 +353,10 @@ def get_analytic_L_mode(w): import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - import checksumAPI + from checksumAPI import evaluate_checksum - # this will be the name of the plot file - fn = sys.argv[1] - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, fn) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) diff --git a/Examples/Tests/ohm_solver_em_modes/analysis_rz.py b/Examples/Tests/ohm_solver_em_modes/analysis_rz.py index 4d5bc2aa016..a1eb185bbf6 100755 --- a/Examples/Tests/ohm_solver_em_modes/analysis_rz.py +++ b/Examples/Tests/ohm_solver_em_modes/analysis_rz.py @@ -187,9 +187,11 @@ def process(it): import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - import checksumAPI + from checksumAPI import evaluate_checksum - # this will be the name of the plot file - fn = sys.argv[1] - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, fn, rtol=1e-6) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=1e-6, + ) diff --git a/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py b/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py index 700ad68fe87..620331cf13f 100755 --- a/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py +++ b/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py @@ -117,9 +117,10 @@ import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - import checksumAPI + from checksumAPI import evaluate_checksum - # this will be the name of the plot file - fn = sys.argv[1] - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, fn) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], 
+ output_file=sys.argv[1], + ) diff --git a/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py b/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py index 5bd9db3d91d..3b0a18f29d5 100755 --- a/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py +++ b/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py @@ -236,9 +236,10 @@ import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - import checksumAPI + from checksumAPI import evaluate_checksum - # this will be the name of the plot file - fn = sys.argv[1] - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, fn) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py b/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py index 93d574e5294..e7b41d4fbb4 100755 --- a/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py @@ -190,9 +190,10 @@ def animate(i): import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - import checksumAPI + from checksumAPI import evaluate_checksum - # this will be the name of the plot file - fn = sys.argv[1] - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, fn) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) diff --git a/Examples/Tests/open_bc_poisson_solver/analysis.py b/Examples/Tests/open_bc_poisson_solver/analysis.py index 8d5be875c7a..8ffd9ef52e2 100755 --- a/Examples/Tests/open_bc_poisson_solver/analysis.py +++ b/Examples/Tests/open_bc_poisson_solver/analysis.py @@ -9,7 +9,7 @@ from scipy.special import erf sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum sigmaz = 300e-6 sigmax = 516e-9 @@ -37,8 +37,6 @@ def evaluate_E(x, y, z): return E_complex.imag, E_complex.real -fn = sys.argv[1] - path = os.path.join("diags", "diag2") ts = OpenPMDTimeSeries(path) @@ -64,8 +62,9 @@ def evaluate_E(x, y, z): assert np.allclose(Ey_warpx, Ey_theory, rtol=0.029, atol=0) -# Get name of the test -test_name = os.path.split(os.getcwd())[1] - -# Run checksum regression test -checksumAPI.evaluate_checksum(test_name, fn, rtol=1e-2) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=1e-2, +) diff --git a/Examples/Tests/particle_boundary_interaction/analysis.py b/Examples/Tests/particle_boundary_interaction/analysis.py index 3b9d2f12b84..d06200157d2 100755 --- a/Examples/Tests/particle_boundary_interaction/analysis.py +++ b/Examples/Tests/particle_boundary_interaction/analysis.py @@ -17,13 +17,10 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Open plotfile specified in command line filename = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd") - ts = OpenPMDTimeSeries(filename) it = ts.iterations @@ -52,3 +49,10 @@ assert ( (diff_x < tolerance) and (y[0] < 1e-8) and (diff_z < tolerance) ), "Test particle_boundary_interaction did not pass" + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + output_format="openpmd", +) diff --git 
a/Examples/Tests/particle_boundary_process/CMakeLists.txt b/Examples/Tests/particle_boundary_process/CMakeLists.txt index a7081fe9090..499cf445da5 100644 --- a/Examples/Tests/particle_boundary_process/CMakeLists.txt +++ b/Examples/Tests/particle_boundary_process/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 2 # dims 1 # nprocs inputs_test_2d_particle_reflection_picmi.py # inputs - analysis_reflection.py # analysis + analysis_default_regression.py # analysis diags/diag1000010 # output OFF # dependency ) diff --git a/Examples/Tests/particle_boundary_process/analysis_absorption.py b/Examples/Tests/particle_boundary_process/analysis_absorption.py index fdde2622684..79e8d0e4bc6 100755 --- a/Examples/Tests/particle_boundary_process/analysis_absorption.py +++ b/Examples/Tests/particle_boundary_process/analysis_absorption.py @@ -1,15 +1,19 @@ #!/usr/bin/env python3 -import sys - -import yt - # This test shoots a beam of electrons at cubic embedded boundary geometry # At time step 40, none of the particles have hit the boundary yet. At time # step 60, all of them should have been absorbed by the boundary. In the # absence of the cube, none of the particles would have had time to exit # the problem domain yet. +import os +import sys + +import yt + +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum + # all particles are still there ds40 = yt.load("diags/diag1000040") np40 = ds40.index.particle_headers["electrons"].num_particles @@ -20,3 +24,9 @@ ds60 = yt.load(filename) np60 = ds60.index.particle_headers["electrons"].num_particles assert np60 == 0 + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/particle_boundary_process/analysis_default_regression.py b/Examples/Tests/particle_boundary_process/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/particle_boundary_process/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/particle_boundary_process/analysis_reflection.py b/Examples/Tests/particle_boundary_process/analysis_reflection.py deleted file mode 100755 index 1187a58e75d..00000000000 --- a/Examples/Tests/particle_boundary_process/analysis_reflection.py +++ /dev/null @@ -1,15 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2021 Modern Electron -# -# License: BSD-3-Clause-LBNL - -# This script just checks that the PICMI file executed successfully. -# If it did there will be a plotfile for the final step. 
- -import yt - -plotfile = "Python_particle_reflection_plt000010" -ds = yt.load(plotfile) # noqa - -assert True diff --git a/Examples/Tests/particle_boundary_process/inputs_test_2d_particle_reflection_picmi.py b/Examples/Tests/particle_boundary_process/inputs_test_2d_particle_reflection_picmi.py index 0803bc05d59..ef1b7d45e1a 100755 --- a/Examples/Tests/particle_boundary_process/inputs_test_2d_particle_reflection_picmi.py +++ b/Examples/Tests/particle_boundary_process/inputs_test_2d_particle_reflection_picmi.py @@ -80,16 +80,12 @@ particle_diag = picmi.ParticleDiagnostic( name="diag1", period=10, - write_dir=".", - warpx_file_prefix="Python_particle_reflection_plt", ) field_diag = picmi.FieldDiagnostic( grid=grid, name="diag1", data_list=["E"], period=10, - write_dir=".", - warpx_file_prefix="Python_particle_reflection_plt", ) ########################## diff --git a/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py b/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py index a7c84b05459..01a7436a787 100755 --- a/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py +++ b/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py @@ -20,7 +20,7 @@ from scipy.constants import c, e, m_e, m_p sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum def do_analysis(single_precision=False): @@ -248,5 +248,9 @@ def do_analysis(single_precision=False): assert error_opmd[k] < tolerance print(k, "relative error openPMD = ", error_opmd[k]) - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, fn, rtol=check_tolerance) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=check_tolerance, + ) diff --git a/Examples/Tests/particle_pusher/analysis.py b/Examples/Tests/particle_pusher/analysis.py index acef0e819d3..9ed92507d4d 100755 --- a/Examples/Tests/particle_pusher/analysis.py +++ b/Examples/Tests/particle_pusher/analysis.py @@ -28,7 +28,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 0.001 @@ -41,5 +41,8 @@ print("tolerance = ", tolerance) assert abs(x) < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/particle_thermal_boundary/analysis.py b/Examples/Tests/particle_thermal_boundary/analysis.py index 49f33b5b805..621bf2032be 100755 --- a/Examples/Tests/particle_thermal_boundary/analysis.py +++ b/Examples/Tests/particle_thermal_boundary/analysis.py @@ -20,7 +20,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum FE_rdiag = "./diags/reducedfiles/EF.txt" init_Fenergy = np.loadtxt(FE_rdiag)[1, 2] @@ -32,6 +32,9 @@ init_Penergy = np.loadtxt(PE_rdiag)[0, 2] final_Penergy = np.loadtxt(PE_rdiag)[-1, 2] assert abs(final_Penergy - init_Penergy) / init_Penergy < 0.02 -filename = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py 
b/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py index df106976e78..8e7d95eda08 100755 --- a/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py +++ b/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py @@ -25,7 +25,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Open plotfile specified in command line filename = sys.argv[1] @@ -67,5 +67,8 @@ print("tolerance_abs: " + str(tolerance_abs)) assert max_Efield < tolerance_abs -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/pec/analysis_pec.py b/Examples/Tests/pec/analysis_pec.py index 12907bb7846..29d9a4e26f4 100755 --- a/Examples/Tests/pec/analysis_pec.py +++ b/Examples/Tests/pec/analysis_pec.py @@ -12,7 +12,6 @@ # and as a result, the minimum and maximum value after reflection would be two times the value at initialization due to constructive interference. # Additionally, the value of Ey at the boundary must be equal to zero. import os -import re import sys import matplotlib @@ -26,7 +25,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -91,9 +90,8 @@ assert max_Ey_error_rel < tolerance_rel assert min_Ey_error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] - -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) -else: - checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/pec/analysis_pec_mr.py b/Examples/Tests/pec/analysis_pec_mr.py index 8361246b8dd..069a1d01afa 100755 --- a/Examples/Tests/pec/analysis_pec_mr.py +++ b/Examples/Tests/pec/analysis_pec_mr.py @@ -12,7 +12,6 @@ # and as a result, the minimum and maximum value after reflection would be two times the value at initialization due to constructive interference. # Additionally, the value of Ey at the boundary must be equal to zero. 
import os -import re import sys import matplotlib @@ -26,7 +25,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # this will be the name of the plot file fn = sys.argv[1] @@ -91,9 +90,8 @@ assert max_Ey_error_rel < tolerance_rel assert min_Ey_error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] - -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, rtol=1.0e-3) -else: - checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/photon_pusher/analysis.py b/Examples/Tests/photon_pusher/analysis.py index 9135ad981ba..2a77e325bc5 100755 --- a/Examples/Tests/photon_pusher/analysis.py +++ b/Examples/Tests/photon_pusher/analysis.py @@ -14,7 +14,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # This script checks if photons initialized with different momenta and # different initial directions propagate along straight lines at the speed of @@ -153,8 +153,11 @@ def check(): assert (max(disc_pos) <= tol_pos) and (max(disc_mom) <= tol_mom) - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) # This function generates the input file to test the photon pusher. diff --git a/Examples/Tests/plasma_lens/analysis.py b/Examples/Tests/plasma_lens/analysis.py index 11e2a084ac5..46036573940 100755 --- a/Examples/Tests/plasma_lens/analysis.py +++ b/Examples/Tests/plasma_lens/analysis.py @@ -25,7 +25,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] ds = yt.load(filename) @@ -195,8 +195,10 @@ def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): "error in y particle velocity" ) -# The PICMI and native input versions run the same test, so -# their results are compared to the same benchmark file +# compare checksums test_name = os.path.split(os.getcwd())[1] -test_name = re.sub("_picmi", "", test_name) -checksumAPI.evaluate_checksum(test_name, filename) +test_name = re.sub("_picmi", "", test_name) # same checksums for PICMI test +evaluate_checksum( + test_name=test_name, + output_file=sys.argv[1], +) diff --git a/Examples/Tests/pml/analysis_pml_ckc.py b/Examples/Tests/pml/analysis_pml_ckc.py index 4e6bff076c7..f6637e2d47b 100755 --- a/Examples/Tests/pml/analysis_pml_ckc.py +++ b/Examples/Tests/pml/analysis_pml_ckc.py @@ -17,7 +17,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -57,5 +57,8 @@ assert error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/pml/analysis_pml_psatd.py b/Examples/Tests/pml/analysis_pml_psatd.py index 00b867857f9..4f44c0f3432 100755 --- a/Examples/Tests/pml/analysis_pml_psatd.py +++ b/Examples/Tests/pml/analysis_pml_psatd.py @@ -17,7 +17,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, 
"../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -75,5 +75,8 @@ assert reflectivity < reflectivity_max -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/pml/analysis_pml_psatd_rz.py b/Examples/Tests/pml/analysis_pml_psatd_rz.py index 2d9d58734a1..fb662e36d40 100755 --- a/Examples/Tests/pml/analysis_pml_psatd_rz.py +++ b/Examples/Tests/pml/analysis_pml_psatd_rz.py @@ -24,7 +24,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Open plotfile specified in command line filename = sys.argv[1] @@ -55,5 +55,8 @@ print("tolerance_abs: " + str(tolerance_abs)) assert max_Efield < tolerance_abs -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/pml/analysis_pml_yee.py b/Examples/Tests/pml/analysis_pml_yee.py index a24854af095..5f6d21e579c 100755 --- a/Examples/Tests/pml/analysis_pml_yee.py +++ b/Examples/Tests/pml/analysis_pml_yee.py @@ -17,7 +17,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -57,5 +57,8 @@ assert error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/point_of_contact_eb/analysis.py b/Examples/Tests/point_of_contact_eb/analysis.py index 3f42aa6eeca..1c9dbc85f4c 100755 --- a/Examples/Tests/point_of_contact_eb/analysis.py +++ b/Examples/Tests/point_of_contact_eb/analysis.py @@ -17,12 +17,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# Open plotfile specified in command line -filename = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd") +from checksumAPI import evaluate_checksum ts_scraping = OpenPMDTimeSeries("./diags/diag2/particles_at_eb/") @@ -97,3 +92,10 @@ and (diff_ny < tolerance_n) and (np.abs(nz) < 1e-8) ), "Test point_of_contact did not pass" + +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + output_format="openpmd", +) diff --git a/Examples/Tests/projection_divb_cleaner/analysis.py b/Examples/Tests/projection_divb_cleaner/analysis.py index 256d1929d06..2324c370032 100755 --- a/Examples/Tests/projection_divb_cleaner/analysis.py +++ b/Examples/Tests/projection_divb_cleaner/analysis.py @@ -23,7 +23,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 4e-3 @@ -73,5 +73,8 @@ print("tolerance = ", tolerance) assert error < tolerance -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git 
a/Examples/Tests/qed/analysis_breit_wheeler_opmd.py b/Examples/Tests/qed/analysis_breit_wheeler_opmd.py index 25547eda438..b88f00a85dc 100755 --- a/Examples/Tests/qed/analysis_breit_wheeler_opmd.py +++ b/Examples/Tests/qed/analysis_breit_wheeler_opmd.py @@ -13,7 +13,7 @@ import openpmd_api as io # sys.path.insert(1, '../../../../warpx/Regression/Checksum/') -# import checksumAPI +# from checksumAPI import evaluate_checksum # This script is a frontend for the analysis routines @@ -72,8 +72,12 @@ def main(): ac.check(dt, particle_data) - # test_name = os.path.split(os.getcwd())[1] - # checksumAPI.evaluate_checksum(test_name, filename_end) + # compare checksums + # evaluate_checksum( + # test_name=os.path.split(os.getcwd())[1], + # output_file=sys.argv[1], + # output_format="openpmd", + # ) if __name__ == "__main__": diff --git a/Examples/Tests/qed/analysis_breit_wheeler_yt.py b/Examples/Tests/qed/analysis_breit_wheeler_yt.py index 9836e3e8894..48c45c990b0 100755 --- a/Examples/Tests/qed/analysis_breit_wheeler_yt.py +++ b/Examples/Tests/qed/analysis_breit_wheeler_yt.py @@ -14,7 +14,7 @@ sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import analysis_breit_wheeler_core as ac -import checksumAPI +from checksumAPI import evaluate_checksum # This script is a frontend for the analysis routines # in analysis_breit_wheeler_core.py (please refer to this file for @@ -58,8 +58,11 @@ def main(): ac.check(dt, particle_data) - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename_end) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) if __name__ == "__main__": diff --git a/Examples/Tests/qed/analysis_quantum_sync.py b/Examples/Tests/qed/analysis_quantum_sync.py index cf60d2ee647..531a0eac195 100755 --- a/Examples/Tests/qed/analysis_quantum_sync.py +++ b/Examples/Tests/qed/analysis_quantum_sync.py @@ -18,8 +18,8 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI import matplotlib.pyplot as plt +from checksumAPI import evaluate_checksum # This script performs detailed checks of the Quantum Synchrotron photon emission process. 
# Two electron populations and two positron populations are initialized with different momenta in different @@ -348,8 +348,11 @@ def check(): print("*************\n") - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename_end) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) def main(): diff --git a/Examples/Tests/qed/analysis_schwinger.py b/Examples/Tests/qed/analysis_schwinger.py index 30a25e6a956..4ad21e3d518 100755 --- a/Examples/Tests/qed/analysis_schwinger.py +++ b/Examples/Tests/qed/analysis_schwinger.py @@ -19,7 +19,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # define some parameters @@ -161,5 +161,8 @@ def do_analysis(Ex, Ey, Ez, Bx, By, Bz): do_analysis(Ex_test, Ey_test, Ez_test, Bx_test, By_test, Bz_test) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/radiation_reaction/analysis.py b/Examples/Tests/radiation_reaction/analysis.py index e24129d3e38..74155a89cb3 100755 --- a/Examples/Tests/radiation_reaction/analysis.py +++ b/Examples/Tests/radiation_reaction/analysis.py @@ -37,7 +37,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Input filename inputname = "inputs" @@ -163,8 +163,11 @@ def check(): assert error_rel < tolerance_rel - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + ) def generate(): diff --git a/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py b/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py index 64b726e5954..42916d34568 100755 --- a/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py +++ b/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py @@ -21,7 +21,7 @@ from scipy.constants import mu_0 as mu0 sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # gamma threshold to switch between the relativistic expression of # the kinetic energy and its Taylor expansion. 
@@ -376,7 +376,9 @@ def do_analysis(single_precision=False): assert error[k] < tol print() - test_name = os.path.split(os.getcwd())[1] - - checksum_rtol = 2e-9 if single_precision else 1e-9 - checksumAPI.evaluate_checksum(test_name, fn, rtol=checksum_rtol) + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + rtol=1e-9, + ) diff --git a/Examples/Tests/reduced_diags/analysis_reduced_diags_load_balance_costs.py b/Examples/Tests/reduced_diags/analysis_reduced_diags_load_balance_costs.py index 05f696e2fe6..49a0018baa5 100755 --- a/Examples/Tests/reduced_diags/analysis_reduced_diags_load_balance_costs.py +++ b/Examples/Tests/reduced_diags/analysis_reduced_diags_load_balance_costs.py @@ -24,7 +24,7 @@ import numpy as np sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Command line argument fn = sys.argv[1] @@ -77,8 +77,10 @@ def get_efficiency(i): # than non-load balanced case assert efficiency_before < efficiency_after -# The PICMI and native input versions run the same test, so -# their results are compared to the same benchmark file +# compare checksums test_name = os.path.split(os.getcwd())[1] -test_name = re.sub("_picmi", "", test_name) -checksumAPI.evaluate_checksum(test_name, fn) +test_name = re.sub("_picmi", "", test_name) # same checksums for PICMI test +evaluate_checksum( + test_name=test_name, + output_file=sys.argv[1], +) diff --git a/Examples/Tests/relativistic_space_charge_initialization/analysis.py b/Examples/Tests/relativistic_space_charge_initialization/analysis.py index 4828e3ddce5..ef0a87dce92 100755 --- a/Examples/Tests/relativistic_space_charge_initialization/analysis.py +++ b/Examples/Tests/relativistic_space_charge_initialization/analysis.py @@ -25,7 +25,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Parameters from the Simulation Qtot = -1.0e-20 @@ -95,5 +95,9 @@ def check(E, E_th, label): check(Ex_array, Ex_th, "Ex") -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, do_particles=False) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + do_particles=False, +) diff --git a/Examples/Tests/repelling_particles/analysis.py b/Examples/Tests/repelling_particles/analysis.py index 401ba7ba5d0..74bde7b68ca 100755 --- a/Examples/Tests/repelling_particles/analysis.py +++ b/Examples/Tests/repelling_particles/analysis.py @@ -35,6 +35,9 @@ yt.funcs.mylog.setLevel(0) +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +from checksumAPI import evaluate_checksum + # Check plotfile name specified in command line last_filename = sys.argv[1] filename_radical = re.findall(r"(.*?)\d+/*$", last_filename)[0] @@ -76,9 +79,8 @@ assert np.allclose(beta1[1:], beta_th[1:], atol=0.01) assert np.allclose(-beta2[1:], beta_th[1:], atol=0.01) -# Run checksum regression test -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, last_filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/resampling/analysis.py b/Examples/Tests/resampling/analysis.py index f55f3b996c5..40bad24d65e 100755 --- a/Examples/Tests/resampling/analysis.py +++ 
b/Examples/Tests/resampling/analysis.py @@ -17,7 +17,7 @@ from scipy.special import erf sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum fn_final = sys.argv[1] fn0 = fn_final[:-4] + "0000" @@ -171,5 +171,8 @@ # Check that particles with weight higher than level weight are unaffected by resampling. assert np.all(w[-numparts_unaffected:] == w0[-numparts_unaffected:]) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn_final) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/restart/CMakeLists.txt b/Examples/Tests/restart/CMakeLists.txt index bb3e90059c9..df5b1239a01 100644 --- a/Examples/Tests/restart/CMakeLists.txt +++ b/Examples/Tests/restart/CMakeLists.txt @@ -11,29 +11,24 @@ add_warpx_test( OFF # dependency ) -# TODO -# - Add checksums file -# - Enable analysis add_warpx_test( test_2d_runtime_components_picmi # name 2 # dims 1 # nprocs inputs_test_2d_runtime_components_picmi.py # inputs - OFF #analysis_default_regression.py # analysis - OFF #diags/diag1000010 # output + analysis_default_regression.py # analysis + diags/diag1000010 # output OFF # dependency ) -# TODO -# - Add checksums file -# - Enable analysis +# FIXME add_warpx_test( test_2d_runtime_components_picmi_restart # name 2 # dims 1 # nprocs "inputs_test_2d_runtime_components_picmi.py amr.restart='../test_2d_runtime_components_picmi/diags/chk000005'" # inputs - OFF #analysis_default_restart.py # analysis - OFF #diags/diag1000010 # output + OFF #analysis_default_restart.py # analysis + OFF #diags/diag1000010 # output test_2d_runtime_components_picmi # dependency ) diff --git a/Examples/Tests/restart/analysis_restart.py b/Examples/Tests/restart/analysis_restart.py index 4a4d198f63f..26a05da90f2 100755 --- a/Examples/Tests/restart/analysis_restart.py +++ b/Examples/Tests/restart/analysis_restart.py @@ -4,7 +4,7 @@ import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -14,7 +14,8 @@ check_restart(filename) -# Check-sum analysis -filename = sys.argv[1] -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/rigid_injection/analysis_rigid_injection_btd.py b/Examples/Tests/rigid_injection/analysis_rigid_injection_btd.py index 9b9054a4d42..759c211b42d 100755 --- a/Examples/Tests/rigid_injection/analysis_rigid_injection_btd.py +++ b/Examples/Tests/rigid_injection/analysis_rigid_injection_btd.py @@ -30,7 +30,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -81,5 +81,8 @@ print(f"tolerance = {tol}") assert err < tol -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/rigid_injection/analysis_rigid_injection_lab.py b/Examples/Tests/rigid_injection/analysis_rigid_injection_lab.py index 94b2a1ac07e..91e2bed1ed0 100755 --- a/Examples/Tests/rigid_injection/analysis_rigid_injection_lab.py +++ 
b/Examples/Tests/rigid_injection/analysis_rigid_injection_lab.py @@ -31,7 +31,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -102,5 +102,8 @@ def remove_rigid_lines(plotfile, nlines_if_rigid): assert np.array_equal(z, orig_z) assert np.array_equal(1 * (np.abs(x) < 5.0e-7), center) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/scraping/analysis_rz.py b/Examples/Tests/scraping/analysis_rz.py index 8bf86e320f3..aa0038dbcf5 100755 --- a/Examples/Tests/scraping/analysis_rz.py +++ b/Examples/Tests/scraping/analysis_rz.py @@ -28,7 +28,7 @@ from openpmd_viewer import OpenPMDTimeSeries sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum tolerance = 0 @@ -83,6 +83,9 @@ def n_scraped_particles(iteration): np.sort(id_initial) == np.sort(id_final) ) # Sort because particles may not be in the same order -# Checksum test -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn, do_particles=False) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + do_particles=False, +) diff --git a/Examples/Tests/silver_mueller/analysis.py b/Examples/Tests/silver_mueller/analysis.py index e1de7199aa0..aee27131bc9 100755 --- a/Examples/Tests/silver_mueller/analysis.py +++ b/Examples/Tests/silver_mueller/analysis.py @@ -20,7 +20,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -51,5 +51,8 @@ assert np.all(abs(Ey) < max_reflection_amplitude) assert np.all(abs(Ez) < max_reflection_amplitude) -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/single_particle/analysis.py b/Examples/Tests/single_particle/analysis.py index 198d84c6bfd..4127663e14d 100755 --- a/Examples/Tests/single_particle/analysis.py +++ b/Examples/Tests/single_particle/analysis.py @@ -16,7 +16,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Build Jx without filter. 
This can be obtained by running this test without # a filter, e.g., execute @@ -66,5 +66,8 @@ assert error_rel < tolerance_rel -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/Tests/space_charge_initialization/analysis.py b/Examples/Tests/space_charge_initialization/analysis.py index 1d5c8b9cb78..d63ba8f7334 100755 --- a/Examples/Tests/space_charge_initialization/analysis.py +++ b/Examples/Tests/space_charge_initialization/analysis.py @@ -26,7 +26,7 @@ yt.funcs.mylog.setLevel(0) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Parameters from the Simulation Qtot = -1.0e-20 @@ -124,5 +124,9 @@ def check(E, E_th, label): if ds.dimensionality == 3: check(Ez_array, Ez_th, "Ez") -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, filename, do_particles=0) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + do_particles=False, +) diff --git a/Examples/Tests/vay_deposition/analysis.py b/Examples/Tests/vay_deposition/analysis.py index 82776c34c42..ba428520660 100755 --- a/Examples/Tests/vay_deposition/analysis.py +++ b/Examples/Tests/vay_deposition/analysis.py @@ -16,7 +16,7 @@ yt.funcs.mylog.setLevel(50) sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum # Plotfile data set fn = sys.argv[1] @@ -35,6 +35,8 @@ print("tolerance = {}".format(tolerance)) assert error_rel < tolerance -# Checksum analysis -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) +# compare checksums +evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], +) diff --git a/Examples/analysis_default_openpmd_regression.py b/Examples/analysis_default_openpmd_regression.py index 03a0f1ede1f..6f38693f820 100755 --- a/Examples/analysis_default_openpmd_regression.py +++ b/Examples/analysis_default_openpmd_regression.py @@ -5,16 +5,22 @@ import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum -# this will be the name of the plot file -fn = sys.argv[1] - -# Get name of the test test_name = os.path.split(os.getcwd())[1] +output_file = sys.argv[1] # Run checksum regression test -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, output_format="openpmd", rtol=2.0e-6) +if re.search("single_precision", output_file): + evaluate_checksum( + test_name=test_name, + output_file=output_file, + output_format="openpmd", + rtol=2e-6, + ) else: - checksumAPI.evaluate_checksum(test_name, fn, output_format="openpmd") + evaluate_checksum( + test_name=test_name, + output_file=output_file, + output_format="openpmd", + ) diff --git a/Examples/analysis_default_regression.py b/Examples/analysis_default_regression.py index 519bbeeea64..7c02f6904b2 100755 --- a/Examples/analysis_default_regression.py +++ b/Examples/analysis_default_regression.py @@ -5,17 +5,21 @@ import sys sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum -# this will be the name of the plot file -fn = sys.argv[1] - -# Get name of the test test_name = os.path.split(os.getcwd())[1] +output_file = sys.argv[1] # Run checksum 
regression test -if re.search("single_precision", fn): - checksumAPI.evaluate_checksum(test_name, fn, rtol=2.0e-6) +if re.search("single_precision", output_file): + evaluate_checksum( + test_name=test_name, + output_file=output_file, + rtol=2e-6, + ) else: # using default relative tolerance - checksumAPI.evaluate_checksum(test_name, fn) + evaluate_checksum( + test_name=test_name, + output_file=output_file, + ) diff --git a/Examples/analysis_default_restart.py b/Examples/analysis_default_restart.py index 55bab253dbc..c019a0b5945 100755 --- a/Examples/analysis_default_restart.py +++ b/Examples/analysis_default_restart.py @@ -7,7 +7,7 @@ import yt sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI +from checksumAPI import evaluate_checksum def check_restart(filename, tolerance=1e-12): @@ -67,12 +67,17 @@ def check_restart(filename, tolerance=1e-12): print() -filename = sys.argv[1] +# test name (for checksums, remove "_restart") and output file name +test_name = os.path.split(os.getcwd())[1] +test_name = test_name.replace("_restart", "") +output_file = sys.argv[1] # compare restart results against original results -check_restart(filename) +check_restart(output_file) # compare restart checksums against original checksums -testname = os.path.split(os.getcwd())[1] -testname = testname.replace("_restart", "") -checksumAPI.evaluate_checksum(testname, filename, rtol=1e-12) +evaluate_checksum( + test_name=test_name, + output_file=output_file, + rtol=1e-12, +) diff --git a/Regression/Checksum/benchmarks_json/test_1d_background_mcc_picmi.json b/Regression/Checksum/benchmarks_json/test_1d_background_mcc_picmi.json new file mode 100644 index 00000000000..029294deb66 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_1d_background_mcc_picmi.json @@ -0,0 +1,20 @@ +{ + "lev=0": { + "rho_electrons": 0.0044328572492614605, + "rho_he_ions": 0.005198609403474849 + }, + "electrons": { + "particle_momentum_x": 3.5020450942268976e-20, + "particle_momentum_y": 3.5342700024993965e-20, + "particle_momentum_z": 1.2596017960675146e-19, + "particle_position_x": 2139.5967568101983, + "particle_weight": 14577210937500.002 + }, + "he_ions": { + "particle_momentum_x": 2.770046913680294e-19, + "particle_momentum_y": 2.755651798947783e-19, + "particle_momentum_z": 3.619494241595636e-19, + "particle_position_x": 2200.218124999781, + "particle_weight": 17184714843750.002 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_2d_background_mcc_picmi.json b/Regression/Checksum/benchmarks_json/test_2d_background_mcc_picmi.json new file mode 100644 index 00000000000..579f46d33ab --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_background_mcc_picmi.json @@ -0,0 +1,22 @@ +{ + "electrons": { + "particle_momentum_x": 1.011638818664759e-18, + "particle_momentum_y": 2.81974298744432e-19, + "particle_momentum_z": 2.809194032519318e-19, + "particle_position_x": 17136.01865460215, + "particle_position_y": 936.3651769897449, + "particle_weight": 61113170379.63868 + }, + "he_ions": { + "particle_momentum_x": 2.883076633513297e-18, + "particle_momentum_y": 2.195704870583595e-18, + "particle_momentum_z": 2.198216553980008e-18, + "particle_position_x": 17607.42545752183, + "particle_position_y": 1100.024786059151, + "particle_weight": 71976747650.1465 + }, + "lev=0": { + "rho_electrons": 0.03558889419586454, + "rho_he_ions": 0.04176234095111594 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_2d_dirichlet_bc.json 
b/Regression/Checksum/benchmarks_json/test_2d_dirichlet_bc.json new file mode 100644 index 00000000000..41567dc3bf2 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_dirichlet_bc.json @@ -0,0 +1,5 @@ +{ + "lev=0": { + "phi": 10817.97280547637 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_2d_field_probe.json b/Regression/Checksum/benchmarks_json/test_2d_field_probe.json new file mode 100644 index 00000000000..cb82acfc067 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_field_probe.json @@ -0,0 +1,10 @@ +{ + "lev=0": { + "Bx": 0.0, + "By": 126826.78487921853, + "Bz": 0.0, + "Ex": 32517064310550.266, + "Ey": 0.0, + "Ez": 17321323003697.61 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_2d_particle_reflection_picmi.json b/Regression/Checksum/benchmarks_json/test_2d_particle_reflection_picmi.json new file mode 100644 index 00000000000..97d0c1f5e58 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_particle_reflection_picmi.json @@ -0,0 +1,7 @@ +{ + "lev=0": { + "Ex": 4.865922376234882e-11, + "Ey": 0.0, + "Ez": 2.3293326580399806e-10 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_2d_runtime_components_picmi.json b/Regression/Checksum/benchmarks_json/test_2d_runtime_components_picmi.json new file mode 100644 index 00000000000..f1eb0047d49 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_runtime_components_picmi.json @@ -0,0 +1,14 @@ +{ + "lev=0": { + "phi": 0.001516261626005395 + }, + "electrons": { + "particle_momentum_x": 7.75165529536844e-26, + "particle_momentum_y": 6.938526597814195e-26, + "particle_momentum_z": 6.572519525636007e-26, + "particle_newPid": 500.0, + "particle_position_x": 1.4999588764814886, + "particle_position_y": 1.4999551809410656, + "particle_weight": 200.0 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_picmi.json new file mode 100644 index 00000000000..f3483a544b5 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_picmi.json @@ -0,0 +1,5 @@ +{ + "lev=0": { + "Ex": 0.0 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_3d_particle_absorption.json b/Regression/Checksum/benchmarks_json/test_3d_particle_absorption.json new file mode 100644 index 00000000000..ce6e2fcf79b --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_particle_absorption.json @@ -0,0 +1,10 @@ +{ + "lev=0": { + "Bx": 202106.71291347666, + "By": 202106.71291347663, + "Bz": 3371.897999274175, + "Ex": 38304043178806.11, + "Ey": 38304043178806.11, + "Ez": 83057027925874.84 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_rz_laser_acceleration_opmd.json b/Regression/Checksum/benchmarks_json/test_rz_laser_acceleration_opmd.json new file mode 100644 index 00000000000..de631f4767a --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_rz_laser_acceleration_opmd.json @@ -0,0 +1,35 @@ +{ + "lev=0": { + "Bt": 4299.677335258863, + "Bz": 34749.512290662635, + "Er": 1343319090029.9607, + "jr": 5229952989213.152, + "jt": 9.287962600874053e+17, + "jz": 3712414162446391.5, + "part_per_cell": 6288.0, + "part_per_grid": 25755648.0, + "rho": 102920475.65331206, + "rho_beam": 12377109.352622943, + "rho_electrons": 90543366.3006891 + }, + "beam": { + "particle_position_x": 3.651481908823126e-05, + "particle_position_y": 4.275668879776449e-05, + "particle_position_z": 0.0025531549045483943, + "particle_momentum_x": 3.879691286254116e-20, + 
"particle_momentum_y": 5.0782566944104114e-20, + "particle_momentum_z": 1.3503182565048374e-17, + "particle_weight": 6241509.074460764 + }, + "electrons": { + "particle_origX": 0.03652440297475791, + "particle_origZ": 0.06924276562500002, + "particle_position_x": 0.036524412900510936, + "particle_position_y": 0.03652445428108603, + "particle_position_z": 0.06924303765442104, + "particle_momentum_x": 5.508781425380743e-23, + "particle_momentum_y": 7.236141259605716e-21, + "particle_momentum_z": 4.4528442530356535e-22, + "particle_weight": 1118799420.1067173 + } +} diff --git a/Regression/Checksum/checksum.py b/Regression/Checksum/checksum.py index 4133d882a41..b2f327e36e3 100644 --- a/Regression/Checksum/checksum.py +++ b/Regression/Checksum/checksum.py @@ -243,9 +243,8 @@ def evaluate(self, rtol=1.0e-9, atol=1.0e-40): ) print("Benchmark: %s" % ref_benchmark.data.keys()) print("Test file: %s" % self.data.keys()) - print("\n----------------\nNew file for " + self.test_name + ":") + print(f"\nNew checksums file {self.test_name}.json:") print(json.dumps(self.data, indent=2)) - print("----------------") sys.exit(1) # Dictionaries have same inner keys (field and particle quantities)? @@ -261,9 +260,8 @@ def evaluate(self, rtol=1.0e-9, atol=1.0e-40): % (key1, ref_benchmark.data[key1].keys()) ) print("Test file inner keys in %s: %s" % (key1, self.data[key1].keys())) - print("\n----------------\nNew file for " + self.test_name + ":") + print(f"\nNew checksums file {self.test_name}.json:") print(json.dumps(self.data, indent=2)) - print("----------------") sys.exit(1) # Dictionaries have same values? @@ -298,7 +296,6 @@ def evaluate(self, rtol=1.0e-9, atol=1.0e-40): rel_err = abs_err / np.abs(x) print("Relative error: {:.2e}".format(rel_err)) if checksums_differ: - print("\n----------------\nNew file for " + self.test_name + ":") + print(f"\nNew checksums file {self.test_name}.json:") print(json.dumps(self.data, indent=2)) - print("----------------") sys.exit(1) From dac8c3b8ee3b8c556c0c708411ef356828142f0d Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 7 Oct 2024 08:53:18 -0700 Subject: [PATCH 029/278] Doc: Catalyst Verbatim Formatting (#5365) Fix some RST `verbatim`, which needs two backticks compared to Markdown. --- Docs/source/dataanalysis/catalyst.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Docs/source/dataanalysis/catalyst.rst b/Docs/source/dataanalysis/catalyst.rst index 97e634c5c6a..939b6b134bd 100644 --- a/Docs/source/dataanalysis/catalyst.rst +++ b/Docs/source/dataanalysis/catalyst.rst @@ -19,7 +19,7 @@ First, we build build `Catalyst 2 `_ using the conduit library created in the previous step. The latter can be achieved by adding the installation path of conduit to the environmental -variable `CMAKE_PREFIX_PATH` and setting `CATALYST_WITH_EXTERNAL_CONDUIT=ON` during the configuration step of Catalyst. +variable ``CMAKE_PREFIX_PATH`` and setting ``CATALYST_WITH_EXTERNAL_CONDUIT=ON`` during the configuration step of Catalyst. Then we build ParaView master (on a commit after 2024.07.01, tested on ``4ef351a54ff747ef7169e2e52e77d9703a9dfa77``) following the developer instructions provided `here `__ . @@ -27,7 +27,7 @@ A representative set of options for a headless ParaView installation is provided `here `__ Afterward, WarpX must be built with ``WarpX_CATALYST=ON``. Also, make sure to provide the installed paths of Conduit and Catalyst via -`CMAKE_PREFIX_PATH` before configuring WarpX. +``CMAKE_PREFIX_PATH`` before configuring WarpX. 
Inputs File Configuration ------------------------- @@ -41,7 +41,7 @@ In addition to configuring the diagnostics, the following parameters must be inc * ``catalyst.implementation_search_paths``: The locations to search for the given implementation. The specific file being searched for will be ``catalyst_{implementation}.so``. The latter two can also be given via the environmental variables -`CATALYST_IMPLEMENTATION_NAME` and `CATALYST_IMPLEMENTATION_PATHS` +``CATALYST_IMPLEMENTATION_NAME`` and ``CATALYST_IMPLEMENTATION_PATHS`` respectively. Because the scripts and implementations are global, Catalyst does not benefit from nor differentiate between multiple diagnostics. @@ -110,7 +110,7 @@ To generate the data dumps one must first set the environmental variable ``CATAL This will run the simulation and write the conduit nodes under ``CATALYST_DATA_DUMP_DIRECTORY``. -Afterward, one can replay the generated nodes by setting up the `CATALYST_IMPLEMENTATION_*` variables for the `catalyst_replay` executable (which can be found in the catalyst build directory) appropriately. For example: +Afterward, one can replay the generated nodes by setting up the ``CATALYST_IMPLEMENTATION_*`` variables for the ``catalyst_replay`` executable (which can be found in the catalyst build directory) appropriately. For example: .. code-block:: bash From bacbef747f9320a6288ef461bd8ace1c669ae665 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 7 Oct 2024 16:43:13 -0700 Subject: [PATCH 030/278] AMReX/pyAMReX/PICSAR: weekly update (#5369) Weekly update to latest AMReX/pyAMReX/PICSAR. ```console ./Tools/Release/updateAMReX.py ./Tools/Release/updatepyAMReX.py ./Tools/Release/updatePICSAR.py ``` Co-authored-by: Axel Huebl --- .github/workflows/cuda.yml | 2 +- cmake/dependencies/AMReX.cmake | 2 +- cmake/dependencies/pyAMReX.cmake | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 4a38872a5f4..2209f425d1f 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -135,7 +135,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 24.10 && cd - + cd ../amrex && git checkout --detach e1222803739ed2342b9ff6fc2d57316ff0d6cb0c && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 91340066803..51ad361276a 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -279,7 +279,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "24.10" +set(WarpX_amrex_branch "e1222803739ed2342b9ff6fc2d57316ff0d6cb0c" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index f7b905c32c3..9543dac2ee2 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "24.10" +set(WarpX_pyamrex_branch "3699781e4284921f9ccdbbbbc57169ff79c0de20" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") From 46d88b8311c750efc8f2348a5a25a9fbc12558f2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 8 Oct 2024 02:21:05 +0000 Subject: [PATCH 031/278] [pre-commit.ci] pre-commit autoupdate (#5373) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/pre-commit/pre-commit-hooks: v4.6.0 → v5.0.0](https://github.com/pre-commit/pre-commit-hooks/compare/v4.6.0...v5.0.0) - [github.com/astral-sh/ruff-pre-commit: v0.6.8 → v0.6.9](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.8...v0.6.9) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d2b15b8af95..8ba600be560 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -18,7 +18,7 @@ exclude: '^share/openPMD/thirdParty' # See https://pre-commit.com/hooks.html for more hooks repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v5.0.0 hooks: - id: trailing-whitespace args: [--markdown-linebreak-ext=md] @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.8 + rev: v0.6.9 hooks: # Run the linter - id: ruff From 894a699c11d165fdaff53fcc3cd051eeadbdf367 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 7 Oct 2024 19:46:16 -0700 Subject: [PATCH 032/278] Doc: Governance GitHub Team Links (#5374) Fix the link in the governance doc to the steering committee (formerly: admin) and technical committee (formerly: maintainers). This was a rename when we adopted the governance doc #4743 and fixes two broken links. 
cc @ECP-WarpX/warpx-steering-committee (for approval) cc @ECP-WarpX/warpx-technical-committee (FYI) --- GOVERNANCE.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/GOVERNANCE.rst b/GOVERNANCE.rst index b5253b80f9f..588e8b2df6e 100644 --- a/GOVERNANCE.rst +++ b/GOVERNANCE.rst @@ -16,7 +16,7 @@ Current Roster - Remi Lehe - Axel Huebl -See: `GitHub team `__ +See: `GitHub team `__ Role ^^^^ @@ -66,7 +66,7 @@ Current Roster - Weiqun Zhang - Edoardo Zoni -See: `GitHub team `__ +See: `GitHub team `__ Role ^^^^ From b7108967b5068c5d942d0ba9e0285efd0d07aa05 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Tue, 8 Oct 2024 14:21:48 -0700 Subject: [PATCH 033/278] CI: update docs and reset tool for checksums (#5372) The major part of this PR is about updating the docs so that it is a bit easier for developers to connect the section on testing with the section on checksums. Here's a couple of screenshots showing the new content organization for the testing and checksums sections:
[screenshots: reorganized "Testing the code" and "Checksums" doc sections]
As part of this PR, I also update the tool that we have to reset checksums locally based on the Azure output. The update is necessary due to a change in #5297, as noted in https://github.com/ECP-WarpX/WarpX/pull/5297#discussion_r1787006065, given that new checksum files are now displayed as follows: ``` New checksums file test_2d_langmuir_multi.json: { "lev=0": { "Bx": 0.0, "By": 5.726296856755232, "Bz": 0.0, "Ex": 3751589134191.326, "Ey": 0.0, "Ez": 3751589134191.332, "jx": 1.0100623329922576e+16, "jy": 0.0, "jz": 1.0100623329922578e+16 }, "electrons": { "particle_momentum_x": 5.668407513430198e-20, "particle_momentum_y": 0.0, "particle_momentum_z": 5.668407513430198e-20, "particle_position_x": 0.6553599999999999, "particle_position_y": 0.65536, "particle_weight": 3200000000000000.5 }, "positrons": { "particle_momentum_x": 5.668407513430198e-20, "particle_momentum_y": 0.0, "particle_momentum_z": 5.668407513430198e-20, "particle_position_x": 0.6553599999999999, "particle_position_y": 0.65536, "particle_weight": 3200000000000000.5 } } ``` as opposed to the old way ``` ---------------- New file for test_2d_langmuir_multi: { "lev=0": { "Bx": 0.0, "By": 5.726296856755232, "Bz": 0.0, "Ex": 3751589134191.326, "Ey": 0.0, "Ez": 3751589134191.332, "jx": 1.0100623329922576e+16, "jy": 0.0, "jz": 1.0100623329922578e+16 }, "electrons": { "particle_momentum_x": 5.668407513430198e-20, "particle_momentum_y": 0.0, "particle_momentum_z": 5.668407513430198e-20, "particle_position_x": 0.6553599999999999, "particle_position_y": 0.65536, "particle_weight": 3200000000000000.5 }, "positrons": { "particle_momentum_x": 5.668407513430198e-20, "particle_momentum_y": 0.0, "particle_momentum_z": 5.668407513430198e-20, "particle_position_x": 0.6553599999999999, "particle_position_y": 0.65536, "particle_weight": 3200000000000000.5 } } ---------------- ``` To-do: - [x] Update docs - [x] Update tool --------- Co-authored-by: Axel Huebl --- Docs/source/developers/checksum.rst | 45 +++++----- Docs/source/developers/testing.rst | 47 +++++++---- Docs/source/developers/workflows.rst | 4 +- .../update_benchmarks_from_azure_output.py | 83 ++++++++----------- 4 files changed, 94 insertions(+), 85 deletions(-) diff --git a/Docs/source/developers/checksum.rst b/Docs/source/developers/checksum.rst index 2452d074ba1..ccbea3408ef 100644 --- a/Docs/source/developers/checksum.rst +++ b/Docs/source/developers/checksum.rst @@ -1,32 +1,36 @@ .. _developers-checksum: -Checksum regression tests -========================= +Checksums on Tests +================== -WarpX has checksum regression tests: as part of CI testing, when running a given test, the checksum module computes one aggregated number per field (``Ex_checksum = np.sum(np.abs(Ex))``) and compares it to a reference (benchmark). This should be sensitive enough to make the test fail if your PR causes a significant difference, print meaningful error messages, and give you a chance to fix a bug or reset the benchmark if needed. +When running an automated test, we often compare the data of final time step of the test with expected values to catch accidental changes. +Instead of relying on reference files that we would have to store in their full size, we calculate an aggregate checksum. 
-The checksum module is located in ``Regression/Checksum/``, and the benchmarks are stored as human-readable `JSON `__ files in ``Regression/Checksum/benchmarks_json/``, with one file per benchmark (for instance, test ``Langmuir_2d`` has a corresponding benchmark ``Regression/Checksum/benchmarks_json/Langmuir_2d.json``). +For this purpose, the checksum Python module computes one aggregated number per field (e.g., the sum of the absolute values of the array elements) and compares it to a reference value (benchmark). +This should be sensitive enough to make the test fail if your PR causes a significant difference, print meaningful error messages, and give you a chance to fix a bug or reset the benchmark if needed. -For more details on the implementation, the Python files in ``Regression/Checksum/`` should be well documented. +The checksum module is located in ``Regression/Checksum/``, and the benchmarks are stored as human-readable `JSON `__ files in ``Regression/Checksum/benchmarks_json/``, with one file per benchmark (for example, the test ``test_2d_langmuir_multi`` has a corresponding benchmark ``Regression/Checksum/benchmarks_json/test_2d_langmuir_multi.json``). -From a user point of view, you should only need to use ``checksumAPI.py``. It contains Python functions that can be imported and used from an analysis Python script. It can also be executed directly as a Python script. Here are recipes for the main tasks related to checksum regression tests in WarpX CI. +For more details on the implementation, please refer to the Python implementation in ``Regression/Checksum/``. -Include a checksum regression test in an analysis Python script ---------------------------------------------------------------- +From a user point of view, you should only need to use ``checksumAPI.py``, which contains Python functions that can be imported and used from an analysis Python script or can also be executed directly as a Python script. + +How to compare checksums in your analysis script +------------------------------------------------ This relies on the function ``evaluate_checksum``: .. autofunction:: checksumAPI.evaluate_checksum -For an example, see +Here's an example: -.. literalinclude:: ../../../Examples/analysis_default_regression.py +.. literalinclude:: ../../../Examples/Tests/embedded_circle/analysis.py :language: python -This can also be included in an existing analysis script. Note that the plotfile must be ``_plt?????``, as is generated by the CI framework. +This can also be included as part of an existing analysis script. -Evaluate a checksum regression test from a bash terminal --------------------------------------------------------- +How to evaluate checksums from the command line +----------------------------------------------- You can execute ``checksumAPI.py`` as a Python script for that, and pass the plotfile that you want to evaluate, as well as the test name (so the script knows which benchmark to compare it to). 
@@ -41,11 +45,8 @@ See additional options * ``--rtol`` relative tolerance for the comparison * ``--atol`` absolute tolerance for the comparison (a sum of both is used by ``numpy.isclose()``) -Create/Reset a benchmark with new values that you know are correct ------------------------------------------------------------------- - -Create/Reset a benchmark from a plotfile generated locally -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +How to create or reset checksums with local benchmark values +------------------------------------------------------------ This is using ``checksumAPI.py`` as a Python script. @@ -65,8 +66,8 @@ Since this will automatically change the JSON file stored on the repo, make a se git add .json git commit -m "reset benchmark for because ..." --author="Tools " -Automated reset of a list of test benchmarks --------------------------------------------- +How to reset checksums for a list of tests with local benchmark values +---------------------------------------------------------------------- If you set the environment variable ``export CHECKSUM_RESET=ON`` before running tests that are compared against existing benchmarks, the test analysis will reset the benchmarks to the new values, skipping the comparison. @@ -80,8 +81,8 @@ With `CTest `__ (coming # ... check and commit changes ... -Reset a benchmark from the Azure pipeline output on Github -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +How to reset checksums for a list of tests with benchmark values from the Azure pipeline output +----------------------------------------------------------------------------------------------- Alternatively, the benchmarks can be reset using the output of the Azure continuous intergration (CI) tests on Github. The output can be accessed by following the steps below: diff --git a/Docs/source/developers/testing.rst b/Docs/source/developers/testing.rst index ee5c82aeea9..5fd4b498b07 100644 --- a/Docs/source/developers/testing.rst +++ b/Docs/source/developers/testing.rst @@ -3,33 +3,43 @@ Testing the code ================ -When adding a new feature, you want to make sure that (i) you did not break the existing code and (ii) your contribution gives correct results. While the code is tested regularly remotely (on the cloud when commits are pushed to an open PR, and every night on local clusters), it can also be useful to run tests on your custom input file. This section details how to use both automated and custom tests. +When proposing a code change, you want to make sure that -Continuous Integration in WarpX -------------------------------- +* the code change does not break the existing code; +* the code change gives correct results (numerics, physics, etc.). -Configuration -^^^^^^^^^^^^^ +WarpX follows the continuous integration (CI) software development practice, where automated builds and tests are run after merging code changes into the main branch. -Our regression tests are run with `CTest `__, an executable that comes with CMake. - -The test suite is ready to run once you have configured and built WarpX with CMake, following the instructions that you find in our :ref:`Users ` or :ref:`Developers ` sections. - -A test that requires a build option that was not configured and built will be skipped automatically. For example, if you configure and build WarpX in 1D only, any test of dimensionality other than 1D, which would require WarpX to be configured and built in the corresponding dimensionality, will be skipped automatically. 
+While the code is tested regularly remotely (on the cloud when commits are pushed to an open PR, and every night on local clusters), it can also be useful to run tests on your custom input file. How to run pre-commit tests locally ----------------------------------- -When proposing code changes to Warpx, we perform a couple of automated stylistic and correctness checks on the code change. -You can run those locally before you push to save some time, install them once like this: +First, when proposing a code change, we perform a couple of automated style and correctness checks. + +If you install the ``pre-commit`` tool on your local machine via .. code-block:: sh python -m pip install -U pre-commit pre-commit install +the style and correctness checks will run automatically on your local machine, after you commit the change and before you push. + +If you do not install the ``pre-commit`` tool on your local machine, these checks will run automatically as part of our CI workflows and a commit containing style and correctness changes might be added automatically to your branch. +In that case, you will need to pull that automated commit before pushing further changes. + See `pre-commit.com `__ and our ``.pre-commit-config.yaml`` file in the repository for more details. +How to configure the automated tests +------------------------------------ + +Our regression tests are run with `CTest `__, an executable that comes with CMake. + +The test suite is ready to run once you have configured and built WarpX with CMake, following the instructions that you find in our :ref:`Users ` or :ref:`Developers ` sections. + +A test that requires a build option that was not configured and built will be skipped automatically. For example, if you configure and build WarpX in 1D only, any test of dimensionality other than 1D, which would require WarpX to be configured and built in the corresponding dimensionality, will be skipped automatically. + How to run automated tests locally ---------------------------------- @@ -107,7 +117,15 @@ If you modify the code base locally and want to assess the effects of your code How to add automated tests -------------------------- -As mentioned above, the input files and scripts used by the automated tests can be found in the `Examples `__ directory, either under `Physics_applications `__ or `Tests `__. +An automated test typically consists of the following components: + +* input file or PICMI input script; +* analysis script; +* checksum file. + +To learn more about how to use checksums in automated tests, please see the corresponding section :ref:`Using checksums `. + +As mentioned above, the input files and scripts used by the automated tests can be found in the `Examples `__ directory, under either `Physics_applications `__ or `Tests `__. Each test directory must contain a file named ``CMakeLists.txt`` where all tests associated with the input files and scripts in that directory must be listed. @@ -173,7 +191,8 @@ A new test can be added by adding a corresponding entry in ``CMakeLists.txt`` as If you need a new Python package dependency for testing, please add it in `Regression/requirements.txt `__. -Sometimes two or more tests share a large number of input parameters. The shared input parameters can be collected in a "base" input file that can be passed as a runtime parameter in the actual test input files through the parameter ``FILE``. +Sometimes two or more tests share a large number of input parameters. 
+The shared input parameters can be collected in a "base" input file that can be passed as a runtime parameter in the actual test input files through the parameter ``FILE``. If the new test is added in a new directory that did not exist before, please add the name of that directory with the command ``add_subdirectory`` in `Physics_applications/CMakeLists.txt `__ or `Tests/CMakeLists.txt `__, depending on where the new test directory is located. diff --git a/Docs/source/developers/workflows.rst b/Docs/source/developers/workflows.rst index 00279018e9d..f7c81ae70d8 100644 --- a/Docs/source/developers/workflows.rst +++ b/Docs/source/developers/workflows.rst @@ -8,7 +8,7 @@ Workflows profiling testing - documentation checksum - local_compile run_clang_tidy_locally + local_compile + documentation diff --git a/Tools/DevUtils/update_benchmarks_from_azure_output.py b/Tools/DevUtils/update_benchmarks_from_azure_output.py index b2be4d17a7b..bcff995b21a 100644 --- a/Tools/DevUtils/update_benchmarks_from_azure_output.py +++ b/Tools/DevUtils/update_benchmarks_from_azure_output.py @@ -1,4 +1,4 @@ -# Copyright 2023 Neil Zaim +# Copyright 2023 Neil Zaim, Edoardo Zoni # # This file is part of WarpX. # @@ -9,56 +9,45 @@ import sys """ -This Python script updates the Azure benchmarks automatically using a raw Azure output textfile -that is given as the first and only argument of the script. - -In the Azure output, we read the lines contained between -"New file for Test_Name:" -and the next occurrence of -"'----------------'" -And use these lines to update the benchmarks +This Python script updates the Azure benchmarks automatically using a raw +Azure output text file that is passed as command line argument of the script. """ -azure_output_filename = sys.argv[1] +# read path to Azure output text file +azure_output = sys.argv[1] -pattern_test_name = "New file for (?P[\w\-]*)" -closing_string = "----------------" -benchmark_path = "../../Regression/Checksum/benchmarks_json/" -benchmark_suffix = ".json" +# string to identify failing tests that require a checksums reset +new_checksums = "New checksums" +failing_test = "" -first_line_read = False -current_test = "" +# path of all checksums benchmark files +benchmark_path = "../../Regression/Checksum/benchmarks_json/" -with open(azure_output_filename, "r") as f: +with open(azure_output, "r") as f: + # find length of Azure prefix to be removed from each line, + # first line of Azure output starts with "##[section]Starting:" + first_line = f.readline() + prefix_length = first_line.find("#") + # loop over lines for line in f: - if current_test == "": - # Here we search lines that read, for example, - # "New file for LaserAcceleration_BTD" - # and we set current_test = "LaserAcceleration_BTD" - match_test_name = re.search(pattern_test_name, line) - if match_test_name: - current_test = match_test_name.group("testname") - new_file_string = "" - + # remove Azure prefix from line + line = line[prefix_length:] + if failing_test == "": + # no failing test found yet + if re.search(new_checksums, line): + # failing test found, set failing test name + failing_test = line[line.find("test_") : line.find(".json")] + json_file_string = "" else: - # We add each line to the new file string until we find the line containing - # "----------------" - # which indicates that we have read the new file entirely - - if closing_string not in line: - if not first_line_read: - # Raw Azure output comes with a prefix at the beginning of each line that we do - # not need here. 
The first line that we will read is the prefix followed by the - # "{" character, so we determine how long the prefix is by finding the last - # occurrence of the "{" character in this line. - azure_indent = line.rfind("{") - first_line_read = True - new_file_string += line[azure_indent:] - - else: - # We have read the new file entirely. Dump it in the json file. - new_file_json = json.loads(new_file_string) - json_filepath = benchmark_path + current_test + benchmark_suffix - with open(json_filepath, "w") as f_json: - json.dump(new_file_json, f_json, indent=2) - current_test = "" + # extract and dump new checksums of failing test + json_file_string += line + if line.startswith("}"): # end of new checksums + json_file = json.loads(json_file_string) + json_filename = failing_test + ".json" + json_filepath = benchmark_path + json_filename + print(f"\nDumping new checksums file {json_filename}:") + print(json_file_string) + with open(json_filepath, "w") as json_f: + json.dump(json_file, json_f, indent=2) + # reset to empty string to continue search of failing tests + failing_test = "" From 27181aa06b73e21b8c94c8650c753bfa309bd137 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Wed, 9 Oct 2024 09:36:42 -0700 Subject: [PATCH 034/278] Docs: fix checksums section cross-reference (#5376) The checksums section title was changed to "Checksums on Tests" in the latest version of #5372, but the cross-reference in the testing section wasn't updated and still had the old name "Using checksums". --------- Co-authored-by: Axel Huebl --- Docs/source/developers/testing.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Docs/source/developers/testing.rst b/Docs/source/developers/testing.rst index 5fd4b498b07..111e3e7d7cb 100644 --- a/Docs/source/developers/testing.rst +++ b/Docs/source/developers/testing.rst @@ -1,6 +1,6 @@ .. _developers-testing: -Testing the code +Testing the Code ================ When proposing a code change, you want to make sure that @@ -123,7 +123,7 @@ An automated test typically consists of the following components: * analysis script; * checksum file. -To learn more about how to use checksums in automated tests, please see the corresponding section :ref:`Using checksums `. +To learn more about how to use checksums in automated tests, please see the corresponding section :ref:`Checksums on Tests `. As mentioned above, the input files and scripts used by the automated tests can be found in the `Examples `__ directory, under either `Physics_applications `__ or `Tests `__. From 1d2910e276b02e4f1c4c1486b710a97a97776809 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 9 Oct 2024 11:49:34 -0700 Subject: [PATCH 035/278] CMake: Fix List of Pip Options (#5378) We were not yet able to pass lists of options to `pip` commands in our `pip` CMake helper targets. This fixes it. 
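For example, after this change a configure line along the following lines should work; this is a hedged sketch, with the option values taken from the examples in the updated docs table, and it relies on CMake's `COMMAND_EXPAND_LISTS` to expand each `;`-separated entry into a separate argument of the underlying `pip` command:

```bash
# Sketch: ";"-separated CMake list values are forwarded as individual
# arguments to `pip` / `pip install` (via COMMAND_EXPAND_LISTS).
cmake -S . -B build \
    -DWarpX_PYTHON=ON \
    -DPY_PIP_OPTIONS="-vvv;-q" \
    -DPY_PIP_INSTALL_OPTIONS="--user;-q"

# Target name assumes WarpX is the top-level project (empty target prefix).
cmake --build build --target pip_install -j 4
```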
Follow-up to #2822 X-ref: https://github.com/spack/spack/pull/46765 --- .github/workflows/macos.yml | 17 ++++++++--------- CMakeLists.txt | 10 +++++++--- Docs/source/install/cmake.rst | 10 +++++----- 3 files changed, 20 insertions(+), 17 deletions(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 463b2dc2501..0afaf6ea451 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -22,13 +22,16 @@ jobs: #CMAKE_GENERATOR: Ninja steps: - uses: actions/checkout@v4 - - name: install dependencies + - uses: actions/setup-python@v5 + name: Install Python + with: + python-version: '3.x' + - name: install brew dependencies run: | set +e brew unlink gcc brew update brew upgrade || true - brew install --overwrite python brew install ccache brew install fftw brew install libomp @@ -39,12 +42,12 @@ jobs: set -e brew tap openpmd/openpmd brew install openpmd-api - - python3 -m venv py-venv - source py-venv/bin/activate + - name: install pip dependencies + run: | python3 -m pip install --upgrade pip python3 -m pip install --upgrade build packaging setuptools wheel python3 -m pip install --upgrade mpi4py + python3 -m pip install --upgrade -r Regression/requirements.txt - name: CCache Cache uses: actions/cache@v4 with: @@ -60,8 +63,6 @@ jobs: export CCACHE_SLOPPINESS=time_macros ccache -z - source py-venv/bin/activate - cmake -S . -B build_dp \ -DCMAKE_VERBOSE_MAKEFILE=ON \ -DWarpX_EB=OFF \ @@ -71,7 +72,6 @@ jobs: cmake -S . -B build_sp \ -DCMAKE_VERBOSE_MAKEFILE=ON \ - -DPython_EXECUTABLE=$(which python3) \ -DWarpX_EB=OFF \ -DWarpX_PYTHON=ON \ -DWarpX_OPENPMD=ON \ @@ -85,7 +85,6 @@ jobs: - name: run pywarpx run: | - source py-venv/bin/activate export OMP_NUM_THREADS=1 mpirun -n 2 Examples/Physics_applications/laser_acceleration/inputs_test_3d_laser_acceleration_picmi.py diff --git a/CMakeLists.txt b/CMakeLists.txt index 5065baa0b6d..980b23183fd 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -714,9 +714,9 @@ endforeach() # if(WarpX_PYTHON) set(PY_PIP_OPTIONS "-v" CACHE STRING - "Additional parameters to pass to `pip`") + "Additional parameters to pass to `pip` as ; separated list") set(PY_PIP_INSTALL_OPTIONS "" CACHE STRING - "Additional parameters to pass to `pip install`") + "Additional parameters to pass to `pip install` as ; separated list") # ensure all targets are built before we package them in a wheel set(pyWarpX_INSTALL_TARGET_NAMES) @@ -739,7 +739,8 @@ if(WarpX_PYTHON) ${CMAKE_COMMAND} -E rm -f -r warpx-whl COMMAND ${CMAKE_COMMAND} -E env PYWARPX_LIB_DIR=$ - ${Python_EXECUTABLE} -m pip ${PY_PIP_OPTIONS} wheel --no-build-isolation --no-deps --wheel-dir=warpx-whl ${WarpX_SOURCE_DIR} + ${Python_EXECUTABLE} -m pip ${PY_PIP_OPTIONS} wheel --no-build-isolation --no-deps --wheel-dir=warpx-whl "${WarpX_SOURCE_DIR}" + COMMAND_EXPAND_LISTS VERBATIM WORKING_DIRECTORY ${WarpX_BINARY_DIR} DEPENDS @@ -754,6 +755,7 @@ if(WarpX_PYTHON) endif() add_custom_target(${WarpX_CUSTOM_TARGET_PREFIX}pip_install_requirements ${Python_EXECUTABLE} -m pip ${PY_PIP_OPTIONS} install ${PY_PIP_INSTALL_OPTIONS} -r "${WarpX_SOURCE_DIR}/${pyWarpX_REQUIREMENT_FILE}" + COMMAND_EXPAND_LISTS VERBATIM WORKING_DIRECTORY ${WarpX_BINARY_DIR} ) @@ -771,6 +773,7 @@ if(WarpX_PYTHON) add_custom_target(${WarpX_CUSTOM_TARGET_PREFIX}pip_install ${CMAKE_COMMAND} -E env WARPX_MPI=${WarpX_MPI} ${Python_EXECUTABLE} -m pip ${PY_PIP_OPTIONS} install --force-reinstall --no-index --no-deps ${PY_PIP_INSTALL_OPTIONS} --find-links=warpx-whl pywarpx + COMMAND_EXPAND_LISTS VERBATIM WORKING_DIRECTORY 
${WarpX_BINARY_DIR} DEPENDS @@ -784,6 +787,7 @@ if(WarpX_PYTHON) add_custom_target(${WarpX_CUSTOM_TARGET_PREFIX}pip_install_nodeps ${CMAKE_COMMAND} -E env WARPX_MPI=${WarpX_MPI} ${Python_EXECUTABLE} -m pip ${PY_PIP_OPTIONS} install --force-reinstall --no-index --no-deps ${PY_PIP_INSTALL_OPTIONS} --find-links=warpx-whl pywarpx + COMMAND_EXPAND_LISTS VERBATIM WORKING_DIRECTORY ${WarpX_BINARY_DIR} DEPENDS diff --git a/Docs/source/install/cmake.rst b/Docs/source/install/cmake.rst index 60d9eecc2b4..41e4c40bc85 100644 --- a/Docs/source/install/cmake.rst +++ b/Docs/source/install/cmake.rst @@ -77,9 +77,9 @@ For example, this builds WarpX in all geometries, enables Python bindings and Nv Build Options ------------- -============================= ============================================ ========================================================= +============================= ============================================ =========================================================== CMake Option Default & Values Description -============================= ============================================ ========================================================= +============================= ============================================ =========================================================== ``CMAKE_BUILD_TYPE`` RelWithDebInfo/**Release**/Debug `Type of build, symbols & optimizations `__ ``CMAKE_INSTALL_PREFIX`` system-dependent path `Install path prefix `__ ``CMAKE_VERBOSE_MAKEFILE`` ON/**OFF** `Print all compiler commands to the terminal during build `__ @@ -105,9 +105,9 @@ CMake Option Default & Values Descr ``WarpX_QED_TABLES_GEN_OMP`` **AUTO**/ON/OFF Enables OpenMP support for QED lookup tables generation ``WarpX_SENSEI`` ON/**OFF** SENSEI in situ visualization ``Python_EXECUTABLE`` (newest found) Path to Python executable -``PY_PIP_OPTIONS`` ``-v`` Additional options for ``pip``, e.g., ``-vvv`` -``PY_PIP_INSTALL_OPTIONS`` Additional options for ``pip install``, e.g., ``--user`` -============================= ============================================ ========================================================= +``PY_PIP_OPTIONS`` ``-v`` Additional options for ``pip``, e.g., ``-vvv;-q`` +``PY_PIP_INSTALL_OPTIONS`` Additional options for ``pip install``, e.g., ``--user;-q`` +============================= ============================================ =========================================================== WarpX can be configured in further detail with options from AMReX, which are documented in the AMReX manual: From 65e82617839ac70b3f0951eab18fa04a06dfb93e Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Wed, 9 Oct 2024 17:31:50 -0500 Subject: [PATCH 036/278] Disable AMReX_LINEAR_SOLVER_INCFLO by default (#5364) We do not need to compile these linear solvers that are intended for incompressible flow solvers. This should speed up the build process a little bit. Introduced in https://github.com/AMReX-Codes/amrex/pull/4181 --- GNUmakefile | 3 +++ Source/ablastr/Make.package | 1 - Source/ablastr/fields/Make.package | 4 +++- cmake/dependencies/AMReX.cmake | 4 ++++ 4 files changed, 10 insertions(+), 2 deletions(-) diff --git a/GNUmakefile b/GNUmakefile index fe10983b780..1cc78403c7b 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -43,5 +43,8 @@ USE_RZ = FALSE USE_EB = FALSE +USE_LINEAR_SOLVERS_EM = TRUE +USE_LINEAR_SOLVERS_INCFLO = FALSE + WARPX_HOME := . 
include $(WARPX_HOME)/Source/Make.WarpX diff --git a/Source/ablastr/Make.package b/Source/ablastr/Make.package index b9ff3c72560..edbf43b7802 100644 --- a/Source/ablastr/Make.package +++ b/Source/ablastr/Make.package @@ -1,4 +1,3 @@ -#CEXE_sources += ParticleBoundaries.cpp include $(WARPX_HOME)/Source/ablastr/coarsen/Make.package include $(WARPX_HOME)/Source/ablastr/math/Make.package diff --git a/Source/ablastr/fields/Make.package b/Source/ablastr/fields/Make.package index 727a17b6de8..7441a6a1238 100644 --- a/Source/ablastr/fields/Make.package +++ b/Source/ablastr/fields/Make.package @@ -1,5 +1,7 @@ + +CEXE_sources += MultiFabRegister.cpp + ifeq ($(USE_FFT),TRUE) - CEXE_sources += MultiFabRegister.cpp ifeq ($(DIM),3) CEXE_sources += IntegratedGreenFunctionSolver.cpp endif diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 51ad361276a..6513841f327 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -92,6 +92,8 @@ macro(find_amrex) set(AMReX_PARTICLES ON CACHE INTERNAL "") set(AMReX_PROBINIT OFF CACHE INTERNAL "") set(AMReX_TINY_PROFILE ON CACHE BOOL "") + set(AMReX_LINEAR_SOLVERS_EM ON CACHE INTERNAL "") + set(AMReX_LINEAR_SOLVER_INCFLO OFF CACHE INTERNAL "") if(WarpX_ASCENT OR WarpX_SENSEI) set(AMReX_GPU_RDC ON CACHE BOOL "") @@ -200,6 +202,8 @@ macro(find_amrex) mark_as_advanced(AMReX_HYPRE) mark_as_advanced(AMReX_IPO) mark_as_advanced(AMReX_LINEAR_SOLVERS) + mark_as_advanced(AMReX_LINEAR_SOLVERS_INCFLO) + mark_as_advanced(AMReX_LINEAR_SOLVERS_EM) mark_as_advanced(AMReX_MEM_PROFILE) mark_as_advanced(AMReX_MPI) mark_as_advanced(AMReX_MPI_THREAD_MULTIPLE) From 89dc850e6c735b21221f61955204371bf367b773 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Wed, 9 Oct 2024 19:25:42 -0700 Subject: [PATCH 037/278] Expose `MultiParticleContainer.GetChargeDensity` to Python (#5382) Signed-off-by: roelof-groenewald --- Source/Python/Particles/MultiParticleContainer.cpp | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/Source/Python/Particles/MultiParticleContainer.cpp b/Source/Python/Particles/MultiParticleContainer.cpp index e709f0950b4..7b3b114b080 100644 --- a/Source/Python/Particles/MultiParticleContainer.cpp +++ b/Source/Python/Particles/MultiParticleContainer.cpp @@ -42,5 +42,12 @@ i_lens: int strength_E, strength_B: floats The electric and magnetic focusing strength of the lens)pbdoc" ) + + .def("get_charge_density", + [](MultiParticleContainer& mpc, int lev, bool local) { + return mpc.GetChargeDensity(lev, local); + }, + py::arg("lev"), py::arg("local") + ) ; } From c045eaf4a525605e69336feb40aeed7bf50bd63a Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 9 Oct 2024 19:26:28 -0700 Subject: [PATCH 038/278] CMake: No FFTW Needed for SYCL anymore (#5380) We do not need FFTW3 anymore to do FFTs on SYCL GPUs. 
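For instance, a SYCL build with the FFT-based solvers enabled — mirroring the configure flags exercised in the updated CI job below — no longer requires FFTW, since oneMKL (part of oneAPI) provides the FFTs. A minimal sketch, assuming the oneAPI toolchain is already set up:

```bash
# Configure and build WarpX for Intel GPUs with FFT support (no FFTW needed).
cmake -S . -B build \
    -DWarpX_COMPUTE=SYCL \
    -DWarpX_FFT=ON
cmake --build build -j 4
```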
Follow-up to #5127 X-ref: https://github.com/spack/spack/pull/46765 --- .github/workflows/intel.yml | 1 + cmake/dependencies/FFT.cmake | 16 ++++++++++++---- 2 files changed, 13 insertions(+), 4 deletions(-) diff --git a/.github/workflows/intel.yml b/.github/workflows/intel.yml index 1365fa76865..f27181c2e20 100644 --- a/.github/workflows/intel.yml +++ b/.github/workflows/intel.yml @@ -184,6 +184,7 @@ jobs: -DCMAKE_VERBOSE_MAKEFILE=ON \ -DWarpX_COMPUTE=SYCL \ -DWarpX_EB=ON \ + -DWarpX_FFT=ON \ -DWarpX_PYTHON=ON \ -DWarpX_MPI=OFF \ -DWarpX_OPENPMD=ON \ diff --git a/cmake/dependencies/FFT.cmake b/cmake/dependencies/FFT.cmake index 571006e8530..df0ef11ae53 100644 --- a/cmake/dependencies/FFT.cmake +++ b/cmake/dependencies/FFT.cmake @@ -48,14 +48,20 @@ if(ABLASTR_FFT) # # cuFFT (CUDA) - # TODO: check if `find_package` search works + if(WarpX_COMPUTE STREQUAL CUDA) + # nothing to do (cuFFT is part of the CUDA SDK) + # TODO: check if `find_package` search works for cuFFT # rocFFT (HIP) - if(WarpX_COMPUTE STREQUAL HIP) + elseif(WarpX_COMPUTE STREQUAL HIP) find_package(rocfft REQUIRED) - # FFTW (NOACC, OMP, SYCL) - elseif(NOT WarpX_COMPUTE STREQUAL CUDA) + elseif(WarpX_COMPUTE STREQUAL SYCL) + # nothing to do (oneMKL is part of oneAPI) + # TODO: check if `find_package` search works for oneMKL + + # FFTW (NOACC, OMP) + else() # On Windows, try searching for FFTW3(f)Config.cmake files first # Installed .pc files wrongly and unconditionally add -lm # https://github.com/FFTW/fftw3/issues/236 @@ -106,6 +112,8 @@ if(ABLASTR_FFT) warpx_make_third_party_includes_system(cufft FFT) elseif(WarpX_COMPUTE STREQUAL HIP) warpx_make_third_party_includes_system(roc::rocfft FFT) + elseif(WarpX_COMPUTE STREQUAL SYCL) + warpx_make_third_party_includes_system(AMReX::SYCL FFT) else() if(WarpX_FFTW_SEARCH STREQUAL CMAKE) warpx_make_third_party_includes_system(FFTW3::fftw3${HFFTWp} FFT) From b2840be3ccd9cd886a9d30eace7d6a01d7b6d1ff Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 9 Oct 2024 23:58:26 -0700 Subject: [PATCH 039/278] Fix CI: CodeQL Setup (#5385) Fix broken Python setup in CodeQL CI. --- .github/workflows/codeql.yml | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/.github/workflows/codeql.yml b/.github/workflows/codeql.yml index 5c36b9d9f21..e3549ae340a 100644 --- a/.github/workflows/codeql.yml +++ b/.github/workflows/codeql.yml @@ -31,6 +31,11 @@ jobs: - name: Checkout uses: actions/checkout@v4 + - uses: actions/setup-python@v5 + name: Install Python + with: + python-version: '3.x' + - name: Install Packages (C++) if: ${{ matrix.language == 'cpp' }} run: | @@ -38,9 +43,10 @@ jobs: sudo apt-get install --yes cmake openmpi-bin libopenmpi-dev libhdf5-openmpi-dev libadios-openmpi-dev ccache python -m pip install --upgrade pip + python -m pip install --upgrade pipx python -m pip install --upgrade wheel python -m pip install --upgrade cmake - export CMAKE="$HOME/.local/bin/cmake" && echo "CMAKE=$CMAKE" >> $GITHUB_ENV + python -m pipx install cmake - name: Set Up Cache if: ${{ matrix.language == 'cpp' }} @@ -54,7 +60,7 @@ jobs: - name: Configure (C++) if: ${{ matrix.language == 'cpp' }} run: | - $CMAKE -S . -B build -DWarpX_OPENPMD=ON + cmake -S . 
-B build -DWarpX_OPENPMD=ON

       - name: Initialize CodeQL
         uses: github/codeql-action/init@v3
@@ -75,7 +81,7 @@
         export CCACHE_MAXSIZE=100M
         ccache -z

-        $CMAKE --build build -j 4
+        cmake --build build -j 4

         ccache -s
         du -hs ~/.cache/ccache
@@ -83,7 +89,7 @@
         # Make sure CodeQL has something to do
         touch Source/Utils/WarpXVersion.cpp
         export CCACHE_DISABLE=1
-        $CMAKE --build build -j 4
+        cmake --build build -j 4

       - name: Perform CodeQL Analysis
         uses: github/codeql-action/analyze@v3

From a716670ba97e738456241791f933bbccfa3bdbce Mon Sep 17 00:00:00 2001
From: Remi Lehe 
Date: Thu, 10 Oct 2024 09:52:42 -0700
Subject: [PATCH 040/278] Generalize differential luminosity for photons
 (#5222)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The differential luminosity diagnostic was not valid for particles with
mass 0. This PR generalizes the code for photons by expressing the
center-of-mass energy with the 4-momentum:

:math:`\mathcal{E}^* = \sqrt{m_1^2c^4 + m_2^2c^4 + 2 c^2 {p_1}^\mu {p_2}_\mu}`

which is valid for photons as well.

I also slightly simplified the code that computes the term
:math:`\sqrt{|\boldsymbol{v}_1 - \boldsymbol{v}_2|^2 - |\boldsymbol{v}_1\times\boldsymbol{v}_2|^2/c^2}`.

I also added a test using photons. This is done by extracting a base
input script from the existing luminosity test involving **electrons
and positrons**, and creating an input script involving photons that
leverages the base input script.
---
 Docs/source/usage/parameters.rst              |  9 +--
 Examples/Tests/diff_lumi_diag/CMakeLists.txt  | 14 +++-
 Examples/Tests/diff_lumi_diag/analysis.py     | 16 ++++-
 ..._test_3d_diff_lumi_diag => inputs_base_3d} | 29 +--------
 .../inputs_test_3d_diff_lumi_diag_leptons     | 31 +++++++++
 .../inputs_test_3d_diff_lumi_diag_photons     | 28 ++++++++
 ...on => test_3d_diff_lumi_diag_leptons.json} |  0
 .../test_3d_diff_lumi_diag_photons.json       | 24 +++++++
 .../ReducedDiags/DifferentialLuminosity.cpp   | 64 +++++++++++++------
 9 files changed, 162 insertions(+), 53 deletions(-)
 rename Examples/Tests/diff_lumi_diag/{inputs_test_3d_diff_lumi_diag => inputs_base_3d} (81%)
 create mode 100644 Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_leptons
 create mode 100644 Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_photons
 rename Regression/Checksum/benchmarks_json/{test_3d_diff_lumi_diag.json => test_3d_diff_lumi_diag_leptons.json} (100%)
 create mode 100644 Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag_photons.json

diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst
index b9d82d5014a..910ce448c0f 100644
--- a/Docs/source/usage/parameters.rst
+++ b/Docs/source/usage/parameters.rst
@@ -3466,14 +3466,15 @@ Reduced Diagnostics
        \frac{d\mathcal{L}}{d\mathcal{E}^*}(\mathcal{E}^*, t) = \int_0^t dt'\int d\boldsymbol{x}\,d\boldsymbol{p}_1 d\boldsymbol{p}_2\;
        \sqrt{ |\boldsymbol{v}_1 - \boldsymbol{v}_2|^2 - |\boldsymbol{v}_1\times\boldsymbol{v}_2|^2/c^2} \\
        f_1(\boldsymbol{x}, \boldsymbol{p}_1, t')f_2(\boldsymbol{x}, \boldsymbol{p}_2, t') \delta(\mathcal{E}^* - \mathcal{E}^*(\boldsymbol{p}_1, \boldsymbol{p}_2))

-    where :math:`\mathcal{E}^*(\boldsymbol{p}_1, \boldsymbol{p}_2) = \sqrt{m_1^2c^4 + m_2^2c^4 + 2(m_1 m_2 c^4
-    \gamma_1 \gamma_2 - \boldsymbol{p}_1\cdot\boldsymbol{p}_2 c^2)}` is the energy in the center-of-mass frame,
-    and :math:`f_i` is the distribution function of species :math:`i`. 
Note that, if :math:`\sigma^*(\mathcal{E}^*)` + where :math:`f_i` is the distribution function of species :math:`i` and + :math:`\mathcal{E}^*(\boldsymbol{p}_1, \boldsymbol{p}_2) = \sqrt{m_1^2c^4 + m_2^2c^4 + 2 c^2{p_1}^\mu {p_2}_\mu}` + is the energy in the center-of-mass frame, where :math:`p^\mu = (\sqrt{m^2 c^2 + \boldsymbol{p}^2}, \boldsymbol{p})` + represents the 4-momentum. Note that, if :math:`\sigma^*(\mathcal{E}^*)` is the center-of-mass cross-section of a given collision process, then :math:`\int d\mathcal{E}^* \frac{d\mathcal{L}}{d\mathcal{E}^*} (\mathcal{E}^*, t)\sigma^*(\mathcal{E}^*)` gives the total number of collisions of that process (from the beginning of the simulation up until time :math:`t`). - The differential luminosity is given in units of :math:`\text{m}^{-2}.\text{eV}^{-1}`. For collider-relevant WarpX simulations + The differential luminosity is given in units of :math:`\text{m}^{-2}.\text{eV}^{-1}`. For collider-relevant WarpX simulations involving two crossing, high-energy beams of particles, the differential luminosity in :math:`\text{s}^{-1}.\text{m}^{-2}.\text{eV}^{-1}` can be obtained by multiplying the above differential luminosity by the expected repetition rate of the beams. diff --git a/Examples/Tests/diff_lumi_diag/CMakeLists.txt b/Examples/Tests/diff_lumi_diag/CMakeLists.txt index 1651d74115e..481847a023d 100644 --- a/Examples/Tests/diff_lumi_diag/CMakeLists.txt +++ b/Examples/Tests/diff_lumi_diag/CMakeLists.txt @@ -2,10 +2,20 @@ # add_warpx_test( - test_3d_diff_lumi_diag # name + test_3d_diff_lumi_diag_leptons # name 3 # dims 2 # nprocs - inputs_test_3d_diff_lumi_diag # inputs + inputs_test_3d_diff_lumi_diag_leptons # inputs + analysis.py # analysis + diags/diag1000080 # output + OFF # dependency +) + +add_warpx_test( + test_3d_diff_lumi_diag_photons # name + 3 # dims + 2 # nprocs + inputs_test_3d_diff_lumi_diag_photons # inputs analysis.py # analysis diags/diag1000080 # output OFF # dependency diff --git a/Examples/Tests/diff_lumi_diag/analysis.py b/Examples/Tests/diff_lumi_diag/analysis.py index 8f2061ff1dc..41501b1915d 100755 --- a/Examples/Tests/diff_lumi_diag/analysis.py +++ b/Examples/Tests/diff_lumi_diag/analysis.py @@ -37,16 +37,28 @@ * np.exp(-((E_bin - 2 * E_beam) ** 2) / (2 * sigma_E**2)) ) +# Extract test name from path +test_name = os.path.split(os.getcwd())[1] +print("test_name", test_name) + +# Pick tolerance +if "leptons" in test_name: + tol = 1e-2 +elif "photons" in test_name: + # In the photons case, the particles are + # initialized from a density distribution ; + # tolerance is larger due to lower particle statistics + tol = 6e-2 + # Check that the simulation result and analytical result match error = abs(dL_dE_sim - dL_dE_th).max() / abs(dL_dE_th).max() -tol = 1e-2 print("Relative error: ", error) print("Tolerance: ", tol) assert error < tol # compare checksums evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], + test_name=test_name, output_file=sys.argv[1], rtol=1e-2, ) diff --git a/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag b/Examples/Tests/diff_lumi_diag/inputs_base_3d similarity index 81% rename from Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag rename to Examples/Tests/diff_lumi_diag/inputs_base_3d index e8854937b6e..ba3c823b52b 100644 --- a/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag +++ b/Examples/Tests/diff_lumi_diag/inputs_base_3d @@ -6,12 +6,11 @@ my_constants.mc2_eV = m_e*clight*clight/q_e # BEAMS my_constants.beam_energy_eV = 125.e9 my_constants.beam_gamma = 
beam_energy_eV/(mc2_eV) -my_constants.beam_charge = 1.2e10*q_e +my_constants.beam_N = 1.2e10 my_constants.sigmax = 500e-9 my_constants.sigmay = 10e-9 my_constants.sigmaz = 300e-3 -my_constants.muz = -4*sigmaz -my_constants.nmacropart = 2e5 +my_constants.muz = 4*sigmaz # BOX my_constants.Lx = 8*sigmax @@ -62,17 +61,6 @@ warpx.poisson_solver = fft ################################# particles.species_names = beam1 beam2 -beam1.species_type = electron -beam1.injection_style = gaussian_beam -beam1.x_rms = sigmax -beam1.y_rms = sigmay -beam1.z_rms = sigmaz -beam1.x_m = 0 -beam1.y_m = 0 -beam1.z_m = muz -beam1.npart = nmacropart -beam1.q_tot = -beam_charge -beam1.z_cut = 4 beam1.momentum_distribution_type = gaussian beam1.uz_m = beam_gamma beam1.uy_m = 0.0 @@ -82,17 +70,6 @@ beam1.uy_th = 0 beam1.uz_th = 0.02*beam_gamma beam1.do_not_deposit = 1 -beam2.species_type = positron -beam2.injection_style = gaussian_beam -beam2.x_rms = sigmax -beam2.y_rms = sigmay -beam2.z_rms = sigmaz -beam2.x_m = 0 -beam2.y_m = 0 -beam2.z_m = -muz -beam2.npart = nmacropart -beam2.q_tot = beam_charge -beam2.z_cut = 4 beam2.momentum_distribution_type = gaussian beam2.uz_m = -beam_gamma beam2.uy_m = 0.0 @@ -108,7 +85,7 @@ beam2.do_not_deposit = 1 # FULL diagnostics.diags_names = diag1 -diag1.intervals = 1 +diag1.intervals = 80 diag1.diag_type = Full diag1.write_species = 1 diag1.fields_to_plot = rho_beam1 rho_beam2 diff --git a/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_leptons b/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_leptons new file mode 100644 index 00000000000..1cded30d3af --- /dev/null +++ b/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_leptons @@ -0,0 +1,31 @@ +# base input parameters +FILE = inputs_base_3d + +# Test with electrons/positrons: use gaussian beam distribution +# by providing the total charge (q_tot) + +my_constants.nmacropart = 2e5 + +beam1.species_type = electron +beam1.injection_style = gaussian_beam +beam1.x_rms = sigmax +beam1.y_rms = sigmay +beam1.z_rms = sigmaz +beam1.x_m = 0 +beam1.y_m = 0 +beam1.z_m = -muz +beam1.npart = nmacropart +beam1.q_tot = -beam_N*q_e +beam1.z_cut = 4 + +beam2.species_type = positron +beam2.injection_style = gaussian_beam +beam2.x_rms = sigmax +beam2.y_rms = sigmay +beam2.z_rms = sigmaz +beam2.x_m = 0 +beam2.y_m = 0 +beam2.z_m = muz +beam2.npart = nmacropart +beam2.q_tot = beam_N*q_e +beam2.z_cut = 4 diff --git a/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_photons b/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_photons new file mode 100644 index 00000000000..f0ef254d911 --- /dev/null +++ b/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_photons @@ -0,0 +1,28 @@ +# base input parameters +FILE = inputs_base_3d + +# Test with electrons/positrons: use parse_density_function + +beam1.species_type = electron +beam1.injection_style = "NUniformPerCell" +beam1.num_particles_per_cell_each_dim = 1 1 1 +beam1.profile = parse_density_function +beam1.density_function(x,y,z) = "beam_N/(sqrt(2*pi)*2*pi*sigmax*sigmay*sigmaz)*exp(-x*x/(2*sigmax*sigmax)-y*y/(2*sigmay*sigmay)-(z+muz)*(z+muz)/(2*sigmaz*sigmaz))" +beam1.xmin = -4*sigmax +beam1.xmax = 4*sigmax +beam1.ymin = -4*sigmay +beam1.ymax = 4*sigmay +beam1.zmin =-muz-4*sigmaz +beam1.zmax =-muz+4*sigmaz + +beam2.species_type = positron +beam2.injection_style = "NUniformPerCell" +beam2.num_particles_per_cell_each_dim = 1 1 1 +beam2.profile = parse_density_function +beam2.xmin = -4*sigmax +beam2.xmax = 4*sigmax +beam2.ymin = -4*sigmay +beam2.ymax = 
4*sigmay +beam2.zmin = muz-4*sigmaz +beam2.zmax = muz+4*sigmaz +beam2.density_function(x,y,z) = "beam_N/(sqrt(2*pi)*2*pi*sigmax*sigmay*sigmaz)*exp(-x*x/(2*sigmax*sigmax)-y*y/(2*sigmay*sigmay)-(z-muz)*(z-muz)/(2*sigmaz*sigmaz))" diff --git a/Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag.json b/Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag_leptons.json similarity index 100% rename from Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag.json rename to Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag_leptons.json diff --git a/Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag_photons.json b/Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag_photons.json new file mode 100644 index 00000000000..09b2031cdd2 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag_photons.json @@ -0,0 +1,24 @@ +{ + "lev=0": { + "rho_beam1": 656097367.2335038, + "rho_beam2": 656097367.2335038 + }, + "beam1": { + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 1.7512476113279403e-11, + "particle_position_x": 0.2621440000000001, + "particle_position_y": 0.005242880000000001, + "particle_position_z": 314572.79999473685, + "particle_weight": 11997744756.90957 + }, + "beam2": { + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 1.7513431895752007e-11, + "particle_position_x": 0.2621440000000001, + "particle_position_y": 0.005242880000000001, + "particle_position_z": 314572.79999472946, + "particle_weight": 11997744756.909573 + } +} \ No newline at end of file diff --git a/Source/Diagnostics/ReducedDiags/DifferentialLuminosity.cpp b/Source/Diagnostics/ReducedDiags/DifferentialLuminosity.cpp index 59a32cf0545..ef5e0da6014 100644 --- a/Source/Diagnostics/ReducedDiags/DifferentialLuminosity.cpp +++ b/Source/Diagnostics/ReducedDiags/DifferentialLuminosity.cpp @@ -132,9 +132,8 @@ void DifferentialLuminosity::ComputeDiags (int step) // Since this diagnostic *accumulates* the luminosity in the // array d_data, we add contributions at *each timestep*, but // we only write the data to file at intervals specified by the user. 
- - const Real c2_over_qe = PhysConst::c*PhysConst::c/PhysConst::q_e; - const Real inv_c2 = 1._rt/(PhysConst::c*PhysConst::c); + const Real c_sq = PhysConst::c*PhysConst::c; + const Real c_over_qe = PhysConst::c/PhysConst::q_e; // get a reference to WarpX instance auto& warpx = WarpX::GetInstance(); @@ -187,6 +186,7 @@ void DifferentialLuminosity::ComputeDiags (int step) amrex::ParticleReal * const AMREX_RESTRICT u1x = soa_1.m_rdata[PIdx::ux]; amrex::ParticleReal * const AMREX_RESTRICT u1y = soa_1.m_rdata[PIdx::uy]; // v*gamma=p/m amrex::ParticleReal * const AMREX_RESTRICT u1z = soa_1.m_rdata[PIdx::uz]; + bool const species1_is_photon = species_1.AmIA(); const auto soa_2 = ptile_2.getParticleTileData(); index_type* AMREX_RESTRICT indices_2 = bins_2.permutationPtr(); @@ -196,6 +196,7 @@ void DifferentialLuminosity::ComputeDiags (int step) amrex::ParticleReal * const AMREX_RESTRICT u2x = soa_2.m_rdata[PIdx::ux]; amrex::ParticleReal * const AMREX_RESTRICT u2y = soa_2.m_rdata[PIdx::uy]; amrex::ParticleReal * const AMREX_RESTRICT u2z = soa_2.m_rdata[PIdx::uz]; + bool const species2_is_photon = species_2.AmIA(); // Extract low-level data auto const n_cells = static_cast(bins_1.numBins()); @@ -218,34 +219,59 @@ void DifferentialLuminosity::ComputeDiags (int step) index_type const j_1 = indices_1[i_1]; index_type const j_2 = indices_2[i_2]; - Real const u1_square = u1x[j_1]*u1x[j_1] + u1y[j_1]*u1y[j_1] + u1z[j_1]*u1z[j_1]; - Real const gamma1 = std::sqrt(1._rt + u1_square*inv_c2); - Real const u2_square = u2x[j_2]*u2x[j_2] + u2y[j_2]*u2y[j_2] + u2z[j_2]*u2z[j_2]; - Real const gamma2 = std::sqrt(1._rt + u2_square*inv_c2); - Real const u1_dot_u2 = u1x[j_1]*u2x[j_2] + u1y[j_1]*u2y[j_2] + u1z[j_1]*u2z[j_2]; + Real p1t=0, p1x=0, p1y=0, p1z=0; // components of 4-momentum of particle 1 + Real const u1_sq = u1x[j_1]*u1x[j_1] + u1y[j_1]*u1y[j_1] + u1z[j_1]*u1z[j_1]; + if (species1_is_photon) { + // photon case (momentum is normalized by m_e in WarpX) + p1t = PhysConst::m_e*std::sqrt( u1_sq ); + p1x = PhysConst::m_e*u1x[j_1]; + p1y = PhysConst::m_e*u1y[j_1]; + p1z = PhysConst::m_e*u1z[j_1]; + } else { + p1t = m1*std::sqrt( c_sq + u1_sq ); + p1x = m1*u1x[j_1]; + p1y = m1*u1y[j_1]; + p1z = m1*u1z[j_1]; + } + + Real p2t=0, p2x=0, p2y=0, p2z=0; // components of 4-momentum of particle 2 + Real const u2_sq = u2x[j_2]*u2x[j_2] + u2y[j_2]*u2y[j_2] + u2z[j_2]*u2z[j_2]; + if (species2_is_photon) { + // photon case (momentum is normalized by m_e in WarpX) + p2t = PhysConst::m_e*std::sqrt(u2_sq); + p2x = PhysConst::m_e*u2x[j_2]; + p2y = PhysConst::m_e*u2y[j_2]; + p2z = PhysConst::m_e*u2z[j_2]; + } else { + p2t = m2*std::sqrt( c_sq + u2_sq ); + p2x = m2*u2x[j_2]; + p2y = m2*u2y[j_2]; + p2z = m2*u2z[j_2]; + } // center of mass energy in eV - Real const E_com = c2_over_qe * std::sqrt(m1*m1 + m2*m2 + 2*m1*m2* (gamma1*gamma2 - u1_dot_u2*inv_c2)); + Real const E_com = c_over_qe * std::sqrt(m1*m1*c_sq + m2*m2*c_sq + 2*(p1t*p2t - p1x*p2x - p1y*p2y - p1z*p2z)); // determine particle bin int const bin = int(Math::floor((E_com-bin_min)/bin_size)); if ( bin<0 || bin>=num_bins ) { continue; } // discard if out-of-range - Real const v1_minus_v2_x = u1x[j_1]/gamma1 - u2x[j_2]/gamma2; - Real const v1_minus_v2_y = u1y[j_1]/gamma1 - u2y[j_2]/gamma2; - Real const v1_minus_v2_z = u1z[j_1]/gamma1 - u2z[j_2]/gamma2; - Real const v1_minus_v2_square = v1_minus_v2_x*v1_minus_v2_x + v1_minus_v2_y*v1_minus_v2_y + v1_minus_v2_z*v1_minus_v2_z; + Real const inv_p1t = 1.0_rt/p1t; + Real const inv_p2t = 1.0_rt/p2t; - Real const u1_cross_u2_x = 
u1y[j_1]*u2z[j_2] - u1z[j_1]*u2y[j_2]; - Real const u1_cross_u2_y = u1z[j_1]*u2x[j_2] - u1x[j_1]*u2z[j_2]; - Real const u1_cross_u2_z = u1x[j_1]*u2y[j_2] - u1y[j_1]*u2x[j_2]; + Real const beta1_sq = (p1x*p1x + p1y*p1y + p1z*p1z) * inv_p1t*inv_p1t; + Real const beta2_sq = (p2x*p2x + p2y*p2y + p2z*p2z) * inv_p2t*inv_p2t; + Real const beta1_dot_beta2 = (p1x*p2x + p1y*p2y + p1z*p2z) * inv_p1t*inv_p2t; - Real const v1_cross_v2_square = (u1_cross_u2_x*u1_cross_u2_x + u1_cross_u2_y*u1_cross_u2_y + u1_cross_u2_z*u1_cross_u2_z) / (gamma1*gamma1*gamma2*gamma2); + // Here we use the fact that: + // (v1 - v2)^2 = v1^2 + v2^2 - 2 v1.v2 + // and (v1 x v2)^2 = v1^2 v2^2 - (v1.v2)^2 + // we also use beta=v/c instead of v - Real const radicand = v1_minus_v2_square - v1_cross_v2_square * inv_c2; + Real const radicand = beta1_sq + beta2_sq - 2*beta1_dot_beta2 - beta1_sq*beta2_sq + beta1_dot_beta2*beta1_dot_beta2; - Real const dL_dEcom = std::sqrt( radicand ) * w1[j_1] * w2[j_2] / dV / bin_size * dt; // m^-2 eV^-1 + Real const dL_dEcom = PhysConst::c * std::sqrt( radicand ) * w1[j_1] * w2[j_2] / dV / bin_size * dt; // m^-2 eV^-1 amrex::HostDevice::Atomic::Add(&dptr_data[bin], dL_dEcom); From 005ef775f2a35feefe69d4e1f7069c99416b088f Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 10 Oct 2024 12:52:30 -0700 Subject: [PATCH 041/278] SYCL: 1D EB Compile (#5384) Attempt to fix 1D SYCL EB compile errors (throw not allowed on device). X-ref: https://github.com/spack/spack/pull/46765#issuecomment-2403937237 --- Source/EmbeddedBoundary/DistanceToEB.H | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/Source/EmbeddedBoundary/DistanceToEB.H b/Source/EmbeddedBoundary/DistanceToEB.H index 0c13724380c..0b27fd054cd 100644 --- a/Source/EmbeddedBoundary/DistanceToEB.H +++ b/Source/EmbeddedBoundary/DistanceToEB.H @@ -121,7 +121,13 @@ amrex::RealVect interp_normal (int i, int j, int k, const amrex::Real W[AMREX_SP #else amrex::ignore_unused(i, j, k, ic, jc, kc, W, Wc, phi, dxi); amrex::RealVect normal(0.0); - WARPX_ABORT_WITH_MESSAGE("Error: interp_distance not yet implemented in 1D"); + + AMREX_IF_ON_DEVICE(( + AMREX_DEVICE_ASSERT(0); + )) + AMREX_IF_ON_HOST(( + WARPX_ABORT_WITH_MESSAGE("Error: interp_normal not yet implemented in 1D"); + )) #endif return normal; From 4434e87ff9ed3dca833f4afec9c610d57b006364 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 11 Oct 2024 00:07:37 +0200 Subject: [PATCH 042/278] Move `isAnyBoundaryPML` to Warpx.cpp (#5353) `isAnyBoundaryPML` is used only inside `WarpX.cpp`. It does not need to be a member function of the WarpX class and it can be moved into an anonymous namespace inside `WarpX.cpp`. 
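As a minimal, self-contained illustration of the pattern (not WarpX code; the enum and values below are made up), a free function placed in an anonymous namespace gets internal linkage, so it is visible only within its own translation unit and no longer needs a declaration in the class header:

```cpp
#include <algorithm>
#include <array>
#include <iostream>

// Hypothetical stand-in for the real FieldBoundaryType enum.
enum class BoundaryType { Periodic, PEC, PML };

namespace
{
    // Internal linkage: callable only from within this .cpp file.
    [[nodiscard]] bool isAnyBoundaryPML (
        std::array<BoundaryType, 3> const& lo,
        std::array<BoundaryType, 3> const& hi)
    {
        auto const is_pml = [](BoundaryType b) { return b == BoundaryType::PML; };
        return std::any_of(lo.begin(), lo.end(), is_pml) ||
               std::any_of(hi.begin(), hi.end(), is_pml);
    }
}

int main ()
{
    std::array<BoundaryType, 3> const lo{BoundaryType::Periodic, BoundaryType::PML, BoundaryType::PEC};
    std::array<BoundaryType, 3> const hi{BoundaryType::Periodic, BoundaryType::PEC, BoundaryType::PEC};
    std::cout << std::boolalpha << isAnyBoundaryPML(lo, hi) << '\n'; // prints "true"
}
```

Callers in the same file then invoke it as `::isAnyBoundaryPML(...)`, as the diff below does.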
---
 Source/WarpX.H | 2 +-
 Source/WarpX.cpp | 32 +++++++++++++++++++-------------
 2 files changed, 20 insertions(+), 14 deletions(-)

diff --git a/Source/WarpX.H b/Source/WarpX.H
index c61fb92315f..bad63cd44d9 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -689,7 +689,7 @@ public:
     void DampJPML (int lev, PatchType patch_type);
     void CopyJPML ();
-    bool isAnyBoundaryPML();
+
     /** True if any of the particle boundary condition type is Thermal */
     static bool isAnyParticleBoundaryThermal();

diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index 250bab273d0..d1e3108e32a 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -195,6 +195,22 @@ amrex::IntVect m_rho_nodal_flag;
 WarpX* WarpX::m_instance = nullptr;

+namespace
+{
+
+    [[nodiscard]] bool
+    isAnyBoundaryPML(
+        const amrex::Array& field_boundary_lo,
+        const amrex::Array& field_boundary_hi)
+    {
+        constexpr auto is_pml = [](const FieldBoundaryType fbt) {return (fbt == FieldBoundaryType::PML);};
+        const auto is_any_pml =
+            std::any_of(field_boundary_lo.begin(), field_boundary_lo.end(), is_pml) ||
+            std::any_of(field_boundary_hi.begin(), field_boundary_hi.end(), is_pml);
+        return is_any_pml;
+    }
+}
+
 void WarpX::MakeWarpX ()
 {
     ParseGeometryInput();
@@ -878,7 +894,7 @@ WarpX::ReadParameters ()
     }

 #ifdef WARPX_DIM_RZ
-    WARPX_ALWAYS_ASSERT_WITH_MESSAGE( isAnyBoundaryPML() == false || electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD,
+    WARPX_ALWAYS_ASSERT_WITH_MESSAGE( ::isAnyBoundaryPML(field_boundary_lo, field_boundary_hi) == false || electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD,
         "PML are not implemented in RZ geometry with FDTD; please set a different boundary condition using boundary.field_lo and boundary.field_hi.");
     WARPX_ALWAYS_ASSERT_WITH_MESSAGE( field_boundary_lo[1] != FieldBoundaryType::PML && field_boundary_hi[1] != FieldBoundaryType::PML,
         "PML are not implemented in RZ geometry along z; please set a different boundary condition using boundary.field_lo and boundary.field_hi.");
@@ -2014,7 +2030,7 @@ WarpX::AllocLevelData (int lev, const BoxArray& ba, const DistributionMapping& d
         safe_guard_cells,
         WarpX::do_multi_J,
         WarpX::fft_do_time_averaging,
-        WarpX::isAnyBoundaryPML(),
+        ::isAnyBoundaryPML(field_boundary_lo, field_boundary_hi),
         WarpX::do_pml_in_domain,
         WarpX::pml_ncell,
         this->refRatio(),
@@ -2742,7 +2758,7 @@ void WarpX::AllocLevelSpectralSolverRZ (amrex::Vector Date: Thu, 10 Oct 2024 16:31:33 -0700
Subject: [PATCH 043/278] Implement injection of particles from the embedded
 boundary (#5208)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# Overview

This PR implements flux injection of particles from the embedded boundary. It also adds a test that emits particles from a sphere in 3D as represented here:

![movie](https://github.com/user-attachments/assets/1e76cf87-fd7d-4fa3-8c83-363956226a42)

as well as RZ and 2D versions of this test. (In 2D, the particles are emitted from a cylinder.)

As can be seen in the above movie, particles are emitted from a single point within each cell (the centroid of the EB), instead of being emitted uniformly on the surface of the EB within the cell. This could be improved in a future PR.

The implementation as well as the user interface largely re-use the infrastructure for the flux injection from a plane. However, as a result, the user interface is perhaps not very intuitive. In particular, when specifying the velocity distribution, `uz` represents the direction normal to the EB while `ux`, `uy` represent the tangential directions.
This again will be improved in a follow-up PR.

# Follow-up PRs

- [ ] Change the interface of `gaussianflux` so as to specify the tangential and normal distribution. In other words, instead of:
```
electron.momentum_distribution_type = gaussianflux
electron.ux_th = 0.01
electron.uy_th = 0.01
electron.uz_th = 0.1
electron.uz_m = 0.07
```
we would do:
```
electron.momentum_distribution_type = gaussianflux
electron.u_tangential_th = 0.01 # Tangential to the emitting surface
electron.u_normal_th = 0.1 # Normal to the emitting surface
electron.u_normal_m = 0.07
```
- [ ] Change the interface so that the user does not need to specify the number of macroparticles per cell (which is problematic for EB, since different cells contain different EB surface areas and should in general emit different numbers of macroparticles). Instead, we would specify the weight of macroparticles, i.e. instead of
```
electron.injection_style = NFluxPerCell
electron.num_particles_per_cell = 100
electron.flux_function(x,y,z,t) = "1."
```
we would do
```
electron.injection_style = NFluxPerCell
electron.flux_macroweight = 200 # Number of physical particles per macroparticle
electron.flux_function(x,y,z,t) = "4e12" # Number of physical particles emitted per unit time and surface
```
- [ ] Add a way for the user to specify the total flux across the whole emitting surface. Example:
```
electron.flux_function(x,y,z,t) = "(x>-1)*(x<1)"
electron.total_flux = 4e12 # physical particle / second (not per unit area)
```
(In that case, `flux_function` would be rescaled internally by WarpX so as to emit the right number of particles.)
- [ ] Add PICMI interface
- [ ] Emit the particles uniformly from the surface of the EB within one cell
---
 Docs/source/usage/parameters.rst | 13 +-
 Examples/Tests/flux_injection/CMakeLists.txt | 30 ++++
 .../analysis_flux_injection_from_eb.py | 161 +++++++++++++++++
 .../Tests/flux_injection/inputs_base_from_eb | 42 +++++
 .../inputs_test_2d_flux_injection_from_eb | 13 ++
 .../inputs_test_3d_flux_injection_from_eb | 13 ++
 .../inputs_test_rz_flux_injection_from_eb | 15 ++
 .../test_2d_flux_injection_from_eb.json | 11 ++
 .../test_3d_flux_injection_from_eb.json | 12 ++
 .../test_rz_flux_injection_from_eb.json | 12 ++
 Source/Initialization/PlasmaInjector.H | 2 +
 Source/Initialization/PlasmaInjector.cpp | 78 ++++----
 Source/Particles/AddPlasmaUtilities.H | 139 +++++++++++++-
 .../Particles/PhysicalParticleContainer.cpp | 169 +++++++++++++-----
 14 files changed, 627 insertions(+), 83 deletions(-)
 create mode 100755 Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py
 create mode 100644 Examples/Tests/flux_injection/inputs_base_from_eb
 create mode 100644 Examples/Tests/flux_injection/inputs_test_2d_flux_injection_from_eb
 create mode 100644 Examples/Tests/flux_injection/inputs_test_3d_flux_injection_from_eb
 create mode 100644 Examples/Tests/flux_injection/inputs_test_rz_flux_injection_from_eb
 create mode 100644 Regression/Checksum/benchmarks_json/test_2d_flux_injection_from_eb.json
 create mode 100644 Regression/Checksum/benchmarks_json/test_3d_flux_injection_from_eb.json
 create mode 100644 Regression/Checksum/benchmarks_json/test_rz_flux_injection_from_eb.json

diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst
index 910ce448c0f..5014d421fb8 100644
--- a/Docs/source/usage/parameters.rst
+++ b/Docs/source/usage/parameters.rst
@@ -971,16 +971,21 @@ Particle initialization
     The ``external_file`` option is currently implemented for 2D, 3D and RZ geometries, with record components in the
cartesian coordinates ``(x,y,z)`` for 3D and RZ, and ``(x,z)`` for 2D. For more information on the `openPMD format `__ and how to build WarpX with it, please visit :ref:`the install section `. - * ``NFluxPerCell``: Continuously inject a flux of macroparticles from a planar surface. + * ``NFluxPerCell``: Continuously inject a flux of macroparticles from a surface. The emitting surface can be chosen to be either a plane + defined by the user (using some of the parameters listed below), or the embedded boundary (see :ref:`Embedded Boundary Conditions `). This requires the additional parameters: * ``.flux_profile`` (see the description of this parameter further below) - * ``.surface_flux_pos`` (`double`, location of the injection plane [meter]) + * ``.inject_from_embedded_boundary`` (`0` or `1`, default `0` ; whether to inject from the embedded boundary or from a user-specified plane. + When injecting from the embedded boundary, the momentum distribution specified by the user along ``z`` (see e.g. ``uz_m``, ``uz_th`` below) is interpreted + as the momentum distribution along the local normal to the embedded boundary.) - * ``.flux_normal_axis`` (`x`, `y`, or `z` for 3D, `x` or `z` for 2D, or `r`, `t`, or `z` for RZ. When `flux_normal_axis` is `r` or `t`, the `x` and `y` components of the user-specified momentum distribution are interpreted as the `r` and `t` components respectively) + * ``.surface_flux_pos`` (only used when injecting from a plane, `double`, location of the injection plane [meter]) - * ``.flux_direction`` (`-1` or `+1`, direction of flux relative to the plane) + * ``.flux_normal_axis`` (only used when injecting from a plane, `x`, `y`, or `z` for 3D, `x` or `z` for 2D, or `r`, `t`, or `z` for RZ. When `flux_normal_axis` is `r` or `t`, the `x` and `y` components of the user-specified momentum distribution are interpreted as the `r` and `t` components respectively) + + * ``.flux_direction`` (only used when injecting from a plane, `-1` or `+1`, direction of flux relative to the plane) * ``.num_particles_per_cell`` (`double`) diff --git a/Examples/Tests/flux_injection/CMakeLists.txt b/Examples/Tests/flux_injection/CMakeLists.txt index d09b83d7618..0929fc3d4c4 100644 --- a/Examples/Tests/flux_injection/CMakeLists.txt +++ b/Examples/Tests/flux_injection/CMakeLists.txt @@ -20,3 +20,33 @@ add_warpx_test( diags/diag1000120 # output OFF # dependency ) + +add_warpx_test( + test_3d_flux_injection_from_eb # name + 3 # dims + 2 # nprocs + inputs_test_3d_flux_injection_from_eb # inputs + analysis_flux_injection_from_eb.py # analysis + diags/diag1000010 # output + OFF # dependency +) + +add_warpx_test( + test_rz_flux_injection_from_eb # name + RZ # dims + 2 # nprocs + inputs_test_rz_flux_injection_from_eb # inputs + analysis_flux_injection_from_eb.py # analysis + diags/diag1000010 # output + OFF # dependency +) + +add_warpx_test( + test_2d_flux_injection_from_eb # name + 2 # dims + 2 # nprocs + inputs_test_2d_flux_injection_from_eb # inputs + analysis_flux_injection_from_eb.py # analysis + diags/diag1000010 # output + OFF # dependency +) diff --git a/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py b/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py new file mode 100755 index 00000000000..36ff50bea06 --- /dev/null +++ b/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py @@ -0,0 +1,161 @@ +#!/usr/bin/env python3 +# +# Copyright 2024 Remi Lehe +# +# This file is part of WarpX. 
+# +# License: BSD-3-Clause-LBNL + +""" +This script tests the emission of particles from the embedded boundary. +(In this case, the embedded boundary is a sphere in 3D and RZ, a cylinder in 2D.) +We check that the embedded boundary emits the correct number of particles, and that +the particle distributions are consistent with the expected distributions. +""" + +import os +import re +import sys + +import matplotlib.pyplot as plt +import numpy as np +import yt +from scipy.constants import c, m_e +from scipy.special import erf + +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +import checksumAPI + +yt.funcs.mylog.setLevel(0) + +# Open plotfile specified in command line +fn = sys.argv[1] +ds = yt.load(fn) +ad = ds.all_data() +t_max = ds.current_time.item() # time of simulation + +# Extract the dimensionality of the simulation +with open("./warpx_used_inputs", "r") as f: + warpx_used_inputs = f.read() +if re.search("geometry.dims = 2", warpx_used_inputs): + dims = "2D" +elif re.search("geometry.dims = RZ", warpx_used_inputs): + dims = "RZ" +elif re.search("geometry.dims = 3", warpx_used_inputs): + dims = "3D" + +# Total number of electrons expected: +# Simulation parameters determine the total number of particles emitted (Ntot) +flux = 1.0 # in m^-2.s^-1, from the input script +R = 2.0 # in m, radius of the sphere +if dims == "3D" or dims == "RZ": + emission_surface = 4 * np.pi * R**2 # in m^2 +elif dims == "2D": + emission_surface = 2 * np.pi * R # in m +Ntot = flux * emission_surface * t_max + +# Parameters of the histogram +hist_bins = 50 +hist_range = [-0.5, 0.5] + + +# Define function that histograms and checks the data +def gaussian_dist(u, u_th): + return 1.0 / ((2 * np.pi) ** 0.5 * u_th) * np.exp(-(u**2) / (2 * u_th**2)) + + +def gaussian_flux_dist(u, u_th, u_m): + normalization_factor = u_th**2 * np.exp(-(u_m**2) / (2 * u_th**2)) + ( + np.pi / 2 + ) ** 0.5 * u_m * u_th * (1 + erf(u_m / (2**0.5 * u_th))) + result = ( + 1.0 + / normalization_factor + * np.where(u > 0, u * np.exp(-((u - u_m) ** 2) / (2 * u_th**2)), 0) + ) + return result + + +def compare_gaussian(u, w, u_th, label=""): + du = (hist_range[1] - hist_range[0]) / hist_bins + w_hist, u_hist = np.histogram(u, bins=hist_bins, weights=w / du, range=hist_range) + u_hist = 0.5 * (u_hist[1:] + u_hist[:-1]) + w_th = Ntot * gaussian_dist(u_hist, u_th) + plt.plot(u_hist, w_hist, label=label + ": simulation") + plt.plot(u_hist, w_th, "--", label=label + ": theory") + assert np.allclose(w_hist, w_th, atol=0.07 * w_th.max()) + + +def compare_gaussian_flux(u, w, u_th, u_m, label=""): + du = (hist_range[1] - hist_range[0]) / hist_bins + w_hist, u_hist = np.histogram(u, bins=hist_bins, weights=w / du, range=hist_range) + u_hist = 0.5 * (u_hist[1:] + u_hist[:-1]) + w_th = Ntot * gaussian_flux_dist(u_hist, u_th, u_m) + plt.plot(u_hist, w_hist, label=label + ": simulation") + plt.plot(u_hist, w_th, "--", label=label + ": theory") + assert np.allclose(w_hist, w_th, atol=0.05 * w_th.max()) + + +# Load data and perform check + +plt.figure() + +if dims == "3D": + x = ad["electron", "particle_position_x"].to_ndarray() + y = ad["electron", "particle_position_y"].to_ndarray() + z = ad["electron", "particle_position_z"].to_ndarray() +elif dims == "2D": + x = ad["electron", "particle_position_x"].to_ndarray() + y = np.zeros_like(x) + z = ad["electron", "particle_position_y"].to_ndarray() +elif dims == "RZ": + theta = ad["electron", "particle_theta"].to_ndarray() + r = ad["electron", "particle_position_x"].to_ndarray() + x = r * 
np.cos(theta)
+    y = r * np.sin(theta)
+    z = ad["electron", "particle_position_y"].to_ndarray()
+ux = ad["electron", "particle_momentum_x"].to_ndarray() / (m_e * c)
+uy = ad["electron", "particle_momentum_y"].to_ndarray() / (m_e * c)
+uz = ad["electron", "particle_momentum_z"].to_ndarray() / (m_e * c)
+w = ad["electron", "particle_weight"].to_ndarray()
+
+# Check that the total number of particles emitted is correct
+Ntot_sim = np.sum(w)
+print("Ntot_sim = ", Ntot_sim)
+print("Ntot = ", Ntot)
+assert np.isclose(Ntot_sim, Ntot, rtol=0.01)
+
+# Check that none of the particles are inside the EB
+# A factor 0.98 is applied to accommodate
+# the cut-cell approximation of the sphere
+assert np.all(x**2 + y**2 + z**2 > (0.98 * R) ** 2)
+
+# Check that the normal component of the velocity is consistent with the expected distribution
+r = np.sqrt(x**2 + y**2 + z**2)
+nx = x / r
+ny = y / r
+nz = z / r
+u_n = ux * nx + uy * ny + uz * nz  # normal component
+compare_gaussian_flux(u_n, w, u_th=0.1, u_m=0.07, label="u_n")
+
+# Pick a direction that is orthogonal to the normal direction, and check the distribution
+vx = ny / np.sqrt(nx**2 + ny**2)
+vy = -nx / np.sqrt(nx**2 + ny**2)
+vz = 0
+u_perp = ux * vx + uy * vy + uz * vz
+compare_gaussian(u_perp, w, u_th=0.01, label="u_perp")
+
+# Pick the other perpendicular direction, and check the distribution
+# The third direction is obtained by the cross product (n x v)
+wx = ny * vz - nz * vy
+wy = nz * vx - nx * vz
+wz = nx * vy - ny * vx
+u_perp2 = ux * wx + uy * wy + uz * wz
+compare_gaussian(u_perp2, w, u_th=0.01, label="u_perp")
+
+plt.tight_layout()
+plt.savefig("Distribution.png")
+
+# Verify checksum
+test_name = os.path.split(os.getcwd())[1]
+checksumAPI.evaluate_checksum(test_name, fn)
diff --git a/Examples/Tests/flux_injection/inputs_base_from_eb b/Examples/Tests/flux_injection/inputs_base_from_eb
new file mode 100644
index 00000000000..3e32d8799b6
--- /dev/null
+++ b/Examples/Tests/flux_injection/inputs_base_from_eb
@@ -0,0 +1,42 @@
+# Maximum number of time steps
+max_step = 10
+
+# The lo and hi ends of grids are multipliers of blocking factor
+amr.blocking_factor = 8
+
+# Maximum allowable size of each subdomain in the problem domain;
+# this is used to decompose the domain for parallel calculations.
+amr.max_grid_size = 8
+
+# Maximum level in hierarchy (for now must be 0, i.e., one level in total)
+amr.max_level = 0
+
+# Deactivate Maxwell solver
+algo.maxwell_solver = none
+warpx.const_dt = 1e-9
+
+# Embedded boundary
+warpx.eb_implicit_function = "-(x**2+y**2+z**2-2**2)"
+
+# particles
+particles.species_names = electron
+algo.particle_shape = 3
+
+electron.charge = -q_e
+electron.mass = m_e
+electron.injection_style = NFluxPerCell
+electron.inject_from_embedded_boundary = 1
+electron.num_particles_per_cell = 100
+electron.flux_profile = parse_flux_function
+electron.flux_function(x,y,z,t) = "1."
+electron.momentum_distribution_type = gaussianflux +electron.ux_th = 0.01 +electron.uy_th = 0.01 +electron.uz_th = 0.1 +electron.uz_m = 0.07 + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 10 +diag1.diag_type = Full +diag1.fields_to_plot = none diff --git a/Examples/Tests/flux_injection/inputs_test_2d_flux_injection_from_eb b/Examples/Tests/flux_injection/inputs_test_2d_flux_injection_from_eb new file mode 100644 index 00000000000..f2e6f177887 --- /dev/null +++ b/Examples/Tests/flux_injection/inputs_test_2d_flux_injection_from_eb @@ -0,0 +1,13 @@ +FILE = inputs_base_from_eb + +# number of grid points +amr.n_cell = 16 16 + +# Geometry +geometry.dims = 2 +geometry.prob_lo = -4 -4 +geometry.prob_hi = 4 4 + +# Boundary condition +boundary.field_lo = periodic periodic +boundary.field_hi = periodic periodic diff --git a/Examples/Tests/flux_injection/inputs_test_3d_flux_injection_from_eb b/Examples/Tests/flux_injection/inputs_test_3d_flux_injection_from_eb new file mode 100644 index 00000000000..81ddc039977 --- /dev/null +++ b/Examples/Tests/flux_injection/inputs_test_3d_flux_injection_from_eb @@ -0,0 +1,13 @@ +FILE = inputs_base_from_eb + +# number of grid points +amr.n_cell = 16 16 16 + +# Geometry +geometry.dims = 3 +geometry.prob_lo = -4 -4 -4 +geometry.prob_hi = 4 4 4 + +# Boundary condition +boundary.field_lo = periodic periodic periodic +boundary.field_hi = periodic periodic periodic diff --git a/Examples/Tests/flux_injection/inputs_test_rz_flux_injection_from_eb b/Examples/Tests/flux_injection/inputs_test_rz_flux_injection_from_eb new file mode 100644 index 00000000000..4c970257f57 --- /dev/null +++ b/Examples/Tests/flux_injection/inputs_test_rz_flux_injection_from_eb @@ -0,0 +1,15 @@ +FILE = inputs_base_from_eb + +# number of grid points +amr.n_cell = 8 16 + +# Geometry +geometry.dims = RZ +geometry.prob_lo = 0 -4 +geometry.prob_hi = 4 4 + +# Boundary condition +boundary.field_lo = none periodic +boundary.field_hi = pec periodic + +electron.num_particles_per_cell = 300 diff --git a/Regression/Checksum/benchmarks_json/test_2d_flux_injection_from_eb.json b/Regression/Checksum/benchmarks_json/test_2d_flux_injection_from_eb.json new file mode 100644 index 00000000000..dd489f16e05 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_flux_injection_from_eb.json @@ -0,0 +1,11 @@ +{ + "lev=0": {}, + "electron": { + "particle_momentum_x": 6.990772711451971e-19, + "particle_momentum_y": 5.4131306169803364e-20, + "particle_momentum_z": 6.997294931789925e-19, + "particle_position_x": 35518.95120597846, + "particle_position_y": 35517.855675902414, + "particle_weight": 1.25355e-07 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_3d_flux_injection_from_eb.json b/Regression/Checksum/benchmarks_json/test_3d_flux_injection_from_eb.json new file mode 100644 index 00000000000..e947a8af07b --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_flux_injection_from_eb.json @@ -0,0 +1,12 @@ +{ + "lev=0": {}, + "electron": { + "particle_momentum_x": 4.371688233196277e-18, + "particle_momentum_y": 4.368885079657374e-18, + "particle_momentum_z": 4.367429424105371e-18, + "particle_position_x": 219746.94401890738, + "particle_position_y": 219690.7015248918, + "particle_position_z": 219689.45580938633, + "particle_weight": 4.954974999999999e-07 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_rz_flux_injection_from_eb.json b/Regression/Checksum/benchmarks_json/test_rz_flux_injection_from_eb.json new file mode 100644 index 
00000000000..23884de9725 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_rz_flux_injection_from_eb.json @@ -0,0 +1,12 @@ +{ + "lev=0": {}, + "electron": { + "particle_momentum_x": 6.734984863106283e-19, + "particle_momentum_y": 6.786279785869023e-19, + "particle_momentum_z": 1.0527983828124758e-18, + "particle_position_x": 53309.270966506396, + "particle_position_y": 53302.3776094842, + "particle_theta": 58707.74469425615, + "particle_weight": 4.991396867417661e-07 + } +} \ No newline at end of file diff --git a/Source/Initialization/PlasmaInjector.H b/Source/Initialization/PlasmaInjector.H index b9fe2323290..f14720d271c 100644 --- a/Source/Initialization/PlasmaInjector.H +++ b/Source/Initialization/PlasmaInjector.H @@ -131,6 +131,8 @@ public: int flux_normal_axis; int flux_direction; // -1 for left, +1 for right + bool m_inject_from_eb = false; // whether to inject from the embedded boundary + bool radially_weighted = true; std::string str_flux_function; diff --git a/Source/Initialization/PlasmaInjector.cpp b/Source/Initialization/PlasmaInjector.cpp index 3d846375a99..76bb7a5be42 100644 --- a/Source/Initialization/PlasmaInjector.cpp +++ b/Source/Initialization/PlasmaInjector.cpp @@ -9,6 +9,7 @@ */ #include "PlasmaInjector.H" +#include "EmbeddedBoundary/Enabled.H" #include "Initialization/GetTemperature.H" #include "Initialization/GetVelocity.H" #include "Initialization/InjectorDensity.H" @@ -303,50 +304,65 @@ void PlasmaInjector::setupNFluxPerCell (amrex::ParmParse const& pp_species) "(Please visit PR#765 for more information.)"); } #endif - utils::parser::getWithParser(pp_species, source_name, "surface_flux_pos", surface_flux_pos); - utils::parser::queryWithParser(pp_species, source_name, "flux_tmin", flux_tmin); - utils::parser::queryWithParser(pp_species, source_name, "flux_tmax", flux_tmax); - std::string flux_normal_axis_string; - utils::parser::get(pp_species, source_name, "flux_normal_axis", flux_normal_axis_string); - flux_normal_axis = -1; + + // Check whether injection from the embedded boundary is requested + utils::parser::queryWithParser(pp_species, source_name, "inject_from_embedded_boundary", m_inject_from_eb); + if (m_inject_from_eb) { + AMREX_ALWAYS_ASSERT_WITH_MESSAGE( EB::enabled(), + "Error: Embedded boundary injection is only available when " + "embedded boundaries are enabled."); + flux_normal_axis = 2; // Interpret z as the normal direction to the EB + flux_direction = 1; + } else { + // Injection is through a plane in this case. + // Parse the parameters of the plane (position, normal direction, etc.) 
+ + utils::parser::getWithParser(pp_species, source_name, "surface_flux_pos", surface_flux_pos); + utils::parser::queryWithParser(pp_species, source_name, "flux_tmin", flux_tmin); + utils::parser::queryWithParser(pp_species, source_name, "flux_tmax", flux_tmax); + std::string flux_normal_axis_string; + utils::parser::get(pp_species, source_name, "flux_normal_axis", flux_normal_axis_string); + flux_normal_axis = -1; #ifdef WARPX_DIM_RZ - if (flux_normal_axis_string == "r" || flux_normal_axis_string == "R") { - flux_normal_axis = 0; - } - if (flux_normal_axis_string == "t" || flux_normal_axis_string == "T") { - flux_normal_axis = 1; - } + if (flux_normal_axis_string == "r" || flux_normal_axis_string == "R") { + flux_normal_axis = 0; + } + if (flux_normal_axis_string == "t" || flux_normal_axis_string == "T") { + flux_normal_axis = 1; + } #else # ifndef WARPX_DIM_1D_Z - if (flux_normal_axis_string == "x" || flux_normal_axis_string == "X") { - flux_normal_axis = 0; - } + if (flux_normal_axis_string == "x" || flux_normal_axis_string == "X") { + flux_normal_axis = 0; + } # endif #endif #ifdef WARPX_DIM_3D - if (flux_normal_axis_string == "y" || flux_normal_axis_string == "Y") { - flux_normal_axis = 1; - } + if (flux_normal_axis_string == "y" || flux_normal_axis_string == "Y") { + flux_normal_axis = 1; + } #endif - if (flux_normal_axis_string == "z" || flux_normal_axis_string == "Z") { - flux_normal_axis = 2; - } + if (flux_normal_axis_string == "z" || flux_normal_axis_string == "Z") { + flux_normal_axis = 2; + } #ifdef WARPX_DIM_3D - const std::string flux_normal_axis_help = "'x', 'y', or 'z'."; + const std::string flux_normal_axis_help = "'x', 'y', or 'z'."; #else # ifdef WARPX_DIM_RZ - const std::string flux_normal_axis_help = "'r' or 'z'."; + const std::string flux_normal_axis_help = "'r' or 'z'."; # elif WARPX_DIM_XZ - const std::string flux_normal_axis_help = "'x' or 'z'."; + const std::string flux_normal_axis_help = "'x' or 'z'."; # else - const std::string flux_normal_axis_help = "'z'."; + const std::string flux_normal_axis_help = "'z'."; # endif -#endif - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(flux_normal_axis >= 0, - "Error: Invalid value for flux_normal_axis. It must be " + flux_normal_axis_help); - utils::parser::getWithParser(pp_species, source_name, "flux_direction", flux_direction); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(flux_direction == +1 || flux_direction == -1, - "Error: flux_direction must be -1 or +1."); + #endif + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(flux_normal_axis >= 0, + "Error: Invalid value for flux_normal_axis. It must be " + flux_normal_axis_help); + utils::parser::getWithParser(pp_species, source_name, "flux_direction", flux_direction); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(flux_direction == +1 || flux_direction == -1, + "Error: flux_direction must be -1 or +1."); + } + // Construct InjectorPosition with InjectorPositionRandom. 
h_flux_pos = std::make_unique( (InjectorPositionRandomPlane*)nullptr,
diff --git a/Source/Particles/AddPlasmaUtilities.H b/Source/Particles/AddPlasmaUtilities.H
index 8f0489e3921..bb05d7be3c8 100644
--- a/Source/Particles/AddPlasmaUtilities.H
+++ b/Source/Particles/AddPlasmaUtilities.H
@@ -22,6 +22,29 @@
 #include
 #include

+struct PDim3 {
+    amrex::ParticleReal x, y, z;
+
+    AMREX_GPU_HOST_DEVICE
+    PDim3(const amrex::XDim3& a):
+        x{static_cast(a.x)},
+        y{static_cast(a.y)},
+        z{static_cast(a.z)}
+    {}
+
+    AMREX_GPU_HOST_DEVICE
+    ~PDim3() = default;
+
+    AMREX_GPU_HOST_DEVICE
+    PDim3(PDim3 const &) = default;
+    AMREX_GPU_HOST_DEVICE
+    PDim3& operator=(PDim3 const &) = default;
+    AMREX_GPU_HOST_DEVICE
+    PDim3(PDim3&&) = default;
+    AMREX_GPU_HOST_DEVICE
+    PDim3& operator=(PDim3&&) = default;
+};
+
 /* Finds the overlap region between the given tile_realbox and part_realbox, returning true
    if an overlap exists and false if otherwise. This also sets the parameters overlap_realbox,
@@ -71,12 +94,124 @@ int compute_area_weights (const amrex::IntVect& iv, const int normal_axis) {
     return r;
 }
+
+#ifdef AMREX_USE_EB
+/*
+ * \brief This computes the scale_fac (used for setting the particle weights) on an area basis
+ * (used for flux injection from the embedded boundary).
+ *
+ * \param[in] dx: cell size in each direction
+ * \param[in] num_ppc_real: number of particles per cell
+ * \param[in] eb_bnd_normal_arr: array containing the normal to the embedded boundary
+ * \param[in] i, j, k: indices of the cell
+ *
+ * \return scale_fac: the scaling factor to be applied to the weight of the particles
+ */
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+amrex::Real compute_scale_fac_area_eb (
+    const amrex::GpuArray& dx,
+    const amrex::Real num_ppc_real,
+    amrex::Array4 const& eb_bnd_normal_arr,
+    int i, int j, int k ) {
+    using namespace amrex::literals;
+    // Scale particle weight by the area of the emitting surface, within one cell
+    // By definition, eb_bnd_area_arr is normalized (unitless).
+    // Here we undo the normalization (i.e. multiply by the surface used for normalization in amrex:
+    // see https://amrex-codes.github.io/amrex/docs_html/EB.html#embedded-boundary-data-structures)
+#if defined(WARPX_DIM_3D)
+    const amrex::Real nx = eb_bnd_normal_arr(i,j,k,0);
+    const amrex::Real ny = eb_bnd_normal_arr(i,j,k,1);
+    const amrex::Real nz = eb_bnd_normal_arr(i,j,k,2);
+    amrex::Real scale_fac = std::sqrt(amrex::Math::powi<2>(nx*dx[1]*dx[2]) +
+                                      amrex::Math::powi<2>(ny*dx[0]*dx[2]) +
+                                      amrex::Math::powi<2>(nz*dx[0]*dx[1]));
+
+#elif defined(WARPX_DIM_RZ) || defined(WARPX_DIM_XZ)
+    const amrex::Real nx = eb_bnd_normal_arr(i,j,k,0);
+    const amrex::Real nz = eb_bnd_normal_arr(i,j,k,1);
+    amrex::Real scale_fac = std::sqrt(amrex::Math::powi<2>(nx*dx[1]) +
+                                      amrex::Math::powi<2>(nz*dx[0]));
+#else
+    amrex::ignore_unused(dx, eb_bnd_normal_arr, i, j, k);
+    amrex::Real scale_fac = 0.0_rt;
+#endif
+    // Do not multiply by eb_bnd_area_arr(i,j,k) here because this is
+    // already taken into account by emitting a number of macroparticles
+    // that is proportional to the area of eb_bnd_area_arr(i,j,k).
+    scale_fac /= num_ppc_real;
+    return scale_fac;
+}
+
+/* \brief Rotate the momentum of the particle so that the z direction
+ * transforms to the normal of the embedded boundary.
+ *
+ * More specifically, before calling this function, `pu.z` has the
+ * momentum distribution that is meant for the direction normal to the
+ * embedded boundary, and `pu.x`/`pu.y` have the momentum distribution that
+ * is meant for the tangential direction. After calling this function,
+ * `pu.x`, `pu.y`, `pu.z` will have the correct momentum distribution,
+ * consistent with the local normal to the embedded boundary.
+ *
+ * \param[inout] pu momentum of the particle
+ * \param[in] eb_bnd_normal_arr: array containing the normal to the embedded boundary
+ * \param[in] i, j, k: indices of the cell
+ * */
+AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE
+void rotate_momentum_eb (
+    PDim3 & pu,
+    amrex::Array4 const& eb_bnd_normal_arr,
+    int i, int j, int k )
+{
+    using namespace amrex::literals;
+
+#if defined(WARPX_DIM_3D)
+    // The minus sign below takes into account the fact that eb_bnd_normal_arr
+    // points towards the covered region, while particles are to be emitted
+    // *away* from the covered region.
+    amrex::Real const nx = -eb_bnd_normal_arr(i,j,k,0);
+    amrex::Real const ny = -eb_bnd_normal_arr(i,j,k,1);
+    amrex::Real const nz = -eb_bnd_normal_arr(i,j,k,2);
+
+    // Rotate the momentum in theta and phi
+    amrex::Real const cos_theta = nz;
+    amrex::Real const sin_theta = std::sqrt(1._rt-nz*nz);
+    amrex::Real const nperp = std::sqrt(nx*nx + ny*ny);
+    amrex::Real cos_phi = 1;
+    amrex::Real sin_phi = 0;
+    if ( nperp > 0.0 ) {
+        cos_phi = nx/nperp;
+        sin_phi = ny/nperp;
+    }
+    // Apply rotation matrix
+    amrex::Real const ux = pu.x*cos_theta*cos_phi - pu.y*sin_phi + pu.z*sin_theta*cos_phi;
+    amrex::Real const uy = pu.x*cos_theta*sin_phi + pu.y*cos_phi + pu.z*sin_theta*sin_phi;
+    amrex::Real const uz = -pu.x*sin_theta + pu.z*cos_theta;
+    pu.x = ux;
+    pu.y = uy;
+    pu.z = uz;
+
+#elif defined(WARPX_DIM_RZ) || defined(WARPX_DIM_XZ)
+    // The minus sign below takes into account the fact that eb_bnd_normal_arr
+    // points towards the covered region, while particles are to be emitted
+    // *away* from the covered region.
+    amrex::Real const sin_theta = -eb_bnd_normal_arr(i,j,k,0);
+    amrex::Real const cos_theta = -eb_bnd_normal_arr(i,j,k,1);
+    amrex::Real const uz = pu.z*cos_theta - pu.x*sin_theta;
+    amrex::Real const ux = pu.x*cos_theta + pu.z*sin_theta;
+    pu.x = ux;
+    pu.z = uz;
+#else
+    amrex::ignore_unused(pu, eb_bnd_normal_arr, i, j, k);
+#endif
+}
+#endif //AMREX_USE_EB
+
 /* This computes the scale_fac (used for setting the particle weights) on a on area basis
-   (used for flux injection).
+   (used for flux injection from a plane).
*/ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE -amrex::Real compute_scale_fac_area (const amrex::GpuArray& dx, +amrex::Real compute_scale_fac_area_plane (const amrex::GpuArray& dx, const amrex::Real num_ppc_real, const int flux_normal_axis) { using namespace amrex::literals; amrex::Real scale_fac = AMREX_D_TERM(dx[0],*dx[1],*dx[2])/num_ppc_real; diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 26f9fee38d3..7c70c9a35c4 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -147,29 +147,6 @@ namespace return z0; } - struct PDim3 { - ParticleReal x, y, z; - - AMREX_GPU_HOST_DEVICE - PDim3(const amrex::XDim3& a): - x{static_cast(a.x)}, - y{static_cast(a.y)}, - z{static_cast(a.z)} - {} - - AMREX_GPU_HOST_DEVICE - ~PDim3() = default; - - AMREX_GPU_HOST_DEVICE - PDim3(PDim3 const &) = default; - AMREX_GPU_HOST_DEVICE - PDim3& operator=(PDim3 const &) = default; - AMREX_GPU_HOST_DEVICE - PDim3(PDim3&&) = default; - AMREX_GPU_HOST_DEVICE - PDim3& operator=(PDim3&&) = default; - }; - AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE XDim3 getCellCoords (const GpuArray& lo_corner, const GpuArray& dx, @@ -1371,6 +1348,22 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, const auto dx = geom.CellSizeArray(); const auto problo = geom.ProbLoArray(); +#ifdef AMREX_USE_EB + bool const inject_from_eb = plasma_injector.m_inject_from_eb; // whether to inject from EB or from a plane + // Extract data structures for embedded boundaries + amrex::FabArray const* eb_flag = nullptr; + amrex::MultiCutFab const* eb_bnd_area = nullptr; + amrex::MultiCutFab const* eb_bnd_normal = nullptr; + amrex::MultiCutFab const* eb_bnd_cent = nullptr; + if (inject_from_eb) { + amrex::EBFArrayBoxFactory const& eb_box_factory = WarpX::GetInstance().fieldEBFactory(0); + eb_flag = &eb_box_factory.getMultiEBCellFlagFab(); + eb_bnd_area = &eb_box_factory.getBndryArea(); + eb_bnd_normal = &eb_box_factory.getBndryNormal(); + eb_bnd_cent = &eb_box_factory.getBndryCent(); + } +#endif + amrex::LayoutData* cost = WarpX::getCosts(0); // Create temporary particle container to which particles will be added; @@ -1428,9 +1421,20 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, RealBox overlap_realbox; Box overlap_box; IntVect shifted; - const bool no_overlap = find_overlap_flux(tile_realbox, part_realbox, dx, problo, plasma_injector, overlap_realbox, overlap_box, shifted); - if (no_overlap) { - continue; // Go to the next tile +#ifdef AMREX_USE_EB + if (inject_from_eb) { + // Injection from EB + const amrex::FabType fab_type = (*eb_flag)[mfi].getType(tile_box); + if (fab_type == amrex::FabType::regular) { continue; } // Go to the next tile + if (fab_type == amrex::FabType::covered) { continue; } // Go to the next tile + overlap_box = tile_box; + overlap_realbox = part_realbox; + } else +#endif + { + // Injection from a plane + const bool no_overlap = find_overlap_flux(tile_realbox, part_realbox, dx, problo, plasma_injector, overlap_realbox, overlap_box, shifted); + if (no_overlap) { continue; } // Go to the next tile } const int grid_id = mfi.index(); @@ -1450,24 +1454,57 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, if (refine_injection) { fine_overlap_box = overlap_box & amrex::shift(fine_injection_box, -shifted); } + +#ifdef AMREX_USE_EB + // Extract data structures for embedded boundaries + amrex::Array4::value_type> 
eb_flag_arr; + amrex::Array4 eb_bnd_area_arr; + amrex::Array4 eb_bnd_normal_arr; + amrex::Array4 eb_bnd_cent_arr; + if (inject_from_eb) { + eb_flag_arr = eb_flag->array(mfi); + eb_bnd_area_arr = eb_bnd_area->array(mfi); + eb_bnd_normal_arr = eb_bnd_normal->array(mfi); + eb_bnd_cent_arr = eb_bnd_cent->array(mfi); + } +#endif + amrex::ParallelForRNG(overlap_box, [=] AMREX_GPU_DEVICE (int i, int j, int k, amrex::RandomEngine const& engine) noexcept { const IntVect iv(AMREX_D_DECL(i, j, k)); amrex::ignore_unused(j,k); - auto lo = getCellCoords(overlap_corner, dx, {0._rt, 0._rt, 0._rt}, iv); - auto hi = getCellCoords(overlap_corner, dx, {1._rt, 1._rt, 1._rt}, iv); - - if (flux_pos->overlapsWith(lo, hi)) + // Determine the number of macroparticles to inject in this cell (num_ppc_int) +#ifdef AMREX_USE_EB + amrex::Real num_ppc_real_in_this_cell = num_ppc_real; // user input: number of macroparticles per cell + if (inject_from_eb) { + // Injection from EB + // Skip cells that are not partially covered by the EB + if (eb_flag_arr(i,j,k).isRegular() || eb_flag_arr(i,j,k).isCovered()) { return; } + // Scale by the (normalized) area of the EB surface in this cell + num_ppc_real_in_this_cell *= eb_bnd_area_arr(i,j,k); + } else +#else + amrex::Real const num_ppc_real_in_this_cell = num_ppc_real; // user input: number of macroparticles per cell +#endif { - auto index = overlap_box.index(iv); - int r = 1; - if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { - r = compute_area_weights(rrfac, flux_normal_axis); - } - const int num_ppc_int = static_cast(num_ppc_real*r + amrex::Random(engine)); - pcounts[index] = num_ppc_int; + // Injection from a plane + auto lo = getCellCoords(overlap_corner, dx, {0._rt, 0._rt, 0._rt}, iv); + auto hi = getCellCoords(overlap_corner, dx, {1._rt, 1._rt, 1._rt}, iv); + // Skip cells that do not overlap with the plane + if (!flux_pos->overlapsWith(lo, hi)) { return; } } + + auto index = overlap_box.index(iv); + // Take into account refined injection region + int r = 1; + if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { + r = compute_area_weights(rrfac, flux_normal_axis); + } + const int num_ppc_int = static_cast(num_ppc_real_in_this_cell*r + amrex::Random(engine)); + pcounts[index] = num_ppc_int; + + amrex::ignore_unused(j,k); }); // Max number of new particles. All of them are created, @@ -1537,7 +1574,15 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, amrex::ignore_unused(j,k); const auto index = overlap_box.index(iv); - Real scale_fac = compute_scale_fac_area(dx, num_ppc_real, flux_normal_axis); + Real scale_fac; +#ifdef AMREX_USE_EB + if (inject_from_eb) { + scale_fac = compute_scale_fac_area_eb(dx, num_ppc_real, eb_bnd_normal_arr, i, j, k ); + } else +#endif + { + scale_fac = compute_scale_fac_area_plane(dx, num_ppc_real, flux_normal_axis); + } if (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) { scale_fac /= compute_area_weights(rrfac, flux_normal_axis); @@ -1548,13 +1593,32 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, const long ip = poffset[index] + i_part; pa_idcpu[ip] = amrex::SetParticleIDandCPU(pid+ip, cpuid); - // This assumes the flux_pos is of type InjectorPositionRandomPlane - const XDim3 r = (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) ? 
-                        // In the refined injection region: use refinement ratio `rrfac`
-                        flux_pos->getPositionUnitBox(i_part, rrfac, engine) :
-                        // Otherwise: use 1 as the refinement ratio
-                        flux_pos->getPositionUnitBox(i_part, amrex::IntVect::TheUnitVector(), engine);
-                    auto pos = getCellCoords(overlap_corner, dx, r, iv);
+                    // Determine the position of the particle within the cell
+                    XDim3 pos;
+                    XDim3 r;
+#ifdef AMREX_USE_EB
+                    if (inject_from_eb) {
+#if defined(WARPX_DIM_3D)
+                        pos.x = overlap_corner[0] + (iv[0] + 0.5_rt + eb_bnd_cent_arr(i,j,k,0))*dx[0];
+                        pos.y = overlap_corner[1] + (iv[1] + 0.5_rt + eb_bnd_cent_arr(i,j,k,1))*dx[1];
+                        pos.z = overlap_corner[2] + (iv[2] + 0.5_rt + eb_bnd_cent_arr(i,j,k,2))*dx[2];
+#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
+                        pos.x = overlap_corner[0] + (iv[0] + 0.5_rt + eb_bnd_cent_arr(i,j,k,0))*dx[0];
+                        pos.y = 0.0_rt;
+                        pos.z = overlap_corner[1] + (iv[1] + 0.5_rt + eb_bnd_cent_arr(i,j,k,1))*dx[1];
+#endif
+                    } else
+#endif
+                    {
+                        // Injection from a plane
+                        // This assumes the flux_pos is of type InjectorPositionRandomPlane
+                        r = (fine_overlap_box.ok() && fine_overlap_box.contains(iv)) ?
+                            // In the refined injection region: use refinement ratio `rrfac`
+                            flux_pos->getPositionUnitBox(i_part, rrfac, engine) :
+                            // Otherwise: use 1 as the refinement ratio
+                            flux_pos->getPositionUnitBox(i_part, amrex::IntVect::TheUnitVector(), engine);
+                        pos = getCellCoords(overlap_corner, dx, r, iv);
+                    }
                     auto ppos = PDim3(pos);

                     // inj_mom would typically be InjectorMomentumGaussianFlux
@@ -1595,6 +1659,15 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector,
                         continue;
                     }

+#ifdef AMREX_USE_EB
+                    if (inject_from_eb) {
+                        // Injection from EB: rotate momentum according to the normal of the EB surface
+                        // (The above code initialized the momentum by assuming that z is the direction
+                        // normal to the EB surface. Thus we need to rotate from z to the normal.)
+                        rotate_momentum_eb(pu, eb_bnd_normal_arr, i, j , k);
+                    }
+#endif
+
 #ifdef WARPX_DIM_RZ
                     // Conversion from cylindrical to Cartesian coordinates
                     // Replace the x and y, setting an angle theta.
@@ -1610,7 +1683,11 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector,
                     const amrex::Real radial_position = ppos.x;
                     ppos.x = radial_position*cos_theta;
                     ppos.y = radial_position*sin_theta;
-                    if (loc_flux_normal_axis != 2) {
+                    if ((loc_flux_normal_axis != 2)
+#ifdef AMREX_USE_EB
+                        || (inject_from_eb)
+#endif
+                        ) {
                         // Rotate the momentum
                         // This because, when the flux direction is e.g. "r"
                         // the `inj_mom` objects generates a v*Gaussian distribution

From 3cda2c11e1d92ccd4b90c57debc9b399fa1978e6 Mon Sep 17 00:00:00 2001
From: Remi Lehe
Date: Thu, 10 Oct 2024 17:30:11 -0700
Subject: [PATCH 044/278] Avoid interpolating from guard cells in BTD (#5342)

BTD diagnostics sometimes show artifacts at the edge of the range of collected data. (See for instance the red curve below.)

My understanding is that this happens because the BTD collection planes use data from the guard cells outside of the simulation domain, when interpolating fields that are then used for the Lorentz back-transform. The guard cell data may not be physically correct (e.g. it may not have the right cancellation between `E` and `B`), and could thus cause this artifact.

This PR avoids this issue by preventing the collection planes from collecting data when they are within half a cell of the edge of the simulation domain.
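In equations (my paraphrase of the change, not text from the PR): with Δz the boosted-frame cell size along the moving-window direction, a collection plane at boosted-frame position z_boost(t) now only collects data while

```latex
z_{\mathrm{lo}} + \tfrac{1}{2}\,\Delta z \;<\; z_{\mathrm{boost}}(t) \;<\; z_{\mathrm{hi}} - \tfrac{1}{2}\,\Delta z
```

so that the cell-centered interpolation stencil never reaches into the guard cells; outside this range the slice is flagged as not being in the domain.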
See the example below, taken from https://github.com/ECP-WarpX/WarpX/pull/5337 (plot of the laser field, from the BTD diagnostic) ![Figure 40](https://github.com/user-attachments/assets/e4549856-4182-4a87-aa26-2d3bc6ac8e2c) The BTD diagnostics values are identical with this PR, except for the problematic point appearing at the edge of the domain. --- .../test_2d_rigid_injection_btd.json | 30 +++++++++---------- Source/Diagnostics/BTDiagnostics.cpp | 7 +++-- 2 files changed, 20 insertions(+), 17 deletions(-) diff --git a/Regression/Checksum/benchmarks_json/test_2d_rigid_injection_btd.json b/Regression/Checksum/benchmarks_json/test_2d_rigid_injection_btd.json index 90cf134201f..9e876d5c23e 100644 --- a/Regression/Checksum/benchmarks_json/test_2d_rigid_injection_btd.json +++ b/Regression/Checksum/benchmarks_json/test_2d_rigid_injection_btd.json @@ -1,22 +1,22 @@ { + "lev=0": { + "Bx": 3.719030475087696e-05, + "By": 0.004843257051761486, + "Bz": 5.522765606391185e-06, + "Ex": 1461264.5033270014, + "Ey": 11205.64142004876, + "Ez": 282020.7784731542, + "jx": 16437877.898892798, + "jy": 2492340.3149980744, + "jz": 215102423.57036853, + "rho": 0.7246235591902171 + }, "beam": { - "particle_momentum_x": 2.2080215038948936e-16, + "particle_momentum_x": 2.2080215038948934e-16, "particle_momentum_y": 2.18711072170811e-16, - "particle_momentum_z": 2.730924530737497e-15, - "particle_position_x": 0.0260823588888081, + "particle_momentum_z": 2.730924530737456e-15, + "particle_position_x": 0.026082358888808558, "particle_position_y": 0.5049438607316916, "particle_weight": 62415.090744607645 - }, - "lev=0": { - "Bx": 3.721807007218884e-05, - "By": 0.004860056238272468, - "Bz": 5.5335765596325185e-06, - "Ex": 1466447.517373168, - "Ey": 11214.10223280318, - "Ez": 283216.0961218869, - "jx": 16437877.898892513, - "jy": 2492340.3149980404, - "jz": 215102423.57036877, - "rho": 0.7246235591902177 } } \ No newline at end of file diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 631de298861..f10f337a1f1 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -999,12 +999,15 @@ BTDiagnostics::GetZSliceInDomainFlag (const int i_buffer, const int lev) { auto & warpx = WarpX::GetInstance(); const amrex::RealBox& boost_domain = warpx.Geom(lev).ProbDomain(); + const amrex::Real boost_cellsize = warpx.Geom(lev).CellSize(m_moving_window_dir); const amrex::Real buffer_zmin_lab = m_snapshot_domain_lab[i_buffer].lo( m_moving_window_dir ); const amrex::Real buffer_zmax_lab = m_snapshot_domain_lab[i_buffer].hi( m_moving_window_dir ); + // Exclude 0.5*boost_cellsize from the edge, to avoid that the interpolation to + // cell centers uses data from the guard cells. 
const bool slice_not_in_domain = - ( m_current_z_boost[i_buffer] <= boost_domain.lo(m_moving_window_dir) ) || - ( m_current_z_boost[i_buffer] >= boost_domain.hi(m_moving_window_dir) ) || + ( m_current_z_boost[i_buffer] <= boost_domain.lo(m_moving_window_dir) + 0.5_rt*boost_cellsize) || + ( m_current_z_boost[i_buffer] >= boost_domain.hi(m_moving_window_dir) - 0.5_rt*boost_cellsize) || ( m_current_z_lab[i_buffer] <= buffer_zmin_lab ) || ( m_current_z_lab[i_buffer] >= buffer_zmax_lab ); From dc68659c1b2d6a689f4b602bddd42f9099fb5928 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Thu, 10 Oct 2024 17:30:47 -0700 Subject: [PATCH 045/278] Update BackTransformed diagnostics to take into account arbitrary moving window velocity (#5341) In the `development` branch, the `BackTransformed` diagnostics assume that the moving window moves exactly at the speed of light. This PR generalizes the code for arbitrary moving window velocity. This PR does not add an automated test, but the upcoming PR #5337 will add a test which features a moving window with a speed different than `c`. This is a follow-up of #5226, which modified the transformation of the simulation box coordinates for arbitrary moving window velocity, but did not yet update the `BackTransformed` diagnostic code. --- Docs/source/usage/faq.rst | 4 ++-- Source/Diagnostics/BTDiagnostics.H | 1 + Source/Diagnostics/BTDiagnostics.cpp | 36 +++++++++++++++------------- 3 files changed, 22 insertions(+), 19 deletions(-) diff --git a/Docs/source/usage/faq.rst b/Docs/source/usage/faq.rst index 67cea8d6621..4ed0f8fa6af 100644 --- a/Docs/source/usage/faq.rst +++ b/Docs/source/usage/faq.rst @@ -74,10 +74,10 @@ Several BTD quantities differ slightly from the lab frame domain described in th In the following discussion, we will use a subscript input (e.g. :math:`\Delta z_{\rm input}`) to denote properties of the lab frame domain. -- The first back-transformed diagnostic (BTD) snapshot may not occur at :math:`t=0`. Rather, it occurs at :math:`t_0=\frac{z_{max}}c \beta(1+\beta)\gamma^2`. This is the first time when the boosted frame can complete the snapshot. +- The first back-transformed diagnostic (BTD) snapshot may not occur at :math:`t=0`. Rather, it occurs at :math:`t_0=\frac{z_{max}}c \beta/(1 - \beta \beta_{mw})`, where :math:`\beta_{mw}` represents the speed of the moving window. This is the first time when the boosted frame can complete the snapshot. - The grid spacing of the BTD snapshot is different from the grid spacing indicated in the input script. It is given by :math:`\Delta z_{\rm grid,snapshot}=\frac{c\Delta t_{\rm boost}}{\gamma\beta}`. For a CFL-limited time step, :math:`\Delta z_{\rm grid,snapshot}\approx \frac{1+\beta}{\beta} \Delta z_{\rm input}\approx 2 \Delta z_{\rm input}`. Hence in many common use cases at large boost, it is expected that the BTD snapshot has a grid spacing twice what is expressed in the input script. - The effective length of the BTD snapshot may be longer than anticipated from the input script because the grid spacing is different. Additionally, the number of grid points in the BTD snapshot is a multiple of ``.buffer_size`` whereas the number of grid cells specified in the input deck may not be. -- The code may require longer than anticipated to complete a BTD snapshot. The code starts filling the :math:`i^{th}` snapshot around step :math:`j_{\rm BTD start}={\rm ceil}\left( i\gamma(1-\beta)\frac{\Delta t_{\rm snapshot}}{\Delta t_{\rm boost}}\right)`. 
The code then saves information for one BTD cell every time step in the boosted frame simulation. The :math:`i^{th}` snapshot is completed and saved :math:`n_{z,{\rm snapshot}}=n_{\rm buffers}\cdot ({\rm buffer\ size})` time steps after it begins, which is when the effective snapshot length is covered by the simulation. +- The code may require longer than anticipated to complete a BTD snapshot. The code starts filling the :math:`i^{th}` snapshot around step :math:`j_{\rm BTD start}={\rm ceil}\left( i\gamma(1-\beta\beta_{mw})\frac{\Delta t_{\rm snapshot}}{\Delta t_{\rm boost}}\right)`. The code then saves information for one BTD cell every time step in the boosted frame simulation. The :math:`i^{th}` snapshot is completed and saved :math:`n_{z,{\rm snapshot}}=n_{\rm buffers}\cdot ({\rm buffer\ size})` time steps after it begins, which is when the effective snapshot length is covered by the simulation. What kinds of RZ output do you support? --------------------------------------- diff --git a/Source/Diagnostics/BTDiagnostics.H b/Source/Diagnostics/BTDiagnostics.H index d11db98276b..ab04f30ef18 100644 --- a/Source/Diagnostics/BTDiagnostics.H +++ b/Source/Diagnostics/BTDiagnostics.H @@ -161,6 +161,7 @@ private: * in z-direction for both 2D and 3D simulations in the Cartesian frame of reference. */ int m_moving_window_dir; + amrex::Real m_moving_window_beta; /** Number of back-transformed snapshots in the lab-frame requested by the user */ int m_num_snapshots_lab = std::numeric_limits::lowest(); diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index f10f337a1f1..312bbc7ec45 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -69,6 +69,7 @@ void BTDiagnostics::DerivedInitData () m_gamma_boost = WarpX::gamma_boost; m_beta_boost = std::sqrt( 1._rt - 1._rt/( m_gamma_boost * m_gamma_boost) ); m_moving_window_dir = WarpX::moving_window_dir; + m_moving_window_beta = WarpX::moving_window_v/PhysConst::c; // Currently, for BTD, all the data is averaged+coarsened to coarsest level // and then sliced+back-transformed+filled_to_buffer. // The number of levels to be output is nlev_output. 
@@ -138,7 +139,7 @@ void BTDiagnostics::DerivedInitData () const int lev = 0; const amrex::Real dt_boosted_frame = warpx.getdt(lev); const int moving_dir = WarpX::moving_window_dir; - const amrex::Real Lz_lab = warpx.Geom(lev).ProbLength(moving_dir) / WarpX::gamma_boost / (1._rt+WarpX::beta_boost); + const amrex::Real Lz_lab = warpx.Geom(lev).ProbLength(moving_dir) * WarpX::gamma_boost * (1._rt - WarpX::beta_boost*m_moving_window_beta); const int ref_ratio = 1; const amrex::Real dz_snapshot_grid = dz_lab(dt_boosted_frame, ref_ratio); // Need enough buffers so the snapshot length is longer than the lab frame length @@ -149,22 +150,21 @@ void BTDiagnostics::DerivedInitData () // the final snapshot starts filling when the // right edge of the moving window intersects the final snapshot // time of final snapshot : t_sn = t0 + i*dt_snapshot - // where t0 is the time of first BTD snapshot, t0 = zmax / c * beta / (1-beta) + // where t0 is the time of first BTD snapshot, t0 = zmax / c * beta / (1-beta*beta_mw) // // the right edge of the moving window at the time of the final snapshot // has space time coordinates - // time t_intersect = t_sn, position z_intersect=zmax + c*t_sn + // time t_intersect = t_sn, position z_intersect=zmax + v_mw*t_sn // the boosted time of this space time pair is // t_intersect_boost = gamma * (t_intersect - beta * z_intersect_boost/c) - // = gamma * (t_sn * (1 - beta) - beta * zmax / c) - // = gamma * (zmax*beta/c + i*dt_snapshot*(1-beta) - beta*zmax/c) - // = gamma * i * dt_snapshot * (1-beta) - // = i * dt_snapshot / gamma / (1+beta) + // = gamma * (t_sn * (1 - beta*beta_mw) - beta * zmax / c) + // = gamma * (zmax*beta/c + i*dt_snapshot*(1-beta*beta_mw) - beta*zmax/c) + // = gamma * (1-beta*beta_mw) * i * dt_snapshot // // if j = final snapshot starting step, then we want to solve - // j dt_boosted_frame >= t_intersect_boost = i * dt_snapshot / gamma / (1+beta) - // j >= i / gamma / (1+beta) * dt_snapshot / dt_boosted_frame - const int final_snapshot_starting_step = static_cast(std::ceil(final_snapshot_iteration / WarpX::gamma_boost / (1._rt+WarpX::beta_boost) * m_dt_snapshots_lab / dt_boosted_frame)); + // j dt_boosted_frame >= t_intersect_boost = i * gamma * (1-beta*beta_mw) * dt_snapshot + // j >= i * gamma * (1-beta*beta_mw) * dt_snapshot / dt_boosted_frame + const int final_snapshot_starting_step = static_cast(std::ceil(final_snapshot_iteration * WarpX::gamma_boost * (1._rt - WarpX::beta_boost*m_moving_window_beta) * m_dt_snapshots_lab / dt_boosted_frame)); const int final_snapshot_fill_iteration = final_snapshot_starting_step + num_buffers * m_buffer_size - 1; const amrex::Real final_snapshot_fill_time = final_snapshot_fill_iteration * dt_boosted_frame; if (WarpX::compute_max_step_from_btd) { @@ -256,7 +256,7 @@ BTDiagnostics::ReadParameters () bool snapshot_interval_is_specified = utils::parser::queryWithParser( pp_diag_name, "dt_snapshots_lab", m_dt_snapshots_lab); if ( utils::parser::queryWithParser(pp_diag_name, "dz_snapshots_lab", m_dz_snapshots_lab) ) { - m_dt_snapshots_lab = m_dz_snapshots_lab/PhysConst::c; + m_dt_snapshots_lab = m_dz_snapshots_lab/WarpX::moving_window_v; snapshot_interval_is_specified = true; } WARPX_ALWAYS_ASSERT_WITH_MESSAGE(snapshot_interval_is_specified, @@ -338,13 +338,15 @@ BTDiagnostics::InitializeBufferData ( int i_buffer , int lev, bool restart) // When restarting boosted simulations, the code below needs to take // into account the fact that the position of the box at the beginning // of the simulation, is not the one that we 
had at t=0 (because of the moving window) - const amrex::Real boosted_moving_window_v = (WarpX::moving_window_v - m_beta_boost*PhysConst::c) - / (1._rt - m_beta_boost * WarpX::moving_window_v/PhysConst::c); + const amrex::Real boosted_moving_window_v = (m_moving_window_beta - m_beta_boost) + / (1._rt - m_beta_boost*m_moving_window_beta); // Lab-frame time for the i^th snapshot if (!restart) { - const amrex::Real zmax_0 = warpx.Geom(lev).ProbHi(m_moving_window_dir); + const amrex::Real zmax_boost = warpx.Geom(lev).ProbHi(m_moving_window_dir); m_t_lab.at(i_buffer) = m_intervals.GetBTDIteration(i_buffer) * m_dt_snapshots_lab - + m_gamma_boost*m_beta_boost*zmax_0/PhysConst::c; + + m_gamma_boost*m_beta_boost*zmax_boost/PhysConst::c; + // Note: gamma_boost*beta_boost*zmax_boost is equal to + // beta_boost*zmax_lab/(1-beta_boost*beta_moving_window) } // Define buffer domain in boosted frame at level, lev, with user-defined lo and hi @@ -403,9 +405,9 @@ BTDiagnostics::InitializeBufferData ( int i_buffer , int lev, bool restart) // Define buffer_domain in lab-frame for the i^th snapshot. // Replace z-dimension with lab-frame co-ordinates. const amrex::Real zmin_buffer_lab = ( diag_dom.lo(m_moving_window_dir) - boosted_moving_window_v * warpx.gett_new(0) ) - / ( (1.0_rt + m_beta_boost) * m_gamma_boost); + * (1.0_rt - m_beta_boost*m_moving_window_beta) * m_gamma_boost; const amrex::Real zmax_buffer_lab = ( diag_dom.hi(m_moving_window_dir) - boosted_moving_window_v * warpx.gett_new(0) ) - / ( (1.0_rt + m_beta_boost) * m_gamma_boost); + * (1.0_rt - m_beta_boost*m_moving_window_beta) * m_gamma_boost; // Initialize buffer counter and z-positions of the i^th snapshot in // boosted-frame and lab-frame From d3711661dc54700183d8a096d702650060ee030c Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Fri, 11 Oct 2024 11:33:29 -0700 Subject: [PATCH 046/278] Add WarpX example for FEL simulation (#5337) This adds an example for how to run FEL simulations with the boosted-frame technique. 
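As a quick sketch of the frame choice at the heart of this method (my summary of the Fawley & Vay references cited below, not part of this patch): in a frame boosted with Lorentz factor gamma_F, the undulator period is Lorentz-contracted while the radiation wavelength is Doppler red-shifted, and the optimal frame is the one in which the two become equal.

```latex
% Undulator period and radiation wavelength seen in the boosted frame:
\lambda_u' = \frac{\lambda_u}{\gamma_F} , \qquad
\lambda_r' = \gamma_F \left(1 + \beta_F\right) \lambda_r \approx 2\,\gamma_F\,\lambda_r
% Equating the two wavelengths gives the optimal boost, for which the number
% of required time steps drops by the quoted factor of about 2\,\gamma_F^2:
\gamma_F \approx \sqrt{\frac{\lambda_u}{2\,\lambda_r}}
```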
https://warpx--5337.org.readthedocs.build/en/5337/usage/examples/free_electron_laser/README.html --------- Co-authored-by: Brian Naranjo Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> --- Docs/source/refs.bib | 35 +++++ Docs/source/usage/examples.rst | 2 +- .../source/usage/examples/free_electron_laser | 1 + Docs/source/usage/parameters.rst | 2 +- Examples/Physics_applications/CMakeLists.txt | 1 + .../free_electron_laser/CMakeLists.txt | 12 ++ .../free_electron_laser/README.rst | 46 ++++++ .../free_electron_laser/analysis_fel.py | 145 ++++++++++++++++++ .../free_electron_laser/inputs_test_1d_fel | 92 +++++++++++ .../free_electron_laser/plot_sim.py | 52 +++++++ .../Checksum/benchmarks_json/test_1d_fel.json | 31 ++++ 11 files changed, 417 insertions(+), 2 deletions(-) create mode 120000 Docs/source/usage/examples/free_electron_laser create mode 100644 Examples/Physics_applications/free_electron_laser/CMakeLists.txt create mode 100644 Examples/Physics_applications/free_electron_laser/README.rst create mode 100755 Examples/Physics_applications/free_electron_laser/analysis_fel.py create mode 100644 Examples/Physics_applications/free_electron_laser/inputs_test_1d_fel create mode 100644 Examples/Physics_applications/free_electron_laser/plot_sim.py create mode 100644 Regression/Checksum/benchmarks_json/test_1d_fel.json diff --git a/Docs/source/refs.bib b/Docs/source/refs.bib index 130e0ce4da7..5bbaf633179 100644 --- a/Docs/source/refs.bib +++ b/Docs/source/refs.bib @@ -444,3 +444,38 @@ @article{Vranic2015 issn = {0010-4655}, doi = {https://doi.org/10.1016/j.cpc.2015.01.020}, } + +@misc{Fallahi2020, + title={MITHRA 2.0: A Full-Wave Simulation Tool for Free Electron Lasers}, + author={Arya Fallahi}, + year={2020}, + eprint={2009.13645}, + archivePrefix={arXiv}, + primaryClass={physics.acc-ph}, + url={https://arxiv.org/abs/2009.13645}, +} + +@article{VayFELA2009, + title = {FULL ELECTROMAGNETIC SIMULATION OF FREE-ELECTRON LASER AMPLIFIER PHYSICS VIA THE LORENTZ-BOOSTED FRAME APPROACH}, + author = {Fawley, William M and Vay, Jean-Luc}, + abstractNote = {Numerical simulation of some systems containing charged particles with highly relativistic directed motion can by speeded up by orders of magnitude by choice of the proper Lorentz-boosted frame[1]. A particularly good example is that of short wavelength free-electron lasers (FELs) in which a high energy electron beam interacts with a static magnetic undulator. In the optimal boost frame with Lorentz factor gamma_F , the red-shifted FEL radiation and blue shifted undulator have identical wavelengths and the number of required time-steps (presuming the Courant condition applies) decreases by a factor of 2(gamma_F)**2 for fully electromagnetic simulation. We have adapted the WARP code [2]to apply this method to several FEL problems involving coherent spontaneous emission (CSE) from pre-bunched ebeams, including that in a biharmonic undulator.}, + url = {https://www.osti.gov/biblio/964405}, + place = {United States}, + year = {2009}, + month = {4}, +} + +@article{VayFELB2009, + author = {Fawley, W. M. 
and Vay, J.‐L.}, + title = "{Use of the Lorentz‐Boosted Frame Transformation to Simulate Free‐Electron Laser Amplifier Physics}", + journal = {AIP Conference Proceedings}, + volume = {1086}, + number = {1}, + pages = {346-350}, + year = {2009}, + month = {01}, + abstract = "{Recently [1] it has been pointed out that numerical simulation of some systems containing charged particles with highly relativistic directed motion can by speeded up by orders of magnitude by choice of the proper Lorentz boosted frame. A particularly good example is that of short wavelength free‐electron lasers (FELs) in which a high energy (E0⩾250 MeV) electron beam interacts with a static magnetic undulator. In the optimal boost frame with Lorentz factor γF, the red‐shifted FEL radiation and blue shifted undulator have identical wavelengths and the number of required time‐steps (presuming the Courant condition applies) decreases by a factor of γF2 for fully electromagnetic simulation.We have adapted the WARP code [2] to apply this method to several FEL problems including coherent spontaneous emission (CSE) from pre‐bunched e‐beams, and strong exponential gain in a single pass amplifier configuration. We discuss our results and compare with those from the “standard” FEL simulation approach which adopts the eikonal approximation for propagation of the radiation field.}", + issn = {0094-243X}, + doi = {10.1063/1.3080930}, + url = {https://doi.org/10.1063/1.3080930}, +} diff --git a/Docs/source/usage/examples.rst b/Docs/source/usage/examples.rst index f1bd2ec4266..237c10ab5fb 100644 --- a/Docs/source/usage/examples.rst +++ b/Docs/source/usage/examples.rst @@ -44,7 +44,7 @@ Particle Accelerator & Beam Physics examples/gaussian_beam/README.rst examples/beam_beam_collision/README.rst - + examples/free_electron_laser/README.rst High Energy Astrophysical Plasma Physics ---------------------------------------- diff --git a/Docs/source/usage/examples/free_electron_laser b/Docs/source/usage/examples/free_electron_laser new file mode 120000 index 00000000000..1ce0fedd798 --- /dev/null +++ b/Docs/source/usage/examples/free_electron_laser @@ -0,0 +1 @@ +../../../../Examples/Physics_applications/free_electron_laser \ No newline at end of file diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 5014d421fb8..a6ba9a2773d 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -257,7 +257,7 @@ Overall simulation parameters ``warpx.self_fields_absolute_tolerance``). * ``fft``: Poisson's equation is solved using an Integrated Green Function method (which requires FFT calculations). - See these references for more details :cite:t:`QiangPhysRevSTAB2006`, :cite:t:`QiangPhysRevSTAB2006err`. + See these references for more details :cite:t:`param-QiangPhysRevSTAB2006`, :cite:t:`param-QiangPhysRevSTAB2006err`. It only works in 3D and it requires the compilation flag ``-DWarpX_FFT=ON``. If mesh refinement is enabled, this solver only works on the coarsest level. On the refined patches, the Poisson equation is solved with the multigrid solver. 
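The boosted-frame bookkeeping behind this example can be checked with a few lines of Python; a sketch using the section-5.1 parameters of Fallahi (2020) that the new test adopts (the relations are the ones quoted in the references above):

import numpy as np
from scipy.constants import c, e, m_e

gamma_bunch = 100.6  # beam Lorentz factor
Bu = 0.5             # undulator field amplitude (T)
lambda_u = 3e-2      # undulator period (m)

k_u = 2 * np.pi / lambda_u
K = e * Bu / (m_e * c * k_u)  # undulator parameter
# Lorentz factor of the ponderomotive frame, in which the red-shifted radiation
# and the blue-shifted undulator have identical wavelengths
gamma_boost = gamma_bunch / np.sqrt(1 + K * K / 2)
# resonant radiation wavelength in the lab frame
lambda_radiation = lambda_u / (2 * gamma_boost**2)
# expected reduction in the number of time steps for a fully electromagnetic run
speedup = 2 * gamma_boost**2

print(f"K = {K:.2f}, gamma_boost = {gamma_boost:.1f}")
print(f"lambda_radiation = {lambda_radiation:.2e} m, step reduction ~ {speedup:.0f}x")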
diff --git a/Examples/Physics_applications/CMakeLists.txt b/Examples/Physics_applications/CMakeLists.txt index e4f8565a140..7f0f0ecfaf7 100644 --- a/Examples/Physics_applications/CMakeLists.txt +++ b/Examples/Physics_applications/CMakeLists.txt @@ -3,6 +3,7 @@ add_subdirectory(beam_beam_collision) add_subdirectory(capacitive_discharge) +add_subdirectory(free_electron_laser) add_subdirectory(laser_acceleration) add_subdirectory(laser_ion) add_subdirectory(plasma_acceleration) diff --git a/Examples/Physics_applications/free_electron_laser/CMakeLists.txt b/Examples/Physics_applications/free_electron_laser/CMakeLists.txt new file mode 100644 index 00000000000..f5bc8d857d2 --- /dev/null +++ b/Examples/Physics_applications/free_electron_laser/CMakeLists.txt @@ -0,0 +1,12 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_1d_fel # name + 1 # dims + 2 # nprocs + inputs_test_1d_fel # inputs + analysis_fel.py # analysis + diags/diag_labframe # output + OFF # dependency +) diff --git a/Examples/Physics_applications/free_electron_laser/README.rst b/Examples/Physics_applications/free_electron_laser/README.rst new file mode 100644 index 00000000000..00d6ef2758c --- /dev/null +++ b/Examples/Physics_applications/free_electron_laser/README.rst @@ -0,0 +1,46 @@ +.. _examples-free-electron-laser: + +Free-electron laser +=================== + +This example shows how to simulate the physics of a free-electron laser (FEL) using WarpX. +In this example, a relativistic electron beam is sent through an undulator (represented by an external, +oscillating magnetic field). The radiation emitted by the beam grows exponentially +as the beam travels through the undulator, due to the Free-Electron-Laser instability. + +The parameters of the simulation are taken from section 5.1 of :cite:t:`ex-Fallahi2020`. + +The simulation is performed in 1D, and uses the boosted-frame technique as described in +:cite:t:`ex-VayFELA2009` and :cite:t:`ex-VayFELB2009` to reduce the computational cost (the Lorentz frame of the simulation is moving at the average speed of the beam in the undulator). +Even though the simulation is run in this boosted frame, the results are reconstructed in the +laboratory frame, using WarpX's ``BackTransformed`` diagnostic. + +The effect of space-charge is intentionally turned off in this example, as it may not be properly modeled in 1D. +This is achieved by initializing two species of opposite charge (electrons and positrons) to +represent the physical electron beam, as discussed in :cite:t:`ex-VayFELB2009`. + +Run +--- + +This example can be run with the WarpX executable using an input file: ``warpx.1d inputs_test_1d_fel``. For `MPI-parallel `__ runs, prefix these lines with ``mpiexec -n 4 ...`` or ``srun -n 4 ...``, depending on the system. + +.. literalinclude:: inputs_test_1d_fel + :language: ini + :caption: You can copy this file from ``Examples/Physics_applications/free_electron_laser/inputs_test_1d_fel``. + +Visualize +--------- + +The figure below shows the results of the simulation. The left panel shows the exponential growth of the radiation along the undulator (note that the vertical axis is plotted in log scale). The right panel shows a snapshot of the simulation, +1.6 m into the undulator. Microbunching of the beam is visible in the electron density (blue). One can also see the +emitted FEL radiation (red) slipping ahead of the beam. + +.. 
figure:: https://gist.githubusercontent.com/RemiLehe/871a1e24c69e353c5dbb4625cd636cd1/raw/7f4e3da7e0001cff6c592190fee8622580bbe37a/FEL.png
+   :alt: Results of the WarpX FEL simulation.
+   :width: 100%
+
+This figure was obtained with the script below, which can be run with ``python3 plot_sim.py``.
+
+.. literalinclude:: plot_sim.py
+   :language: python3
+   :caption: You can copy this file from ``Examples/Physics_applications/free_electron_laser/plot_sim.py``.
diff --git a/Examples/Physics_applications/free_electron_laser/analysis_fel.py b/Examples/Physics_applications/free_electron_laser/analysis_fel.py
new file mode 100755
index 00000000000..3ab80d195c0
--- /dev/null
+++ b/Examples/Physics_applications/free_electron_laser/analysis_fel.py
@@ -0,0 +1,145 @@
+#!/usr/bin/env python
+
+"""
+This script tests that the FEL is correctly modelled in the simulation.
+
+The physical parameters are the same as the ones from section 5.1
+of https://arxiv.org/pdf/2009.13645
+
+The simulation uses the boosted-frame technique as described in
+https://www.osti.gov/servlets/purl/940581
+In particular, the effect of space-charge is effectively turned off
+by initializing an electron and positron beam on top of each other,
+each having half the current of the physical beam.
+
+The script checks that the radiation wavelength and gain length
+are the expected ones. The check is performed both in the
+lab-frame diagnostics and boosted-frame diagnostics.
+"""
+
+import os
+import sys
+
+import numpy as np
+from openpmd_viewer import OpenPMDTimeSeries
+from scipy.constants import c, e, m_e
+
+sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
+from checksumAPI import evaluate_checksum
+
+# Physical parameters of the test
+gamma_bunch = 100.6
+Bu = 0.5
+lambda_u = 3e-2
+k_u = 2 * np.pi / lambda_u
+K = e * Bu / (m_e * c * k_u)  # Undulator parameter
+gamma_boost = (
+    gamma_bunch / (1 + K * K / 2) ** 0.5
+)  # Lorentz factor of the ponderomotive frame
+beta_boost = (1 - 1.0 / gamma_boost**2) ** 0.5
+
+
+# Analyze the diagnostics showing quantities in the lab frame
+filename = sys.argv[1]
+ts_lab = OpenPMDTimeSeries(filename)
+
+
+# Extract the growth of the peak electric field
+def extract_peak_E_lab(iteration):
+    """
+    Extract the position and value of the peak electric field
+    """
+    Ex, info = ts_lab.get_field("E", "x", iteration=iteration)
+    Ex_max = abs(Ex).max()
+    z_max = info.z[abs(Ex).argmax()]
+    return z_max, Ex_max
+
+
+# Loop through all iterations
+# Since the radiation power is proportional to the square of the peak electric field,
+# the log of the power is equal to the log of the square of the peak electric field,
+# up to an additive constant.
+z_lab_peak, E_lab_peak = ts_lab.iterate(extract_peak_E_lab)
+log_P_peak = np.log(E_lab_peak**2)
+
+# Pick the iterations between which the growth of the log of the power is linear
+# (i.e. the growth of the power is exponential) and fit a line to extract the
+# gain length.
+i_start = 6 +i_end = 23 +# Perform linear fit +p = np.polyfit(z_lab_peak[i_start:i_end], log_P_peak[i_start:i_end], 1) +# Extract the gain length +Lg = 1 / p[0] +Lg_expected = 0.22 # Expected gain length from https://arxiv.org/pdf/2009.13645 +print(f"Gain length: {Lg}") +assert abs(Lg - Lg_expected) / Lg_expected < 0.15 + +# Check that the radiation wavelength is the expected one +iteration_check = 14 +Ex, info = ts_lab.get_field("E", "x", iteration=iteration_check) +Nz = len(info.z) +fft_E = abs(np.fft.fft(Ex)) +lambd = 1.0 / np.fft.fftfreq(Nz, d=info.dz) +lambda_radiation_lab = lambd[fft_E[: Nz // 2].argmax()] +lambda_expected = lambda_u / (2 * gamma_boost**2) +print(f"lambda_radiation_lab: {lambda_radiation_lab}") +print(f"lambda_expected: {lambda_expected}") +assert abs(lambda_radiation_lab - lambda_expected) / lambda_expected < 0.01 + +# Analyze the diagnostics showing quantities in the boosted frame +ts = OpenPMDTimeSeries("diags/diag_boostedframe") + + +# Extract the growth of the peak electric field +def extract_peak_E_boost(iteration): + """ + Extract the peak electric field in a *boosted-frame* snapshot. + Also return the position of the peak in the lab frame. + """ + Ex, info = ts.get_field("E", "x", iteration=iteration) + By, info = ts.get_field("B", "y", iteration=iteration) + E_lab = gamma_boost * (Ex + c * beta_boost * By) + E_lab_peak = abs(E_lab).max() + z_boost_peak = info.z[abs(E_lab).argmax()] + t_boost_peak = ts.current_t + z_lab_peak = gamma_boost * (z_boost_peak + beta_boost * c * t_boost_peak) + return z_lab_peak, E_lab_peak + + +# Loop through all iterations +z_lab_peak, E_lab_peak = ts.iterate(extract_peak_E_boost) +log_P_peak = np.log(E_lab_peak**2) + +# Pick the iterations between which the growth of the log of the power is linear +# (i.e. the growth of the power is exponential) and fit a line to extract the +# gain length. 
+i_start = 16
+i_end = 25
+# Perform linear fit
+p = np.polyfit(z_lab_peak[i_start:i_end], log_P_peak[i_start:i_end], 1)
+# Extract the gain length
+Lg = 1 / p[0]
+Lg_expected = 0.22  # Expected gain length from https://arxiv.org/pdf/2009.13645
+print(f"Gain length: {Lg}")
+assert abs(Lg - Lg_expected) / Lg_expected < 0.15
+
+# Check that the radiation wavelength is the expected one
+iteration_check = 2000
+Ex, info = ts.get_field("E", "x", iteration=iteration_check)
+By, info = ts.get_field("B", "y", iteration=iteration_check)
+E_lab = gamma_boost * (Ex + c * beta_boost * By)
+Nz = len(info.z)
+fft_E = abs(np.fft.fft(E_lab))
+lambd = 1.0 / np.fft.fftfreq(Nz, d=info.dz)
+lambda_radiation_boost = lambd[fft_E[: Nz // 2].argmax()]
+lambda_radiation_lab = lambda_radiation_boost / (2 * gamma_boost)
+lambda_expected = lambda_u / (2 * gamma_boost**2)
+assert abs(lambda_radiation_lab - lambda_expected) / lambda_expected < 0.01
+
+# compare checksums
+evaluate_checksum(
+    test_name=os.path.split(os.getcwd())[1],
+    output_file=sys.argv[1],
+    output_format="openpmd",
+)
diff --git a/Examples/Physics_applications/free_electron_laser/inputs_test_1d_fel b/Examples/Physics_applications/free_electron_laser/inputs_test_1d_fel
new file mode 100644
index 00000000000..79fdadab8ae
--- /dev/null
+++ b/Examples/Physics_applications/free_electron_laser/inputs_test_1d_fel
@@ -0,0 +1,92 @@
+my_constants.gamma_bunch=100.6
+my_constants.Bu = 0.5
+my_constants.lambda_u = 3e-2
+my_constants.k_u= 2*pi/lambda_u
+my_constants.K = q_e*Bu/(m_e*clight*k_u) # Undulator parameter
+
+warpx.gamma_boost = gamma_bunch/sqrt(1+K*K/2) # Lorentz factor of the ponderomotive frame
+warpx.boost_direction = z
+algo.maxwell_solver = yee
+algo.particle_shape = 2
+algo.particle_pusher = vay
+
+# geometry
+geometry.dims = 1
+geometry.prob_hi = 0
+geometry.prob_lo = -192e-6
+
+amr.max_grid_size = 1024
+amr.max_level = 0
+amr.n_cell = 1024
+
+# boundary
+boundary.field_hi = absorbing_silver_mueller
+boundary.field_lo = absorbing_silver_mueller
+boundary.particle_hi = absorbing
+boundary.particle_lo = absorbing
+
+# diagnostics
+diagnostics.diags_names = diag_labframe diag_boostedframe
+
+# Diagnostic that shows quantities in the frame
+# of the simulation (boosted-frame)
+diag_boostedframe.diag_type = Full
+diag_boostedframe.format = openpmd
+diag_boostedframe.intervals = 100
+
+# Diagnostic that shows quantities
+# reconstructed in the lab frame
+diag_labframe.diag_type = BackTransformed
+diag_labframe.num_snapshots_lab = 25
+diag_labframe.dz_snapshots_lab = 0.1
+diag_labframe.format = openpmd
+diag_labframe.buffer_size = 64
+
+# Run the simulation long enough for
+# all backtransformed diagnostics to be complete
+warpx.compute_max_step_from_btd = 1
+
+particles.species_names = electrons positrons
+particles.rigid_injected_species= electrons positrons
+
+electrons.charge = -q_e
+electrons.injection_style = nuniformpercell
+electrons.mass = m_e
+electrons.momentum_distribution_type = constant
+electrons.num_particles_per_cell_each_dim = 8
+electrons.profile = constant
+electrons.density = 2.7e19/2
+electrons.ux = 0.0
+electrons.uy = 0.0
+electrons.uz = gamma_bunch
+electrons.zmax = -25e-6
+electrons.zmin = -125e-6
+electrons.zinject_plane=0.0
+electrons.rigid_advance=0
+
+positrons.charge = q_e
+positrons.injection_style = nuniformpercell
+positrons.mass = m_e
+positrons.momentum_distribution_type = constant
+positrons.num_particles_per_cell_each_dim = 8
+positrons.profile = constant
+positrons.density = 2.7e19/2
+positrons.ux = 0.0
+positrons.uy = 
0.0
+positrons.uz = gamma_bunch
+positrons.zmax = -25e-6
+positrons.zmin = -125e-6
+positrons.zinject_plane=0.0
+positrons.rigid_advance=0
+
+warpx.do_moving_window = 1
+warpx.moving_window_dir = z
+warpx.moving_window_v = sqrt(1-(1+K*K/2)/(gamma_bunch*gamma_bunch))
+
+# Undulator field
+particles.B_ext_particle_init_style = parse_B_ext_particle_function
+particles.Bx_external_particle_function(x,y,z,t) = 0
+particles.By_external_particle_function(x,y,z,t) = if( z>0, Bu*cos(k_u*z), 0 )
+particles.Bz_external_particle_function(x,y,z,t) =0.0
+
+warpx.cfl = 0.99
diff --git a/Examples/Physics_applications/free_electron_laser/plot_sim.py b/Examples/Physics_applications/free_electron_laser/plot_sim.py
new file mode 100644
index 00000000000..e7635d65790
--- /dev/null
+++ b/Examples/Physics_applications/free_electron_laser/plot_sim.py
@@ -0,0 +1,52 @@
+import matplotlib.pyplot as plt
+from openpmd_viewer import OpenPMDTimeSeries
+
+ts = OpenPMDTimeSeries("./diags/diag_labframe/")
+
+
+def extract_peak_E(iteration):
+    """
+    Extract peak electric field and its position
+    """
+    Ex, info = ts.get_field("E", "x", iteration=iteration)
+    Ex_max = abs(Ex).max()
+    z_max = info.z[abs(Ex).argmax()]
+    return z_max, Ex_max
+
+
+# Loop through the lab-frame snapshots and extract the peak electric field
+z_max, Ex_max = ts.iterate(extract_peak_E)
+
+# Create a figure
+plt.figure(figsize=(8, 4))
+
+# Plot of the E field growth
+plt.subplot(121)  # Left panel
+plt.semilogy(z_max, Ex_max)
+plt.ylim(2e7, 2e9)
+plt.xlabel("z (m)")
+plt.ylabel("Peak $E_x$ (V/m)")
+plt.title("Growth of the radiation field\n along the undulator")
+
+# Plot of a snapshot
+iteration = 16
+plt.subplot(122)  # Right panel
+
+
+plt.ylabel("$E_x$ (V/m)")
+plt.xlabel("")
+ts.get_particle(["z"], iteration=iteration, nbins=300, species="electrons", plot=True)
+plt.title("")
+plt.ylim(0, 30e12)
+plt.ylabel("Electron density (a. 
u.)", color="b") +plt.twinx() +Ex, info = ts.get_field("E", "x", iteration=iteration, plot=True) +plt.ylabel("$E_x$ (V/m)", color="r") +plt.plot(info.z, Ex, color="r") +plt.ylim(-0.6e9, 0.4e9) +plt.xlabel("z (m)") +plt.title("Snapshot 1.6 m into the undulator") + +plt.tight_layout() + +plt.savefig("FEL.png") diff --git a/Regression/Checksum/benchmarks_json/test_1d_fel.json b/Regression/Checksum/benchmarks_json/test_1d_fel.json new file mode 100644 index 00000000000..2bd9c1fad80 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_1d_fel.json @@ -0,0 +1,31 @@ +{ + "lev=0": { + "Bx": 0.0, + "By": 514.5044890273722, + "Bz": 0.0, + "Ex": 154245109024.33972, + "Ey": 0.0, + "Ez": 0.0, + "jx": 1161126105.5594487, + "jy": 0.0, + "jz": 0.0 + }, + "electrons": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 13607.569953355982, + "particle_momentum_x": 3.095483353687591e-19, + "particle_momentum_y": 0.0, + "particle_momentum_z": 1.5419514460764825e-16, + "particle_weight": 1349823909946836.0 + }, + "positrons": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 13607.569953355982, + "particle_momentum_x": 3.095483353687591e-19, + "particle_momentum_y": 0.0, + "particle_momentum_z": 1.5419514460764825e-16, + "particle_weight": 1349823909946836.0 + } +} \ No newline at end of file From baf32f3731499f20da78afd2ffe6afb9c76631fb Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 11 Oct 2024 14:26:10 -0700 Subject: [PATCH 047/278] CI: skip build/test jobs if only `Docs/` is modified (#5387) Could we do this to make sure that we run the GitHub Actions and Azure jobs (build, test) only if _at least one file outside the_ `Docs` _directory_ is modified, i.e., skip those jobs if only files in the `Docs` directory are modified? I think it would be safe to do so (and a bit of a waste of resources to not do so...), but I leave it open for discussion. If merged, we could test this rebasing #5386 and seeing if the correct CI jobs are skipped. Note that this PR leaves the other CI jobs untouched, e.g., `source`, `docs`, `CodeQL`, etc. 
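For GitHub Actions, the shape of the change is a one-line path filter on the pull-request trigger; a minimal sketch (the per-workflow diffs below apply exactly this):

on:
  push:
    branches:
      - "development"
  pull_request:
    paths-ignore:
      - "Docs/**"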
--- .azure-pipelines.yml | 3 +++ .github/workflows/clang_sanitizers.yml | 2 ++ .github/workflows/clang_tidy.yml | 2 ++ .github/workflows/cuda.yml | 2 ++ .github/workflows/hip.yml | 2 ++ .github/workflows/insitu.yml | 2 ++ .github/workflows/intel.yml | 2 ++ .github/workflows/macos.yml | 2 ++ .github/workflows/ubuntu.yml | 2 ++ .github/workflows/windows.yml | 2 ++ 10 files changed, 21 insertions(+) diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index a32ecb8fa24..607edecedd6 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -9,6 +9,9 @@ pr: branches: include: - development + paths: + exclude: + - Docs jobs: - job: diff --git a/.github/workflows/clang_sanitizers.yml b/.github/workflows/clang_sanitizers.yml index 067488911bb..e89cb676a03 100644 --- a/.github/workflows/clang_sanitizers.yml +++ b/.github/workflows/clang_sanitizers.yml @@ -5,6 +5,8 @@ on: branches: - "development" pull_request: + paths-ignore: + - "Docs/**" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-clangsanitizers diff --git a/.github/workflows/clang_tidy.yml b/.github/workflows/clang_tidy.yml index 9088e3af134..edb3e8b1988 100644 --- a/.github/workflows/clang_tidy.yml +++ b/.github/workflows/clang_tidy.yml @@ -5,6 +5,8 @@ on: branches: - "development" pull_request: + paths-ignore: + - "Docs/**" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-clangtidy diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 2209f425d1f..a4fc4e49ace 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -5,6 +5,8 @@ on: branches: - "development" pull_request: + paths-ignore: + - "Docs/**" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-cuda diff --git a/.github/workflows/hip.yml b/.github/workflows/hip.yml index 12513caa19a..8ba39de7742 100644 --- a/.github/workflows/hip.yml +++ b/.github/workflows/hip.yml @@ -5,6 +5,8 @@ on: branches: - "development" pull_request: + paths-ignore: + - "Docs/**" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-hip diff --git a/.github/workflows/insitu.yml b/.github/workflows/insitu.yml index 0cc6a1ced5e..50b482d28d3 100644 --- a/.github/workflows/insitu.yml +++ b/.github/workflows/insitu.yml @@ -5,6 +5,8 @@ on: branches: - "development" pull_request: + paths-ignore: + - "Docs/**" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-insituvis diff --git a/.github/workflows/intel.yml b/.github/workflows/intel.yml index f27181c2e20..170008d0672 100644 --- a/.github/workflows/intel.yml +++ b/.github/workflows/intel.yml @@ -5,6 +5,8 @@ on: branches: - "development" pull_request: + paths-ignore: + - "Docs/**" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-intel diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 0afaf6ea451..069567d39ec 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -5,6 +5,8 @@ on: branches: - "development" pull_request: + paths-ignore: + - "Docs/**" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-macos diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index 6435ed7e66a..bbe20679781 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -5,6 +5,8 @@ on: branches: - "development" pull_request: + paths-ignore: + - "Docs/**" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-ubuntu diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 1d8b0fd0495..ae4843e0536 100644 --- a/.github/workflows/windows.yml +++ 
b/.github/workflows/windows.yml @@ -5,6 +5,8 @@ on: branches: - "development" pull_request: + paths-ignore: + - "Docs/**" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-windows From f55687979867c642f25d6a044d0acbcc3cfc343d Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 11 Oct 2024 16:59:40 -0700 Subject: [PATCH 048/278] `PYBIND11_FINDPYTHON=ON` (#5390) Reuse our `find_package(Python ...)` call and use new CMake logic in pybind11. https://pybind11.readthedocs.io/en/stable/compiling.html#modules-with-cmake https://cmake.org/cmake/help/latest/command/find_package.html#config-mode-version-selection X-ref: https://github.com/openPMD/openPMD-api/pull/1677#issuecomment-2407767112 Fix #5159 Signed-off-by: Axel Huebl --- CMakeLists.txt | 2 +- cmake/dependencies/pybind11.cmake | 5 +++++ setup.py | 8 ++++++++ 3 files changed, 14 insertions(+), 1 deletion(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index 980b23183fd..c08c72489cb 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -254,7 +254,7 @@ endif() # Python if(WarpX_PYTHON) - find_package(Python COMPONENTS Interpreter Development.Module REQUIRED) + find_package(Python 3.8 COMPONENTS Interpreter Development.Module REQUIRED) # default installation directories: Python warpx_set_default_install_dirs_python() diff --git a/cmake/dependencies/pybind11.cmake b/cmake/dependencies/pybind11.cmake index 94d38e69112..50b00013f7a 100644 --- a/cmake/dependencies/pybind11.cmake +++ b/cmake/dependencies/pybind11.cmake @@ -10,6 +10,11 @@ function(find_pybind11) message(STATUS "pybind11 repository: ${WarpX_pybind11_repo} (${WarpX_pybind11_branch})") include(FetchContent) endif() + + # rely on our find_package(Python ...) call + # https://pybind11.readthedocs.io/en/stable/compiling.html#modules-with-cmake + set(PYBIND11_FINDPYTHON ON) + if(WarpX_pybind11_internal OR WarpX_pybind11_src) set(CMAKE_POLICY_DEFAULT_CMP0077 NEW) diff --git a/setup.py b/setup.py index 9683c8ab675..f2bc72ff386 100644 --- a/setup.py +++ b/setup.py @@ -84,7 +84,15 @@ def build_extension(self, ext): r_dim = re.search(r"warpx_(1|2|rz|3)(?:d*)", ext.name) dims = r_dim.group(1).upper() + pyv = sys.version_info cmake_args = [ + # Python: use the calling interpreter in CMake + # https://cmake.org/cmake/help/latest/module/FindPython.html#hints + # https://cmake.org/cmake/help/latest/command/find_package.html#config-mode-version-selection + f"-DPython_ROOT_DIR={sys.prefix}", + f"-DPython_FIND_VERSION={pyv.major}.{pyv.minor}.{pyv.micro}", + "-DPython_FIND_VERSION_EXACT=TRUE", + "-DPython_FIND_STRATEGY=LOCATION", "-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=" + os.path.join(extdir, "pywarpx"), "-DCMAKE_RUNTIME_OUTPUT_DIRECTORY=" + extdir, "-DWarpX_DIMS=" + dims, From 61e870603c06b273724b505d62953853e989c2f4 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 14 Oct 2024 14:52:34 -0700 Subject: [PATCH 049/278] HPC: Rename `SRUN_CPUS_PER_TASK` to `SLURM_...` (#5340) This environment variable was used for Perlmutter when `--cpus-per-task=N` did not work yet. It was copied around to other templates. These days, `--cpus-per-task` should work and the name of the env variable was renamed in SLURM to `SLURM_CPUS_PER_TASK`. https://slurm.schedmd.com/sbatch.html#OPT_SLURM_CPUS_PER_TASK Thanks to NERSC engineers for reporting this update! 
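The migration pattern for the job scripts is a two-line change, sketched here for a generic template (the core count is machine-specific):

#SBATCH --cpus-per-task=16
# SLURM sets SLURM_CPUS_PER_TASK in the job environment from --cpus-per-task
export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}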
--- Tools/machines/greatlakes-umich/greatlakes_v100.sbatch | 3 +-- Tools/machines/karolina-it4i/karolina_gpu.sbatch | 7 +++---- Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch | 3 ++- Tools/machines/perlmutter-nersc/perlmutter_cpu.sbatch | 5 +++-- Tools/machines/perlmutter-nersc/perlmutter_gpu.sbatch | 4 ++-- Tools/machines/tioga-llnl/tioga_mi300a.sbatch | 4 ++-- 6 files changed, 13 insertions(+), 13 deletions(-) diff --git a/Tools/machines/greatlakes-umich/greatlakes_v100.sbatch b/Tools/machines/greatlakes-umich/greatlakes_v100.sbatch index 0353c08456f..4814c439dd9 100644 --- a/Tools/machines/greatlakes-umich/greatlakes_v100.sbatch +++ b/Tools/machines/greatlakes-umich/greatlakes_v100.sbatch @@ -26,8 +26,7 @@ INPUTS=inputs # per node are 2x 2.4 GHz Intel Xeon Gold 6148 # note: the system seems to only expose cores (20 per socket), # not hyperthreads (40 per socket) -export SRUN_CPUS_PER_TASK=20 -export OMP_NUM_THREADS=${SRUN_CPUS_PER_TASK} +export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK} # GPU-aware MPI optimizations GPU_AWARE_MPI="amrex.use_gpu_aware_mpi=1" diff --git a/Tools/machines/karolina-it4i/karolina_gpu.sbatch b/Tools/machines/karolina-it4i/karolina_gpu.sbatch index 6171ff03abc..ccb4f3dc2c3 100644 --- a/Tools/machines/karolina-it4i/karolina_gpu.sbatch +++ b/Tools/machines/karolina-it4i/karolina_gpu.sbatch @@ -25,13 +25,12 @@ #SBATCH -o stdout_%j #SBATCH -e stderr_%j -# OpenMP threads per MPI rank -export OMP_NUM_THREADS=16 -export SRUN_CPUS_PER_TASK=16 - # set user rights to u=rwx;g=r-x;o=--- umask 0027 +# OpenMP threads per MPI rank +export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK} + # executable & inputs file or python interpreter & PICMI script here EXE=./warpx.rz INPUTS=./inputs_rz diff --git a/Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch b/Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch index bef40942ed6..933f21093a2 100644 --- a/Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch +++ b/Tools/machines/lonestar6-tacc/lonestar6_a100.sbatch @@ -14,6 +14,7 @@ #SBATCH -q regular #SBATCH -C gpu #SBATCH --exclusive +#SBATCH --cpus-per-task=32 #SBATCH --gpu-bind=none #SBATCH --gpus-per-node=4 #SBATCH -o WarpX.o%j @@ -27,7 +28,7 @@ INPUTS=inputs_small export MPICH_OFI_NIC_POLICY=GPU # threads for OpenMP and threaded compressors per MPI rank -export SRUN_CPUS_PER_TASK=32 +export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK} # depends on https://github.com/ECP-WarpX/WarpX/issues/2009 #GPU_AWARE_MPI="amrex.the_arena_is_managed=0 amrex.use_gpu_aware_mpi=1" diff --git a/Tools/machines/perlmutter-nersc/perlmutter_cpu.sbatch b/Tools/machines/perlmutter-nersc/perlmutter_cpu.sbatch index d13c7e3b4e5..84e93dbb8ea 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter_cpu.sbatch +++ b/Tools/machines/perlmutter-nersc/perlmutter_cpu.sbatch @@ -13,6 +13,8 @@ #SBATCH -A #SBATCH -q regular #SBATCH -C cpu +# 8 cores per chiplet, 2x SMP +#SBATCH --cpus-per-task=16 #SBATCH --ntasks-per-node=16 #SBATCH --exclusive #SBATCH -o WarpX.o%j @@ -30,10 +32,9 @@ INPUTS=inputs_small # This will be our MPI rank assignment (2x8 is 16 ranks/node). 
# threads for OpenMP and threaded compressors per MPI rank -export SRUN_CPUS_PER_TASK=16 # 8 cores per chiplet, 2x SMP export OMP_PLACES=threads export OMP_PROC_BIND=spread -export OMP_NUM_THREADS=${SRUN_CPUS_PER_TASK} +export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK} srun --cpu-bind=cores \ ${EXE} ${INPUTS} \ diff --git a/Tools/machines/perlmutter-nersc/perlmutter_gpu.sbatch b/Tools/machines/perlmutter-nersc/perlmutter_gpu.sbatch index f2ea5fa3e7f..37bd5d60c54 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter_gpu.sbatch +++ b/Tools/machines/perlmutter-nersc/perlmutter_gpu.sbatch @@ -17,6 +17,7 @@ # A100 80GB (256 nodes) #S BATCH -C gpu&hbm80g #SBATCH --exclusive +#SBATCH --cpus-per-task=16 # ideally single:1, but NERSC cgroups issue #SBATCH --gpu-bind=none #SBATCH --ntasks-per-node=4 @@ -33,8 +34,7 @@ export MPICH_OFI_NIC_POLICY=GPU # threads for OpenMP and threaded compressors per MPI rank # note: 16 avoids hyperthreading (32 virtual cores, 16 physical) -export SRUN_CPUS_PER_TASK=16 -export OMP_NUM_THREADS=${SRUN_CPUS_PER_TASK} +export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK} # GPU-aware MPI optimizations GPU_AWARE_MPI="amrex.use_gpu_aware_mpi=1" diff --git a/Tools/machines/tioga-llnl/tioga_mi300a.sbatch b/Tools/machines/tioga-llnl/tioga_mi300a.sbatch index 0e29e24adcb..94ee97bc6a1 100644 --- a/Tools/machines/tioga-llnl/tioga_mi300a.sbatch +++ b/Tools/machines/tioga-llnl/tioga_mi300a.sbatch @@ -12,6 +12,7 @@ #SBATCH -J WarpX #S BATCH -A # project name not needed yet #SBATCH -p mi300a +#SBATCH --cpus-per-task=16 #SBATCH --gpu-bind=none #SBATCH --ntasks-per-node=4 #SBATCH --gpus-per-node=4 @@ -27,8 +28,7 @@ export MPICH_OFI_NIC_POLICY=GPU # threads for OpenMP and threaded compressors per MPI rank # note: 16 avoids hyperthreading (32 virtual cores, 16 physical) -export SRUN_CPUS_PER_TASK=16 -export OMP_NUM_THREADS=${SRUN_CPUS_PER_TASK} +export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK} # GPU-aware MPI optimizations GPU_AWARE_MPI="amrex.use_gpu_aware_mpi=1" From 3f730b35bc32d49b7429fed1ce58708a0a2e1f62 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Tue, 15 Oct 2024 08:54:43 -0700 Subject: [PATCH 050/278] CI: avoid duplicate runs for secondary branches on main repo (#5394) The fix introduced in #5308 was not correct for Azure pipelines. In GitHub Actions we trigger a run on the `push` event only for the `development` branch. The Azure equivalent of that is triggering a run on the `trigger` event only for the `development` branch. However, since the `trigger` event was completely absent from the Azure pipeline file (that is, the default setup was being used), I had erroneously added the filter branch to the `pr` event instead, unlike what I did for GitHub actions where the `push` was exposed in the YAML files. This was originally aimed at avoiding duplicate runs for "individual CI" when `pre-commit` opens a pull request by pushing to a secondary branch `pre-commit-ci-update-config` in the main repo (instead of a fork). The new setup is tested in #5393, where I copied these changes and where one can see that a commit pushed to that PR does not trigger an "individual CI" Azure pipeline anymore, but only a "PR automated" one. Hopefully this is correct for the merge commits that get pushed to `development` once a PR is closed, but we'll be able to test this only after merging a PR. 
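In short, the intended final shape of the Azure configuration separates the two events; a sketch (the full change follows in the diff below):

trigger:        # push event: runs only for merge commits on development
  branches:
    include:
      - development
pr:             # pull_request event: runs for all non-draft PRs, skipping Docs-only changes
  autoCancel: true
  drafts: false
  paths:
    exclude:
      - Docs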
--- .azure-pipelines.yml | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index 607edecedd6..bdcfe1c9864 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -3,12 +3,13 @@ pool: vmImage: 'ubuntu-20.04' -pr: - autoCancel: true - drafts: false +trigger: branches: include: - development +pr: + autoCancel: true + drafts: false paths: exclude: - Docs From 09f9e8bad4ac109e167f255e29e961e3604dd5b7 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 15 Oct 2024 17:58:32 +0000 Subject: [PATCH 051/278] [pre-commit.ci] pre-commit autoupdate (#5393) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/mgedmin/check-manifest: 0.49 → 0.50](https://github.com/mgedmin/check-manifest/compare/0.49...0.50) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 8ba600be560..ea5499fa469 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -89,7 +89,7 @@ repos: # Checks the manifest for missing files (native support) - repo: https://github.com/mgedmin/check-manifest - rev: "0.49" + rev: "0.50" hooks: - id: check-manifest # This is a slow hook, so only run this if --hook-stage manual is passed From 1c676b9904ddca98159f8cb61e6b24a5b9170702 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 21 Oct 2024 12:47:22 -0700 Subject: [PATCH 052/278] CI: fix `AppleClang` workflow (#5399) As suggested by @WeiqunZhang: We should move `CXXFLAGS: "-Werror -Wno-error=pass-failed"` to when WarpX builds. It is picked up by `pip`. It didn't fail before probably because there was cached version of Python stuff. Now there is probably a new version of something that requires rebuilding some packages. --- .github/workflows/macos.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 069567d39ec..0ddfcf38b41 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -18,7 +18,6 @@ jobs: runs-on: macos-latest if: github.event.pull_request.draft == false env: - CXXFLAGS: "-Werror -Wno-error=pass-failed" HOMEBREW_NO_INSTALLED_DEPENDENTS_CHECK: TRUE # For macOS, Ninja is slower than the default: #CMAKE_GENERATOR: Ninja @@ -65,6 +64,8 @@ jobs: export CCACHE_SLOPPINESS=time_macros ccache -z + export CXXFLAGS="-Werror -Wno-error=pass-failed" + cmake -S . 
-B build_dp \ -DCMAKE_VERBOSE_MAKEFILE=ON \ -DWarpX_EB=OFF \ From 05e09b1ed809b3375a73d153f6a84d741cb0fb98 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 21 Oct 2024 16:22:14 -0700 Subject: [PATCH 053/278] AMReX/pyAMReX/PICSAR: weekly update (#5391) - Weekly update to latest AMReX: ```console ./Tools/Release/updateAMReX.py ``` - Weekly update to latest pyAMReX: ```console ./Tools/Release/updatepyAMReX.py ``` - Weekly update to latest PICSAR (no changes): ```console ./Tools/Release/updatePICSAR.py ``` --- .github/workflows/cuda.yml | 2 +- Docs/source/developers/particles.rst | 2 +- Source/Diagnostics/ParticleIO.cpp | 6 +++--- Source/Initialization/WarpXInitData.cpp | 12 +++++------ .../NamedComponentParticleContainer.H | 4 ++-- Source/Particles/ParticleBoundaryBuffer.cpp | 20 +++++++++---------- .../Particles/PhysicalParticleContainer.cpp | 16 +++++++-------- .../Particles/WarpXParticleContainer.cpp | 2 +- cmake/dependencies/AMReX.cmake | 2 +- cmake/dependencies/pyAMReX.cmake | 2 +- 10 files changed, 34 insertions(+), 34 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index a4fc4e49ace..1f70e7128bd 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -137,7 +137,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach e1222803739ed2342b9ff6fc2d57316ff0d6cb0c && cd - + cd ../amrex && git checkout --detach 62c2a81eac7862d526e5861ef2befc00b7f5b759 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/Docs/source/developers/particles.rst b/Docs/source/developers/particles.rst index 37260d1ed64..1f1e2eab606 100644 --- a/Docs/source/developers/particles.rst +++ b/Docs/source/developers/particles.rst @@ -141,7 +141,7 @@ Attribute name ``int``/``real`` Description Wher Wheeler process physics is used. ==================== ================ ================================== ===== ==== ====================== -WarpX allows extra runtime attributes to be added to particle containers (through ``AddRealComp("attrname")`` or ``AddIntComp("attrname")``). +WarpX allows extra runtime attributes to be added to particle containers (through ``NewRealComp("attrname")`` or ``NewIntComp("attrname")``). The attribute name can then be used to access the values of that attribute. For example, using a particle iterator, ``pti``, to loop over the particles the command ``pti.GetAttribs(particle_comps["attrname"]).dataPtr();`` will return the values of the ``"attrname"`` attribute. diff --git a/Source/Diagnostics/ParticleIO.cpp b/Source/Diagnostics/ParticleIO.cpp index e94039ec079..05c44f5f594 100644 --- a/Source/Diagnostics/ParticleIO.cpp +++ b/Source/Diagnostics/ParticleIO.cpp @@ -173,7 +173,7 @@ MultiParticleContainer::Restart (const std::string& dir) + " was found in the checkpoint file, but it has not been added yet. " + " Adding it now." ); - pc->AddRealComp(comp_name); + pc->NewRealComp(comp_name); } } @@ -206,7 +206,7 @@ MultiParticleContainer::Restart (const std::string& dir) + " was found in the checkpoint file, but it has not been added yet. " + " Adding it now." 
); - pc->AddIntComp(comp_name); + pc->NewIntComp(comp_name); } } @@ -258,7 +258,7 @@ storePhiOnParticles ( PinnedMemoryParticleContainer& tmp, is_full_diagnostic, "Output of the electrostatic potential (phi) on the particles was requested, " "but this is only available with `diag_type = Full`."); - tmp.AddRealComp("phi"); + tmp.NewRealComp("phi"); int const phi_index = tmp.getParticleComps().at("phi"); auto& warpx = WarpX::GetInstance(); #ifdef AMREX_USE_OMP diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index ce9c3d50a1e..14d189d0dd5 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -688,15 +688,15 @@ WarpX::InitFromScratch () // Add space to save the positions and velocities at the start of the time steps for (auto const& pc : *mypc) { #if (AMREX_SPACEDIM >= 2) - pc->AddRealComp("x_n"); + pc->NewRealComp("x_n"); #endif #if defined(WARPX_DIM_3D) || defined(WARPX_DIM_RZ) - pc->AddRealComp("y_n"); + pc->NewRealComp("y_n"); #endif - pc->AddRealComp("z_n"); - pc->AddRealComp("ux_n"); - pc->AddRealComp("uy_n"); - pc->AddRealComp("uz_n"); + pc->NewRealComp("z_n"); + pc->NewRealComp("ux_n"); + pc->NewRealComp("uy_n"); + pc->NewRealComp("uz_n"); } } diff --git a/Source/Particles/NamedComponentParticleContainer.H b/Source/Particles/NamedComponentParticleContainer.H index 02f4c44314a..57c65746d18 100644 --- a/Source/Particles/NamedComponentParticleContainer.H +++ b/Source/Particles/NamedComponentParticleContainer.H @@ -159,7 +159,7 @@ public: * @param name Name of the new component * @param comm Whether to communicate this component, in the particle Redistribute */ - void AddRealComp (const std::string& name, bool comm=true) + void NewRealComp (const std::string& name, bool comm=true) { auto search = particle_comps.find(name); if (search == particle_comps.end()) { @@ -177,7 +177,7 @@ public: * @param name Name of the new component * @param comm Whether to communicate this component, in the particle Redistribute */ - void AddIntComp (const std::string& name, bool comm=true) + void NewIntComp (const std::string& name, bool comm=true) { auto search = particle_icomps.find(name); if (search == particle_icomps.end()) { diff --git a/Source/Particles/ParticleBoundaryBuffer.cpp b/Source/Particles/ParticleBoundaryBuffer.cpp index 0391dcc6178..a1f1c46d894 100644 --- a/Source/Particles/ParticleBoundaryBuffer.cpp +++ b/Source/Particles/ParticleBoundaryBuffer.cpp @@ -384,11 +384,11 @@ void ParticleBoundaryBuffer::gatherParticlesFromDomainBoundaries (MultiParticleC if (!buffer[i].isDefined()) { buffer[i] = pc.make_alike(); - buffer[i].AddIntComp("stepScraped", false); - buffer[i].AddRealComp("deltaTimeScraped", false); - buffer[i].AddRealComp("nx", false); - buffer[i].AddRealComp("ny", false); - buffer[i].AddRealComp("nz", false); + buffer[i].NewIntComp("stepScraped", false); + buffer[i].NewRealComp("deltaTimeScraped", false); + buffer[i].NewRealComp("nx", false); + buffer[i].NewRealComp("ny", false); + buffer[i].NewRealComp("nz", false); } auto& species_buffer = buffer[i]; @@ -481,11 +481,11 @@ void ParticleBoundaryBuffer::gatherParticlesFromEmbeddedBoundaries ( if (!buffer[i].isDefined()) { buffer[i] = pc.make_alike(); - buffer[i].AddIntComp("stepScraped", false); - buffer[i].AddRealComp("deltaTimeScraped", false); - buffer[i].AddRealComp("nx", false); - buffer[i].AddRealComp("ny", false); - buffer[i].AddRealComp("nz", false); + buffer[i].NewIntComp("stepScraped", false); + 
buffer[i].NewRealComp("deltaTimeScraped", false); + buffer[i].NewRealComp("nx", false); + buffer[i].NewRealComp("ny", false); + buffer[i].NewRealComp("nz", false); } diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 7c70c9a35c4..c973e9afafa 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -342,12 +342,12 @@ PhysicalParticleContainer::PhysicalParticleContainer (AmrCore* amr_core, int isp #ifdef WARPX_QED pp_species_name.query("do_qed_quantum_sync", m_do_qed_quantum_sync); if (m_do_qed_quantum_sync) { - AddRealComp("opticalDepthQSR"); + NewRealComp("opticalDepthQSR"); } pp_species_name.query("do_qed_breit_wheeler", m_do_qed_breit_wheeler); if (m_do_qed_breit_wheeler) { - AddRealComp("opticalDepthBW"); + NewRealComp("opticalDepthBW"); } if(m_do_qed_quantum_sync){ @@ -368,7 +368,7 @@ PhysicalParticleContainer::PhysicalParticleContainer (AmrCore* amr_core, int isp str_int_attrib_function.at(i)); m_user_int_attrib_parser.at(i) = std::make_unique( utils::parser::makeParser(str_int_attrib_function.at(i),{"x","y","z","ux","uy","uz","t"})); - AddIntComp(m_user_int_attribs.at(i)); + NewIntComp(m_user_int_attribs.at(i)); } // User-defined real attributes @@ -383,19 +383,19 @@ PhysicalParticleContainer::PhysicalParticleContainer (AmrCore* amr_core, int isp str_real_attrib_function.at(i)); m_user_real_attrib_parser.at(i) = std::make_unique( utils::parser::makeParser(str_real_attrib_function.at(i),{"x","y","z","ux","uy","uz","t"})); - AddRealComp(m_user_real_attribs.at(i)); + NewRealComp(m_user_real_attribs.at(i)); } // If old particle positions should be saved add the needed components pp_species_name.query("save_previous_position", m_save_previous_position); if (m_save_previous_position) { #if (AMREX_SPACEDIM >= 2) - AddRealComp("prev_x"); + NewRealComp("prev_x"); #endif #if defined(WARPX_DIM_3D) - AddRealComp("prev_y"); + NewRealComp("prev_y"); #endif - AddRealComp("prev_z"); + NewRealComp("prev_z"); #ifdef WARPX_DIM_RZ amrex::Abort("Saving previous particle positions not yet implemented in RZ"); #endif @@ -3121,7 +3121,7 @@ PhysicalParticleContainer::InitIonizationModule () physical_element == "H" || !do_adk_correction, "Correction to ADK by Zhang et al., PRA 90, 043410 (2014) only works with Hydrogen"); // Add runtime integer component for ionization level - AddIntComp("ionizationLevel"); + NewIntComp("ionizationLevel"); // Get atomic number and ionization energies from file const int ion_element_id = utils::physics::ion_map_ids.at(physical_element); ion_atomic_number = utils::physics::ion_atomic_numbers[ion_element_id]; diff --git a/Source/Python/Particles/WarpXParticleContainer.cpp b/Source/Python/Particles/WarpXParticleContainer.cpp index aa2cd7a2091..7bf02aab62b 100644 --- a/Source/Python/Particles/WarpXParticleContainer.cpp +++ b/Source/Python/Particles/WarpXParticleContainer.cpp @@ -30,7 +30,7 @@ void init_WarpXParticleContainer (py::module& m) > wpc (m, "WarpXParticleContainer"); wpc .def("add_real_comp", - [](WarpXParticleContainer& pc, const std::string& name, bool comm) { pc.AddRealComp(name, comm); }, + [](WarpXParticleContainer& pc, const std::string& name, bool comm) { pc.NewRealComp(name, comm); }, py::arg("name"), py::arg("comm") ) .def("add_n_particles", diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 6513841f327..9b6fa824d82 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -283,7 
+283,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "e1222803739ed2342b9ff6fc2d57316ff0d6cb0c" +set(WarpX_amrex_branch "62c2a81eac7862d526e5861ef2befc00b7f5b759" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index 9543dac2ee2..48dbebcc5c6 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "3699781e4284921f9ccdbbbbc57169ff79c0de20" +set(WarpX_pyamrex_branch "d96b4948cc5812be82dbff1df5d62927c866ae07" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") From d72255a8ae79731c927b4aefe1a02122de9d935e Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 22 Oct 2024 00:36:44 +0000 Subject: [PATCH 054/278] [pre-commit.ci] pre-commit autoupdate (#5402) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.6.9 → v0.7.0](https://github.com/astral-sh/ruff-pre-commit/compare/v0.6.9...v0.7.0) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index ea5499fa469..29f612a3ef6 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.9 + rev: v0.7.0 hooks: # Run the linter - id: ruff From f2686d62dd5c6d0a4901122402b8084cfcc059ba Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Tue, 22 Oct 2024 12:54:34 -0500 Subject: [PATCH 055/278] Fix a typo in CMake option disabling AMReX incflo solvers (#5405) --- cmake/dependencies/AMReX.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 9b6fa824d82..2c4976777e2 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -93,7 +93,7 @@ macro(find_amrex) set(AMReX_PROBINIT OFF CACHE INTERNAL "") set(AMReX_TINY_PROFILE ON CACHE BOOL "") set(AMReX_LINEAR_SOLVERS_EM ON CACHE INTERNAL "") - set(AMReX_LINEAR_SOLVER_INCFLO OFF CACHE INTERNAL "") + set(AMReX_LINEAR_SOLVERS_INCFLO OFF CACHE INTERNAL "") if(WarpX_ASCENT OR WarpX_SENSEI) set(AMReX_GPU_RDC ON CACHE BOOL "") From acd1434320107b8b18ec377fab4a4313f3da989d Mon Sep 17 00:00:00 2001 From: David Grote Date: Tue, 22 Oct 2024 13:00:57 -0700 Subject: [PATCH 056/278] Update picmistandard to 0.31.0 (#5406) The new version of picmistandard is compatible with NumPy version 2. 
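To update an existing environment to the pinned version, mirroring the install-script change below:

python3 -m pip install --upgrade picmistandard==0.31.0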
--- Docs/requirements.txt | 2 +- Python/setup.py | 2 +- Tools/machines/karolina-it4i/install_dependencies.sh | 2 +- requirements.txt | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/Docs/requirements.txt b/Docs/requirements.txt index bc34e69cd65..7581638551e 100644 --- a/Docs/requirements.txt +++ b/Docs/requirements.txt @@ -13,7 +13,7 @@ openpmd-viewer # for checksumAPI # PICMI API docs # note: keep in sync with version in ../requirements.txt -picmistandard==0.30.0 +picmistandard==0.31.0 # for development against an unreleased PICMI version, use: # picmistandard @ git+https://github.com/picmi-standard/picmi.git#subdirectory=PICMI_Python diff --git a/Python/setup.py b/Python/setup.py index d57ebc65223..c0e38baced2 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -70,7 +70,7 @@ package_dir={"pywarpx": "pywarpx"}, description="""Wrapper of WarpX""", package_data=package_data, - install_requires=["numpy", "picmistandard==0.30.0", "periodictable"], + install_requires=["numpy", "picmistandard==0.31.0", "periodictable"], python_requires=">=3.8", zip_safe=False, ) diff --git a/Tools/machines/karolina-it4i/install_dependencies.sh b/Tools/machines/karolina-it4i/install_dependencies.sh index c1b6e93ab00..9cc4f1ee144 100755 --- a/Tools/machines/karolina-it4i/install_dependencies.sh +++ b/Tools/machines/karolina-it4i/install_dependencies.sh @@ -53,7 +53,7 @@ python -m pip install --user --upgrade matplotlib #python -m pip install --user --upgrade yt # install or update WarpX dependencies -python -m pip install --user --upgrade picmistandard==0.30.0 +python -m pip install --user --upgrade picmistandard==0.31.0 python -m pip install --user --upgrade lasy # optional: for optimas (based on libEnsemble & ax->botorch->gpytorch->pytorch) diff --git a/requirements.txt b/requirements.txt index 272c4903e94..2c8b749abe0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ periodictable~=1.5 # PICMI # note: don't forget to update the version in Docs/requirements.txt, too -picmistandard==0.30.0 +picmistandard==0.31.0 # for development against an unreleased PICMI version, use: #picmistandard @ git+https://github.com/picmi-standard/picmi.git#subdirectory=PICMI_Python From d34cc6ca449a7d2724c85712c86d2b18fb98035f Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Wed, 23 Oct 2024 20:31:37 +0200 Subject: [PATCH 057/278] Docs: fix typo in documentation for Leonardo supercomputer (CINECA) (#5403) This PR fixes a tiny typo in the docs --- Docs/source/install/hpc/leonardo.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/source/install/hpc/leonardo.rst b/Docs/source/install/hpc/leonardo.rst index 568a5612250..bfd584288fe 100644 --- a/Docs/source/install/hpc/leonardo.rst +++ b/Docs/source/install/hpc/leonardo.rst @@ -65,7 +65,7 @@ Finally, since Leonardo does not yet provide software modules for some of our de .. code-block:: bash - bash $HOME/src/warpx/Tools/machines/leonardo_cineca/install_gpu_dependencies.sh + bash $HOME/src/warpx/Tools/machines/leonardo-cineca/install_gpu_dependencies.sh source $HOME/sw/venvs/warpx/bin/activate .. dropdown:: Script Details From a25faff09907773cb0c48c82061ea68215ad6d11 Mon Sep 17 00:00:00 2001 From: Marco Garten Date: Thu, 24 Oct 2024 16:23:25 -0700 Subject: [PATCH 058/278] Add Time-Averaged Field Diagnostics (#5285) This PR adds time-averaged field diagnostics to the WarpX output. 
To-do:

- [x] code
- [x] docs
- [x] tests
- [x] example

Follow-up PRs:

- meta-data
- make compatible with adaptive time stepping

This PR is based on work performed during the *2024 WarpX Refactoring Hackathon* and was created together with @RevathiJambunathan.

Successfully merging this pull request may close #5165.

---------

Co-authored-by: RevathiJambunathan
Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>
Co-authored-by: Edoardo Zoni
---
 Docs/source/usage/parameters.rst              | 55 ++++-
 Docs/source/usage/python.rst                  |  2 +
 .../laser_ion/CMakeLists.txt                  |  8 +-
 .../Physics_applications/laser_ion/README.rst |  2 +-
 .../analysis_default_openpmd_regression.py    |  1 -
 .../laser_ion/analysis_test_laser_ion.py      | 85 +++++++
 .../laser_ion/inputs_test_2d_laser_ion_acc    | 32 ++-
 .../inputs_test_2d_laser_ion_acc_picmi.py     | 17 +-
 .../Physics_applications/laser_ion/plot_2d.py |  2 +-
 Python/pywarpx/picmi.py                       | 51 +++++
 Source/Diagnostics/BTDiagnostics.H            |  2 +-
 Source/Diagnostics/BTDiagnostics.cpp          |  4 +-
 .../Diagnostics/BoundaryScrapingDiagnostics.H |  2 +-
 .../BoundaryScrapingDiagnostics.cpp           |  4 +-
 Source/Diagnostics/Diagnostics.H              | 12 +-
 Source/Diagnostics/Diagnostics.cpp            | 21 +-
 Source/Diagnostics/FullDiagnostics.H          | 24 +-
 Source/Diagnostics/FullDiagnostics.cpp        | 215 +++++++++++++++++-
 Source/Diagnostics/MultiDiagnostics.H         |  2 -
 Source/Diagnostics/MultiDiagnostics.cpp       | 13 +-
 Source/Diagnostics/WarpXOpenPMD.cpp           |  2 +
 21 files changed, 507 insertions(+), 49 deletions(-)
 delete mode 120000 Examples/Physics_applications/laser_ion/analysis_default_openpmd_regression.py
 create mode 100755 Examples/Physics_applications/laser_ion/analysis_test_laser_ion.py

diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst
index a6ba9a2773d..af559aa1fba 100644
--- a/Docs/source/usage/parameters.rst
+++ b/Docs/source/usage/parameters.rst
@@ -2633,10 +2633,11 @@ Diagnostics and output
 In-situ visualization
 ^^^^^^^^^^^^^^^^^^^^^
-WarpX has four types of diagnostics:
-``FullDiagnostics`` consist in dumps of fields and particles at given iterations,
-``BackTransformedDiagnostics`` are used when running a simulation in a boosted frame, to reconstruct output data to the lab frame,
-``BoundaryScrapingDiagnostics`` are used to collect the particles that are absorbed at the boundary, throughout the simulation, and
+WarpX has five types of diagnostics:
+``Full`` diagnostics consist of dumps of fields and particles at given iterations,
+``TimeAveraged`` diagnostics only allow field data, which they output after averaging over a period of time,
+``BackTransformed`` diagnostics are used when running a simulation in a boosted frame, to reconstruct output data to the lab frame,
+``BoundaryScraping`` diagnostics are used to collect the particles that are absorbed at the boundary, throughout the simulation, and
 ``ReducedDiags`` allow the user to compute some reduced quantity (particle temperature, max of a field) and write a small amount of data to text files.
 Similar to what is done for physical species, WarpX has a class Diagnostics that allows users to initialize different diagnostics, each of them with different fields, resolution and period.
 This currently applies to standard diagnostics, but should be extended to back-transformed diagnostics and reduced diagnostics (and others) in a near future.
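For orientation, a ``TimeAveraged`` diagnostic is declared like a ``Full`` one, plus the averaging options documented below; a minimal hypothetical sketch (the diagnostic name and values are illustrative):

diagnostics.diags_names = diagTimeAvg
diagTimeAvg.diag_type = TimeAveraged
diagTimeAvg.intervals = 100
diagTimeAvg.fields_to_plot = Ex Ey Ez Bx By Bz
diagTimeAvg.time_average_mode = fixed_start
diagTimeAvg.average_start_step = 50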
@@ -2882,12 +2883,58 @@ In-situ capabilities can be used by turning on Sensei or Ascent (provided they a
 * ``warpx.mffile_nstreams`` (`int`) optional (default `4`)
   Limit the number of concurrent readers per file.
 
+
+.. _running-cpp-parameters-diagnostics-timeavg:
+
+Time-Averaged Diagnostics
+^^^^^^^^^^^^^^^^^^^^^^^^^
+
+``TimeAveraged`` diagnostics are a special type of ``Full`` diagnostics that allows for the output of time-averaged field data.
+This type of diagnostic can be created using ``<diag_name>.diag_type = TimeAveraged``.
+We support only field data and related options from the list at `Full Diagnostics`_.
+
+.. note::
+
+   As with ``Full`` diagnostics, ``TimeAveraged`` diagnostics output the initial **instantaneous** conditions of the selected fields on step 0 (unless more specific output intervals exclude output for step 0).
+
+In addition, ``TimeAveraged`` diagnostic options include:
+
+* ``<diag_name>.time_average_mode`` (`string`, default `none`)
+  Describes the operating mode for time-averaged field output.
+
+  * ``none`` for no averaging (instantaneous fields)
+
+  * ``fixed_start`` for a diagnostic that averages all fields between the current output step and a fixed point in time
+
+  * ``dynamic_start`` for a constant averaging period and output at different points in time (non-overlapping)
+
+  .. note::
+
+     To enable time-averaged field output with intervals tightly spaced enough for overlapping averaging periods,
+     please create additional instances of ``TimeAveraged`` diagnostics.
+
+* ``<diag_name>.average_period_steps`` (`int`)
+  Configures the number of time steps in an averaging period.
+  Set this only in the ``dynamic_start`` mode and only if ``average_period_time`` has not already been set.
+  Will be ignored in the ``fixed_start`` mode (with warning).
+
+* ``<diag_name>.average_period_time`` (`float`, in seconds)
+  Configures the time (SI units) in an averaging period.
+  Set this only in the ``dynamic_start`` mode and only if ``average_period_steps`` has not already been set.
+  Will be ignored in the ``fixed_start`` mode (with warning).
+
+* ``<diag_name>.average_start_step`` (`int`)
+  Configures the time step at which time-averaging begins.
+  Set this only in the ``fixed_start`` mode.
+  Will be ignored in the ``dynamic_start`` mode (with warning).
+
 .. _running-cpp-parameters-diagnostics-btd:
 
 BackTransformed Diagnostics
 ^^^^^^^^^^^^^^^^^^^^^^^^^^^
 
 The ``BackTransformed`` diag type is used when running a simulation in a boosted frame, to reconstruct output data to the lab frame.
 This option can be set using ``<diag_name>.diag_type = BackTransformed``.
 We support the following list of options from `Full Diagnostics`_
+``<diag_name>.format``, ``<diag_name>.openpmd_backend``, ``<diag_name>.dump_rz_modes``, ``<diag_name>.file_prefix``, ``<diag_name>.diag_lo``, ``<diag_name>.diag_hi``, ``<diag_name>.write_species``, ``<diag_name>.species``.
 
 Additional options for this diagnostic include:

diff --git a/Docs/source/usage/python.rst b/Docs/source/usage/python.rst
index 38b0a31d7f3..8b40684feb9 100644
--- a/Docs/source/usage/python.rst
+++ b/Docs/source/usage/python.rst
@@ -114,6 +114,8 @@ Diagnostics
 
 .. autoclass:: pywarpx.picmi.FieldDiagnostic
 
+.. autoclass:: pywarpx.picmi.TimeAveragedFieldDiagnostic
+
 .. autoclass:: pywarpx.picmi.ElectrostaticFieldDiagnostic
 
 .. autoclass:: pywarpx.picmi.Checkpoint

diff --git a/Examples/Physics_applications/laser_ion/CMakeLists.txt b/Examples/Physics_applications/laser_ion/CMakeLists.txt
index f05203de0e8..66d53165290 100644
--- a/Examples/Physics_applications/laser_ion/CMakeLists.txt
+++ b/Examples/Physics_applications/laser_ion/CMakeLists.txt
@@ -6,8 +6,8 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_laser_ion_acc # inputs
-    analysis_default_openpmd_regression.py # analysis
-    diags/diag1/ # output
+    analysis_test_laser_ion.py # analysis
+    diags/diagInst/ # output
     OFF # dependency
 )
 
@@ -16,7 +16,7 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_laser_ion_acc_picmi.py # inputs
-    analysis_default_openpmd_regression.py # analysis
-    diags/diag1/ # output
+    analysis_test_laser_ion.py # analysis
+    diags/diagInst/ # output
     OFF # dependency
 )
diff --git a/Examples/Physics_applications/laser_ion/README.rst b/Examples/Physics_applications/laser_ion/README.rst
index e55cf6889d4..c5dc5af3a77 100644
--- a/Examples/Physics_applications/laser_ion/README.rst
+++ b/Examples/Physics_applications/laser_ion/README.rst
@@ -87,7 +87,7 @@ Visualize
    :alt: Particle densities for electrons (top), protons (middle), and electrons again in logarithmic scale (bottom).
    :width: 80%
 
-   Particle densities for electrons (top), protons (middle), and electrons again in logarithmic scale (bottom).
+   Particle densities for electrons (top), protons (middle), and electrons again in logarithmic scale (bottom).
 
 Particle density output illustrates the evolution of the target in time and space.
 Logarithmic scales can help to identify where the target becomes transparent for the laser pulse (bottom panel in :numref:`fig-tnsa-densities` ).
diff --git a/Examples/Physics_applications/laser_ion/analysis_default_openpmd_regression.py b/Examples/Physics_applications/laser_ion/analysis_default_openpmd_regression.py
deleted file mode 120000
index 73e5ec47001..00000000000
--- a/Examples/Physics_applications/laser_ion/analysis_default_openpmd_regression.py
+++ /dev/null
@@ -1 +0,0 @@
-../../analysis_default_openpmd_regression.py
\ No newline at end of file
diff --git a/Examples/Physics_applications/laser_ion/analysis_test_laser_ion.py b/Examples/Physics_applications/laser_ion/analysis_test_laser_ion.py
new file mode 100755
index 00000000000..d2106d33803
--- /dev/null
+++ b/Examples/Physics_applications/laser_ion/analysis_test_laser_ion.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python3
+
+import os
+import sys
+
+import numpy as np
+import openpmd_api as io
+
+sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
+from checksumAPI import evaluate_checksum
+
+
+def load_field_from_iteration(
+    series, iteration: int, field: str, coord: str = None
+) -> np.ndarray:
+    """Load iteration of field data from file."""
+
+    it = series.iterations[iteration]
+    field_obj = it.meshes[f"{field}"]
+
+    if field_obj.scalar:
+        field_data = field_obj[io.Mesh_Record_Component.SCALAR].load_chunk()
+    elif coord in [item[0] for item in list(field_obj.items())]:
+        field_data = field_obj[coord].load_chunk()
+    else:
+        raise Exception(
+            f"Specified coordinate: {coord} is not available for field: {field}."
+ ) + series.flush() + + return field_data + + +def compare_time_avg_with_instantaneous_diags(dir_inst: str, dir_avg: str): + """Compare instantaneous data (multiple iterations averaged in post-processing) with in-situ averaged data.""" + + field = "E" + coord = "z" + avg_period_steps = 5 + avg_output_step = 100 + + path_tpl_inst = f"{dir_inst}/openpmd_%T.h5" + path_tpl_avg = f"{dir_avg}/openpmd_%T.h5" + + si = io.Series(path_tpl_inst, io.Access.read_only) + sa = io.Series(path_tpl_avg, io.Access.read_only) + + ii0 = si.iterations[0] + fi0 = ii0.meshes[field][coord] + shape = fi0.shape + + data_inst = np.zeros(shape) + + for i in np.arange(avg_output_step - avg_period_steps + 1, avg_output_step + 1): + data_inst += load_field_from_iteration(si, i, field, coord) + + data_inst = data_inst / avg_period_steps + + data_avg = load_field_from_iteration(sa, avg_output_step, field, coord) + + # Compare the data + if np.allclose(data_inst, data_avg, rtol=1e-12): + print("Test passed: actual data is close to expected data.") + else: + print("Test failed: actual data is not close to expected data.") + sys.exit(1) + + +if __name__ == "__main__": + # NOTE: works only in the example directory due to relative path import + # compare checksums + evaluate_checksum( + test_name=os.path.split(os.getcwd())[1], + output_file=sys.argv[1], + output_format="openpmd", + ) + + # TODO: implement intervals parser for PICMI that allows more complex output periods + test_name = os.path.split(os.getcwd())[1] + if "picmi" not in test_name: + # Functionality test for TimeAveragedDiagnostics + compare_time_avg_with_instantaneous_diags( + dir_inst=sys.argv[1], + dir_avg="diags/diagTimeAvg/", + ) diff --git a/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc b/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc index 5ad8334e9ef..d69ed6dc375 100644 --- a/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc +++ b/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc @@ -200,18 +200,32 @@ laser1.profile_focal_distance = 4.0e-6 # focal distance from the antenna [m] ################################# # Diagnostics # -diagnostics.diags_names = diag1 openPMDfw openPMDbw +diagnostics.diags_names = diagInst diagTimeAvg openPMDfw openPMDbw -diag1.intervals = 100 -diag1.diag_type = Full -diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho rho_electrons rho_hydrogen +# instantaneous field and particle diagnostic +diagInst.intervals = 100,96:100 # second interval only for CI testing the time-averaged diags +diagInst.diag_type = Full +diagInst.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho rho_electrons rho_hydrogen # reduce resolution of output fields -diag1.coarsening_ratio = 4 4 +diagInst.coarsening_ratio = 4 4 # demonstration of a spatial and momentum filter -diag1.electrons.plot_filter_function(t,x,y,z,ux,uy,uz) = (uz>=0) * (x<1.0e-6) * (x>-1.0e-6) -diag1.hydrogen.plot_filter_function(t,x,y,z,ux,uy,uz) = (uz>=0) * (x<1.0e-6) * (x>-1.0e-6) -diag1.format = openpmd -diag1.openpmd_backend = h5 +diagInst.electrons.plot_filter_function(t,x,y,z,ux,uy,uz) = (uz>=0) * (x<1.0e-6) * (x>-1.0e-6) +diagInst.hydrogen.plot_filter_function(t,x,y,z,ux,uy,uz) = (uz>=0) * (x<1.0e-6) * (x>-1.0e-6) +diagInst.format = openpmd +diagInst.openpmd_backend = h5 + +# time-averaged particle and field diagnostic +diagTimeAvg.intervals = 100 +diagTimeAvg.diag_type = TimeAveraged +diagTimeAvg.time_average_mode = dynamic_start +#diagTimeAvg.average_period_time = 2.67e-15 # period of 800 nm light 
waves +diagTimeAvg.average_period_steps = 5 # use only either `time` or `steps` +diagTimeAvg.write_species = 0 +diagTimeAvg.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho rho_electrons rho_hydrogen +# reduce resolution of output fields +diagTimeAvg.coarsening_ratio = 4 4 +diagTimeAvg.format = openpmd +diagTimeAvg.openpmd_backend = h5 openPMDfw.intervals = 100 openPMDfw.diag_type = Full diff --git a/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc_picmi.py b/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc_picmi.py index 04f9111ec5f..66ba5f64091 100755 --- a/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc_picmi.py +++ b/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc_picmi.py @@ -140,7 +140,7 @@ # Diagnostics particle_diag = picmi.ParticleDiagnostic( - name="diag1", + name="diagInst", period=100, warpx_format="openpmd", warpx_openpmd_backend="h5", @@ -153,7 +153,7 @@ for ncell_comp, cr in zip([nx, nz], coarsening_ratio): ncell_field.append(int(ncell_comp / cr)) field_diag = picmi.FieldDiagnostic( - name="diag1", + name="diagInst", grid=grid, period=100, number_of_cells=ncell_field, @@ -162,6 +162,18 @@ warpx_openpmd_backend="h5", ) +field_time_avg_diag = picmi.TimeAveragedFieldDiagnostic( + name="diagTimeAvg", + grid=grid, + period=100, + number_of_cells=ncell_field, + data_list=["B", "E", "J", "rho", "rho_electrons", "rho_hydrogen"], + warpx_format="openpmd", + warpx_openpmd_backend="h5", + warpx_time_average_mode="dynamic_start", + warpx_average_period_time=2.67e-15, +) + particle_fw_diag = picmi.ParticleDiagnostic( name="openPMDfw", period=100, @@ -292,6 +304,7 @@ # Add full diagnostics sim.add_diagnostic(particle_diag) sim.add_diagnostic(field_diag) +sim.add_diagnostic(field_time_avg_diag) sim.add_diagnostic(particle_fw_diag) sim.add_diagnostic(particle_bw_diag) # Add reduced diagnostics diff --git a/Examples/Physics_applications/laser_ion/plot_2d.py b/Examples/Physics_applications/laser_ion/plot_2d.py index f8a3b05d8a3..b3aefb80606 100644 --- a/Examples/Physics_applications/laser_ion/plot_2d.py +++ b/Examples/Physics_applications/laser_ion/plot_2d.py @@ -259,7 +259,7 @@ def visualize_particle_histogram_iteration( "-d", "--diag_dir", type=str, - default="./diags/diag1", + default="./diags/diagInst", help="Directory containing density and field diagnostics", ) parser.add_argument( diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 478b4d5802e..c7a27f62df0 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -3271,6 +3271,57 @@ def diagnostic_initialize_inputs(self): ElectrostaticFieldDiagnostic = FieldDiagnostic +class TimeAveragedFieldDiagnostic(FieldDiagnostic): + """ + See `Input Parameters `__ for more information. + + Parameters + ---------- + warpx_time_average_mode: str + Type of time averaging diagnostic + Supported values include ``"none"``, ``"fixed_start"``, and ``"dynamic_start"`` + + * ``"none"`` for no averaging (instantaneous fields) + * ``"fixed_start"`` for a diagnostic that averages all fields between the current output step and a fixed point in time + * ``"dynamic_start"`` for a constant averaging period and output at different points in time (non-overlapping) + + warpx_average_period_steps: int, optional + Configures the number of time steps in an averaging period. + Set this only in the ``"dynamic_start"`` mode and only if ``warpx_average_period_time`` has not already been set. + Will be ignored in the ``"fixed_start"`` mode (with warning). 
+
+    warpx_average_period_time: float, optional
+        Configures the time (SI units) in an averaging period.
+        Set this only in the ``"dynamic_start"`` mode and only if ``warpx_average_period_steps`` has not already been set.
+        Will be ignored in the ``"fixed_start"`` mode (with warning).
+
+    warpx_average_start_step: int, optional
+        Configures the time step at which time-averaging begins.
+        Set this only in the ``"fixed_start"`` mode.
+        Will be ignored in the ``"dynamic_start"`` mode (with warning).
+    """
+
+    def init(self, kw):
+        super().init(kw)
+        self.time_average_mode = kw.pop("warpx_time_average_mode", None)
+        self.average_period_steps = kw.pop("warpx_average_period_steps", None)
+        self.average_period_time = kw.pop("warpx_average_period_time", None)
+        self.average_start_step = kw.pop("warpx_average_start_step", None)
+
+    def diagnostic_initialize_inputs(self):
+        super().diagnostic_initialize_inputs()
+
+        self.diagnostic.set_or_replace_attr("diag_type", "TimeAveraged")
+
+        if "write_species" not in self.diagnostic.argvattrs:
+            self.diagnostic.write_species = False
+
+        self.diagnostic.time_average_mode = self.time_average_mode
+        self.diagnostic.average_period_steps = self.average_period_steps
+        self.diagnostic.average_period_time = self.average_period_time
+        self.diagnostic.average_start_step = self.average_start_step
+
+
 class Checkpoint(picmistandard.base._ClassWithInit, WarpXDiagnosticBase):
     """
     Sets up checkpointing of the simulation, allowing for later restarts
diff --git a/Source/Diagnostics/BTDiagnostics.H b/Source/Diagnostics/BTDiagnostics.H
index ab04f30ef18..c7137f45c9d 100644
--- a/Source/Diagnostics/BTDiagnostics.H
+++ b/Source/Diagnostics/BTDiagnostics.H
@@ -28,7 +28,7 @@ class BTDiagnostics final : public Diagnostics
 {
 public:
 
-    BTDiagnostics (int i, const std::string& name);
+    BTDiagnostics (int i, const std::string& name, DiagTypes diag_type);
 
 private:
 
     /** Whether to plot raw (i.e., NOT cell-centered) fields */
diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp
index 312bbc7ec45..4939e2fb207 100644
--- a/Source/Diagnostics/BTDiagnostics.cpp
+++ b/Source/Diagnostics/BTDiagnostics.cpp
@@ -55,8 +55,8 @@ namespace
     constexpr int permission_flag_rwxrxrx = 0755;
 }
 
-BTDiagnostics::BTDiagnostics (int i, const std::string& name)
-    : Diagnostics{i, name},
+BTDiagnostics::BTDiagnostics (int i, const std::string& name, DiagTypes diag_type)
+    : Diagnostics{i, name, diag_type},
       m_cell_centered_data_name("BTD_cell_centered_data_" + name)
 {
     ReadParameters();
diff --git a/Source/Diagnostics/BoundaryScrapingDiagnostics.H b/Source/Diagnostics/BoundaryScrapingDiagnostics.H
index 3e5fc1f19eb..f78e7b4574b 100644
--- a/Source/Diagnostics/BoundaryScrapingDiagnostics.H
+++ b/Source/Diagnostics/BoundaryScrapingDiagnostics.H
@@ -23,7 +23,7 @@ public:
      * @param i index of diagnostics in MultiDiagnostics::alldiags
      * @param name diagnostics name in the inputs file
      */
-    BoundaryScrapingDiagnostics (int i, const std::string& name);
+    BoundaryScrapingDiagnostics (int i, const std::string& name, DiagTypes diag_type);
 
 private:
 
     /** Read relevant parameters for BoundaryScraping */
diff --git a/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp b/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp
index 3757082ab4d..8df58b6fb28 100644
--- a/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp
+++ b/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp
@@ -22,8 +22,8 @@
 
 using namespace amrex::literals;
 
-BoundaryScrapingDiagnostics::BoundaryScrapingDiagnostics (int i, const std::string& name)
-    : Diagnostics{i, name}
+BoundaryScrapingDiagnostics::BoundaryScrapingDiagnostics (int i, const std::string& name, DiagTypes diag_type)
+    : Diagnostics{i, name, diag_type}
 {
     ReadParameters();
 }
diff --git a/Source/Diagnostics/Diagnostics.H b/Source/Diagnostics/Diagnostics.H
index 20550364fb7..d0c70e76c1f 100644
--- a/Source/Diagnostics/Diagnostics.H
+++ b/Source/Diagnostics/Diagnostics.H
@@ -21,6 +21,8 @@
 #include
 #include
 
+/** All types of diagnostics. */
+enum struct DiagTypes {Full, BackTransformed, BoundaryScraping, TimeAveraged};
 /**
  * \brief base class for diagnostics.
  * Contains main routines to filter, compute and flush diagnostics.
@@ -35,7 +37,7 @@ public:
      * @param i index of diagnostics in MultiDiagnostics::alldiags
      * @param name diagnostics name in the inputs file
      */
-    Diagnostics (int i, std::string name);
+    Diagnostics (int i, std::string name, DiagTypes diag_type);
 
     /** Virtual Destructor to handle clean destruction of derived classes */
     virtual ~Diagnostics ();
@@ -45,6 +47,8 @@ public:
     Diagnostics(Diagnostics&& ) = default;
     Diagnostics& operator=(Diagnostics&& ) = default;
 
+    /** Stores the diag type */
+    DiagTypes m_diag_type;
     /** Pack (stack) all fields in the cell-centered output MultiFab m_mf_output.
      *
      * Fields are computed (e.g., cell-centered or back-transformed)
@@ -266,6 +270,12 @@ protected:
      * The second vector loops over the total number of levels.
      */
     amrex::Vector< amrex::Vector< amrex::MultiFab > > m_mf_output;
+    /** summation multifab, where all fields (computed, cell-centered, and stacked)
+     * are summed for every step in a time averaging period.
+     * The first vector is for the total number of snapshots (= 1 for FullDiagnostics).
+     * The second vector loops over the total number of levels.
+     */
+    amrex::Vector< amrex::Vector< amrex::MultiFab > > m_sum_mf_output;
 
     /** Geometry that defines the domain attributes corresponding to output multifab.
      * Specifically, the user-defined physical co-ordinates for the diagnostics
diff --git a/Source/Diagnostics/Diagnostics.cpp b/Source/Diagnostics/Diagnostics.cpp
index fd079479285..0f659065185 100644
--- a/Source/Diagnostics/Diagnostics.cpp
+++ b/Source/Diagnostics/Diagnostics.cpp
@@ -38,8 +38,8 @@
 
 using namespace amrex::literals;
 
-Diagnostics::Diagnostics (int i, std::string name)
-    : m_diag_name(std::move(name)), m_diag_index(i)
+Diagnostics::Diagnostics (int i, std::string name, DiagTypes diag_type)
+    : m_diag_type(diag_type), m_diag_name(std::move(name)), m_diag_index(i)
 {
 }
 
@@ -536,6 +536,14 @@ Diagnostics::InitBaseData ()
         m_mf_output[i].resize( nmax_lev );
     }
 
+    // allocate vector of buffers and vector of levels for each buffer for summation multifab for TimeAveragedDiagnostics
+    if (m_diag_type == DiagTypes::TimeAveraged) {
+        m_sum_mf_output.resize(m_num_buffers);
+        for (int i = 0; i < m_num_buffers; ++i) {
+            m_sum_mf_output[i].resize( nmax_lev );
+        }
+    }
+
     // allocate vector of geometry objects corresponding to each output multifab.
     m_geom_output.resize( m_num_buffers );
     for (int i = 0; i < m_num_buffers; ++i) {
@@ -575,6 +583,15 @@ Diagnostics::ComputeAndPack ()
             // Check that the proper number of components of mf_avg were updated.
AMREX_ALWAYS_ASSERT( icomp_dst == m_varnames.size() ); + if (m_diag_type == DiagTypes::TimeAveraged) { + + const amrex::Real real_a = 1.0; + // Compute m_sum_mf_output += real_a*m_mf_output + amrex::MultiFab::Saxpy( + m_sum_mf_output[i_buffer][lev], real_a, m_mf_output[i_buffer][lev], + 0, 0, m_mf_output[i_buffer][lev].nComp(), m_mf_output[i_buffer][lev].nGrowVect()); + } + // needed for contour plots of rho, i.e. ascent/sensei if (m_format == "sensei" || m_format == "ascent") { ablastr::utils::communication::FillBoundary(m_mf_output[i_buffer][lev], WarpX::do_single_precision_comms, diff --git a/Source/Diagnostics/FullDiagnostics.H b/Source/Diagnostics/FullDiagnostics.H index 1b999a9b361..61f63aa78e2 100644 --- a/Source/Diagnostics/FullDiagnostics.H +++ b/Source/Diagnostics/FullDiagnostics.H @@ -4,12 +4,22 @@ #include "Diagnostics.H" #include "Utils/Parser/IntervalsParser.H" +#include + #include class FullDiagnostics final : public Diagnostics { public: - FullDiagnostics (int i, const std::string& name); + FullDiagnostics (int i, const std::string& name, DiagTypes diag_type); + /** Type of time averaging for diagnostics (fields only) + * None corresponds to instantaneous diags + * Static corresponds to a fixed starting step for averaging, + * will average until the end, and dump out intermediate average results + * Dynamic corresponds to a moving period for averaging where the start step + * is as many steps before the output interval as the averaging period is long. + */ + enum struct TimeAverageType {None, Static, Dynamic}; private: /** Read user-requested parameters for full diagnostics */ void ReadParameters (); @@ -25,10 +35,20 @@ private: * before writing the diagnostic. */ bool m_solver_deposits_current = true; - /** Flush m_mf_output and particles to file for the i^th buffer */ + /** Whether the diagnostics are averaging data over time or not */ + TimeAverageType m_time_average_mode = TimeAverageType::None; + /** Period to average fields over: in steps */ + int m_average_period_steps = -1; + /** Period to average fields over: in seconds */ + amrex::Real m_average_period_time = -1.0; + /** Time step to start averaging */ + int m_average_start_step = -1; + /** Flush m_mf_output or m_sum_mf_output and particles to file for the i^th buffer */ void Flush (int i_buffer, bool /* force_flush */) override; /** Flush raw data */ void FlushRaw (); + /** Initialize Data required to compute TimeAveraged diagnostics */ + void DerivedInitData () override; /** whether to compute and pack cell-centered data in m_mf_output * \param[in] step current time step * \param[in] force_flush if true, return true for any step since output must be diff --git a/Source/Diagnostics/FullDiagnostics.cpp b/Source/Diagnostics/FullDiagnostics.cpp index e5eefc82de5..eeca8ffdb44 100644 --- a/Source/Diagnostics/FullDiagnostics.cpp +++ b/Source/Diagnostics/FullDiagnostics.cpp @@ -47,8 +47,8 @@ using namespace amrex::literals; using warpx::fields::FieldType; -FullDiagnostics::FullDiagnostics (int i, const std::string& name): - Diagnostics{i, name}, +FullDiagnostics::FullDiagnostics (int i, const std::string& name, DiagTypes diag_type): + Diagnostics{i, name, diag_type}, m_solver_deposits_current{ (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::None) || (WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic)} @@ -57,6 +57,27 @@ FullDiagnostics::FullDiagnostics (int i, const std::string& name): BackwardCompatibility(); } +void +FullDiagnostics::DerivedInitData() { + if 
(m_diag_type == DiagTypes::TimeAveraged) {
+        auto & warpx = WarpX::GetInstance();
+        if (m_time_average_mode == TimeAverageType::Dynamic) {
+
+            // already checked in ReadParameters that only one of the parameters is set
+            // calculate the remaining averaging period parameter from the one that was given
+            if (m_average_period_steps > 0) {
+                m_average_period_time = m_average_period_steps * warpx.getdt(0);
+            } else if (m_average_period_time > 0) {
+                m_average_period_steps = static_cast<int>(std::round(m_average_period_time / warpx.getdt(0)));
+            }
+            amrex::Print() << Utils::TextMsg::Info(
+                "Initializing TimeAveragedDiagnostics " + m_diag_name
+                + " with an averaging period of " + std::to_string(m_average_period_steps) + " steps"
+            );
+        }
+    }
+}
+
 void
 FullDiagnostics::InitializeParticleBuffer ()
 {
@@ -101,6 +122,92 @@ FullDiagnostics::ReadParameters ()
     const bool plot_raw_fields_guards_specified = pp_diag_name.query("plot_raw_fields_guards", m_plot_raw_fields_guards);
     const bool raw_specified = plot_raw_fields_specified || plot_raw_fields_guards_specified;
 
+    if (m_diag_type == DiagTypes::TimeAveraged) {
+        std::string m_time_average_mode_str = "none";
+        /** Whether the diagnostics are averaging data over time or not
+         * Valid options are "fixed_start" and "dynamic_start".
+         */
+        pp_diag_name.get("time_average_mode", m_time_average_mode_str);
+
+        const amrex::ParmParse pp_warpx("warpx");
+        std::vector<std::string> dt_interval_vec = {"-1"};
+        const bool timestep_may_vary = pp_warpx.queryarr("dt_update_interval", dt_interval_vec);
+        amrex::Print() << Utils::TextMsg::Warn("Time step varies? " + std::to_string(timestep_may_vary));
+        if (timestep_may_vary) {
+            WARPX_ABORT_WITH_MESSAGE(
+                "Time-averaged diagnostics (encountered in: "
+                + m_diag_name + ") are currently not supported with adaptive time-stepping"
+            );
+        }
+
+        if (m_time_average_mode_str == "fixed_start") {
+            m_time_average_mode = TimeAverageType::Static;
+        } else if (m_time_average_mode_str == "dynamic_start") {
+            m_time_average_mode = TimeAverageType::Dynamic;
+        } else if (m_time_average_mode_str == "none") {
+            m_time_average_mode = TimeAverageType::None;
+        } else {
+            WARPX_ABORT_WITH_MESSAGE(
+                "Unknown time averaging mode. Valid entries are: none, fixed_start, dynamic_start"
+            );
+        }
+
+        const bool averaging_period_steps_specified = pp_diag_name.query(
+            "average_period_steps", m_average_period_steps
+        );
+        const bool averaging_period_time_specified = pp_diag_name.queryWithParser(
+            "average_period_time", m_average_period_time
+        );
+
+        if (m_time_average_mode == TimeAverageType::Static) {
+            // This fails if users do not specify a start.
+            pp_diag_name.get("average_start_step", m_average_start_step);
+            if (m_average_start_step == 0) {
+                WARPX_ABORT_WITH_MESSAGE(
+                    "Static-start time-averaged diagnostic " + m_diag_name + " requires a positive (non-zero) value "
+                    "for the 'average_start_step' parameter."
+                );
+            }
+
+            if (averaging_period_time_specified || averaging_period_steps_specified) {
+                const std::string period_spec_warn_msg = "An averaging period was specified for the 'fixed_start' averaging mode " \
+                                                         "but will be IGNORED. Averaging will be performed between step " \
+                                                         + std::to_string(m_average_start_step) \
+                                                         + " and the specified intervals.";
+                ablastr::warn_manager::WMRecordWarning(
+                    "Diagnostics",
+                    period_spec_warn_msg,
+                    ablastr::warn_manager::WarnPriority::medium
+                );
+            }
+
+        }
+
+        if (m_time_average_mode == TimeAverageType::Dynamic) {
+            // exactly one of the two averaging period options must be set: neither none nor both
+            if (
+                (averaging_period_steps_specified && averaging_period_time_specified)
+                || !(averaging_period_steps_specified || averaging_period_time_specified)
+            ) {
+                WARPX_ABORT_WITH_MESSAGE("Please specify exactly one of 'average_period_steps' or 'average_period_time', not both or neither.");
+            }
+
+            int unused_start_step = -1;
+            const bool averaging_start_on_dynamic_period_specified = pp_diag_name.query("average_start_step", unused_start_step);
+            if (averaging_start_on_dynamic_period_specified) {
+                const std::string start_spec_warn_msg = "An averaging start step was specified for the 'dynamic_start' " \
+                                                        "time-averaged diagnostic " + m_diag_name + " but will be IGNORED. " \
+                                                        "Averaging will begin with the first averaging period.";
+                ablastr::warn_manager::WMRecordWarning(
+                    "Diagnostics",
+                    start_spec_warn_msg,
+                    ablastr::warn_manager::WarnPriority::medium
+                );
+            }
+        }
+    }
+
+
 #ifdef WARPX_DIM_RZ
     pp_diag_name.query("dump_rz_modes", m_dump_rz_modes);
 #else
@@ -138,11 +245,44 @@ FullDiagnostics::Flush ( int i_buffer, bool /* force_flush */ )
     // is supported for BackTransformed Diagnostics, in BTDiagnostics class.
     auto & warpx = WarpX::GetInstance();
 
-    m_flush_format->WriteToFile(
-        m_varnames, m_mf_output.at(i_buffer), m_geom_output.at(i_buffer), warpx.getistep(),
-        warpx.gett_new(0),
-        m_output_species.at(i_buffer), nlev_output, m_file_prefix,
-        m_file_min_digits, m_plot_raw_fields, m_plot_raw_fields_guards);
+    // Get the time step on the coarsest level.
+    const int step = warpx.getistep(0);
+    // For time-averaged diagnostics, we still write out an instantaneous diagnostic on step 0
+    // to accommodate a user workflow that only uses that type of diagnostic.
+    // This allows for quicker turnaround in setup by avoiding having to set an additional instantaneous diagnostic.
+    if (m_diag_type == DiagTypes::TimeAveraged && step > 0) {
+        if (m_time_average_mode == TimeAverageType::Static || m_time_average_mode == TimeAverageType::Dynamic) {
+            // Loop over the output levels and divide by the number of steps in the averaging period
+            for (int lev = 0; lev < nlev_output; ++lev) {
+                m_sum_mf_output.at(i_buffer).at(lev).mult(1._rt/static_cast<amrex::Real>(m_average_period_steps));
+            }
+
+            m_flush_format->WriteToFile(
+                m_varnames, m_sum_mf_output.at(i_buffer), m_geom_output.at(i_buffer), warpx.getistep(),
+                warpx.gett_new(0),
+                m_output_species.at(i_buffer), nlev_output, m_file_prefix,
+                m_file_min_digits, m_plot_raw_fields, m_plot_raw_fields_guards);
+
+            // Reset the values in the dynamic start time-averaged diagnostics after flush
+            if (m_time_average_mode == TimeAverageType::Dynamic) {
+                for (int lev = 0; lev < nlev_output; ++lev) {
+                    m_sum_mf_output.at(i_buffer).at(lev).setVal(0.);
+                }
+            }
+        }
+    } else {
+        if (m_diag_type == DiagTypes::TimeAveraged && step == 0) {
+            // For both dynamic_start and fixed_start, at step 0 we prepare an instantaneous output
+            amrex::Print() << Utils::TextMsg::Info("Time-averaged diagnostic " + m_diag_name
+                + " is preparing an instantaneous output during step " + std::to_string(step));
+        }
+
+        m_flush_format->WriteToFile(
+            m_varnames, m_mf_output.at(i_buffer), m_geom_output.at(i_buffer), warpx.getistep(),
+            warpx.gett_new(0),
+            m_output_species.at(i_buffer), nlev_output, m_file_prefix,
+            m_file_min_digits, m_plot_raw_fields, m_plot_raw_fields_guards);
+    }
 
     FlushRaw();
 }
@@ -165,9 +305,60 @@ FullDiagnostics::DoDump (int step, int /*i_buffer*/, bool force_flush)
 bool
 FullDiagnostics::DoComputeAndPack (int step, bool force_flush)
 {
+    // Start averaging at output step (from diag.intervals) - period + 1
+    bool in_averaging_period = false;
+    if (m_diag_type == DiagTypes::TimeAveraged) {
+
+        if (step > 0) {
+
+            if (m_time_average_mode == TimeAverageType::Dynamic) {
+                m_average_start_step = m_intervals.nextContains(step) - m_average_period_steps;
+                // check that the periods do not overlap and that the start step is not negative
+                if (m_average_start_step > 0) {
+                    // The start step cannot be on an interval step because then we would begin a new period and also output the old one
+                    if (m_average_start_step < m_intervals.previousContains(step)) {
+                        WARPX_ABORT_WITH_MESSAGE(
+                            "Averaging periods may not overlap within a single diagnostic. "
+                            "Please create a second diagnostic for overlapping time averaging periods "
+                            "and account for the increased memory consumption."
+                        );
+                    }
+                } else {
+                    WARPX_ABORT_WITH_MESSAGE(
+                        "The step to begin time averaging ("
+                        + std::to_string(m_average_start_step)
+                        + ") for diagnostic " + m_diag_name + " must be a positive number."
+ ); + } + + if (step >= m_average_start_step && step <= m_intervals.nextContains(step)) { + in_averaging_period = true; + + if (m_time_average_mode == TimeAverageType::Static) { + // Update time averaging period to current step + m_average_period_steps = step - m_average_start_step; + } + } + // Print information on when time-averaging is active + if (in_averaging_period) { + if (step == m_average_start_step) { + amrex::Print() << Utils::TextMsg::Info( + "Begin time averaging for " + m_diag_name + " and output at step " + + std::to_string(m_intervals.nextContains(step)) + ); + } else { + amrex::Print() + << Utils::TextMsg::Info( + "Time-averaging during this step for diagnostic: " + m_diag_name); + } + } + } + } + } // Data must be computed and packed for full diagnostics // whenever the data needs to be flushed. - return (force_flush || m_intervals.contains(step+1)); + return (force_flush || m_intervals.contains(step+1) || in_averaging_period); + } void @@ -600,6 +791,7 @@ FullDiagnostics::InitializeBufferData (int i_buffer, int lev, bool restart ) { diag_dom.setHi( idim, warpx.Geom(lev).ProbLo(idim) + (ba.getCellCenteredBox( static_cast(ba.size())-1 ).bigEnd(idim) + 1) * warpx.Geom(lev).CellSize(idim)); } + } WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -614,6 +806,13 @@ FullDiagnostics::InitializeBufferData (int i_buffer, int lev, bool restart ) { int const ncomp = static_cast(m_varnames.size()); m_mf_output[i_buffer][lev] = amrex::MultiFab(ba, dmap, ncomp, ngrow); + if (m_diag_type == DiagTypes::TimeAveraged) { + // Allocate MultiFab for cell-centered field output accumulation. The data will be averaged before flushing. + m_sum_mf_output[i_buffer][lev] = amrex::MultiFab(ba, dmap, ncomp, ngrow); + // Initialize to zero because we add data. + m_sum_mf_output[i_buffer][lev].setVal(0.); + } + if (lev == 0) { // The extent of the domain covered by the diag multifab, m_mf_output //default non-periodic geometry for diags diff --git a/Source/Diagnostics/MultiDiagnostics.H b/Source/Diagnostics/MultiDiagnostics.H index d220396ed12..a22e20b44da 100644 --- a/Source/Diagnostics/MultiDiagnostics.H +++ b/Source/Diagnostics/MultiDiagnostics.H @@ -11,8 +11,6 @@ #include #include -/** All types of diagnostics. */ -enum struct DiagTypes {Full, BackTransformed, BoundaryScraping}; /** * \brief This class contains a vector of all diagnostics in the simulation. 
diff --git a/Source/Diagnostics/MultiDiagnostics.cpp b/Source/Diagnostics/MultiDiagnostics.cpp
index ea14919c713..2119ac276f9 100644
--- a/Source/Diagnostics/MultiDiagnostics.cpp
+++ b/Source/Diagnostics/MultiDiagnostics.cpp
@@ -21,12 +21,12 @@ MultiDiagnostics::MultiDiagnostics ()
      */
     alldiags.resize( ndiags );
     for (int i=0; i<ndiags; i++){
-        if ( diags_types[i] == DiagTypes::Full ){
-            alldiags[i] = std::make_unique<FullDiagnostics>(i, diags_names[i]);
+        if ( diags_types[i] == DiagTypes::Full || diags_types[i] == DiagTypes::TimeAveraged ){
+            alldiags[i] = std::make_unique<FullDiagnostics>(i, diags_names[i], diags_types[i]);
         } else if ( diags_types[i] == DiagTypes::BackTransformed ){
-            alldiags[i] = std::make_unique<BTDiagnostics>(i, diags_names[i]);
+            alldiags[i] = std::make_unique<BTDiagnostics>(i, diags_names[i], diags_types[i]);
         } else if ( diags_types[i] == DiagTypes::BoundaryScraping ){
-            alldiags[i] = std::make_unique<BoundaryScrapingDiagnostics>(i, diags_names[i]);
+            alldiags[i] = std::make_unique<BoundaryScrapingDiagnostics>(i, diags_names[i], diags_types[i]);
         } else {
             WARPX_ABORT_WITH_MESSAGE("Unknown diagnostic type");
         }
@@ -68,9 +68,10 @@ MultiDiagnostics::ReadParameters ()
         std::string diag_type_str;
         pp_diag_name.get("diag_type", diag_type_str);
         WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
-            diag_type_str == "Full" || diag_type_str == "BackTransformed" || diag_type_str == "BoundaryScraping",
-            "<diag>.diag_type must be Full or BackTransformed or BoundaryScraping");
+            diag_type_str == "Full" || diag_type_str == "TimeAveraged" || diag_type_str == "BackTransformed" || diag_type_str == "BoundaryScraping",
+            "<diag>.diag_type must be Full, TimeAveraged, BackTransformed or BoundaryScraping");
         if (diag_type_str == "Full") { diags_types[i] = DiagTypes::Full; }
+        if (diag_type_str == "TimeAveraged") { diags_types[i] = DiagTypes::TimeAveraged; }
         if (diag_type_str == "BackTransformed") { diags_types[i] = DiagTypes::BackTransformed; }
         if (diag_type_str == "BoundaryScraping") { diags_types[i] = DiagTypes::BoundaryScraping; }
     }
diff --git a/Source/Diagnostics/WarpXOpenPMD.cpp b/Source/Diagnostics/WarpXOpenPMD.cpp
index 4e1b6238adf..e38ae8c8300 100644
--- a/Source/Diagnostics/WarpXOpenPMD.cpp
+++ b/Source/Diagnostics/WarpXOpenPMD.cpp
@@ -1195,6 +1195,8 @@ WarpXOpenPMDPlot::SetupFields ( openPMD::Container< openPMD::Mesh >& meshes,
     if (WarpX::do_dive_cleaning) {
         meshes.setAttribute("chargeCorrectionParameters", "period=1");
     }
+    // TODO set meta-data information for time-averaged quantities
+    // but we need information of the specific diagnostic in here
 }

From dda2dc43fb29cfa3f2c44597cfc8212d52e842ee Mon Sep 17 00:00:00 2001
From: Arianna Formenti
Date: Mon, 28 Oct 2024 09:37:51 -0700
Subject: [PATCH 059/278] Docs: how to generate QED tables in beam-beam example (#5416)

This PR adds details to the beam-beam collision example about how to generate the QED lookup tables.

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .../beam_beam_collision/README.rst          | 38 ++++++++++++++++++-
 .../inputs_test_3d_beam_beam_collision     |  6 +++
 2 files changed, 42 insertions(+), 2 deletions(-)

diff --git a/Examples/Physics_applications/beam_beam_collision/README.rst b/Examples/Physics_applications/beam_beam_collision/README.rst
index 28fdc1ee70e..d75d43c6d4d 100644
--- a/Examples/Physics_applications/beam_beam_collision/README.rst
+++ b/Examples/Physics_applications/beam_beam_collision/README.rst
@@ -30,6 +30,26 @@ For `MPI-parallel <https://www.mpi-forum.org>`__ runs, prefix these lines with ``mpiexec -n 4 ...`` or ``srun -n 4 ...``, depending on the system.
    :caption: You can copy this file from ``Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision``.
+QED tables
+----------
+
+The quantum synchrotron and nonlinear Breit-Wheeler modules are based on a Monte Carlo algorithm that computes the probabilities of an event from tabulated values.
+WarpX comes with `builtin` tables (see the input file above), however these are low-resolution tables that may not provide accurate results.
+There are two ways to generate your own lookup table:
+
+* Inside WarpX, at runtime: the tables are generated by WarpX itself at the beginning of the simulation.
+  This requires compiling WarpX with ``-DWarpX_QED_TABLE_GEN=ON`` and adding the desired table parameters to WarpX's input file.
+  `Here `__ are more details.
+
+* Outside of WarpX, using an external table generator: the tables are pregenerated, before running the actual simulation.
+  This standalone tool can be compiled at the same time as WarpX using ``-DWarpX_QED_TOOLS=ON``.
+  The table parameters are then passed to the table generator and do not need to be added to WarpX's input file.
+  `Here `__ are more details.
+
+Once the tables have been generated, they can be loaded in the input file using
+``qed_qs,bw.lookup_table_mode=load`` and ``qed_qs,bw.load_table_from=/path/to/your/table``.
+
+
 Visualize
 ---------
 
@@ -42,13 +62,13 @@ We compare different results for the reduced diagnostics with the literature:
 The small-scale simulation has been performed with a resolution of ``nx = 64, ny = 64, nz = 64`` grid cells, while the large-scale one has a much higher resolution of ``nx = 512, ny = 512, nz = 1024``.
 Moreover, the large-scale simulation uses dedicated QED lookup tables instead of the builtin tables.
-To generate the tables within WarpX, the code must be compiled with the flag ``-DWarpX_QED_TABLE_GEN=ON``.
-For the large-scale simulation we have used the following options:
+For the large-scale simulation we have used the following options (added to the input file):
 
 .. code-block:: ini
 
    qed_qs.lookup_table_mode = generate
   qed_bw.lookup_table_mode = generate
+
   qed_qs.tab_dndt_chi_min=1e-3
   qed_qs.tab_dndt_chi_max=2e3
   qed_qs.tab_dndt_how_many=512
@@ -58,6 +78,7 @@ For the large-scale simulation we have used the following options:
   qed_qs.tab_em_frac_how_many=512
   qed_qs.tab_em_frac_min=1e-12
   qed_qs.save_table_in=my_qs_table.txt
+
   qed_bw.tab_dndt_chi_min=1e-2
   qed_bw.tab_dndt_chi_max=2e3
   qed_bw.tab_dndt_how_many=512
@@ -68,6 +89,19 @@ For the large-scale simulation we have used the following options:
   qed_bw.save_table_in=my_bw_table.txt
 
 
+The same tables can also be obtained using the table generator with the following commands:
+
+.. code-block:: bash
+
+   ./qed_table_generator --table QS --mode DP -o my_qs_table.txt \
+       --dndt_chi_min 1e-3 --dndt_chi_max 2e3 --dndt_how_many 512 \
+       --em_chi_min 1e-3 --em_chi_max 2e3 --em_frac_min 1e-12 --em_chi_how_many 512 --em_frac_how_many 512
+
+
+   ./qed_table_generator --table BW --mode DP -o my_bw_table.txt \
+       --dndt_chi_min 1e-2 --dndt_chi_max 2e3 --dndt_how_many 512 --pair_chi_min 1e-2 --pair_chi_max 2e3 --pair_chi_how_many 512 --pair_frac_how_many 512
+
+
 .. figure:: https://gist.github.com/user-attachments/assets/2dd43782-d039-4faa-9d27-e3cf8fb17352
    :alt: Beam-beam collision benchmark against :cite:t:`ex-Yakimenko2019`.
   :width: 100%

diff --git a/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision b/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision
index d0cf3cd7ebf..1f58f68ba69 100644
--- a/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision
+++ b/Examples/Physics_applications/beam_beam_collision/inputs_test_3d_beam_beam_collision
@@ -203,6 +203,12 @@ qed_bw.chi_min = 1.e-2
 #qed_bw.tab_pair_frac_how_many=512
 #qed_bw.save_table_in=my_bw_table.txt
 
+# if you wish to use existing tables:
+#qed_qs.lookup_table_mode=load
+#qed_qs.load_table_from = /path/to/my_qs_table.txt
+#qed_bw.lookup_table_mode=load
+#qed_bw.load_table_from = /path/to/my_bw_table.txt
+
 warpx.do_qed_schwinger = 0.
 
 #################################

From fcd5a09c2cc393f69cbf3e18bb943072a5a9fc3d Mon Sep 17 00:00:00 2001
From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>
Date: Mon, 28 Oct 2024 14:08:11 -0700
Subject: [PATCH 060/278] CI: fix IntelLLVM builds (#5419)

The CI checks `Intel / oneAPI ICX SP` and `Intel / oneAPI DPC++ SP` have been failing for a few days.

This is likely due to the fact that the GitHub Actions runner is now installing IntelLLVM 2025.0.0 instead of IntelLLVM 2024.2.1, as until a few days ago.

This causes the following issue when building openPMD:
```console
/home/runner/work/WarpX/WarpX/build_sp/_deps/fetchedopenpmd-src/include/openPMD/backend/Container.hpp:263:32: error: no member named 'm_container' in 'Container'
  263 |         container().swap(other.m_container);
      |                          ~~~~~ ^
1 error generated.
```

We can try to install the previous version of IntelLLVM manually and see if that fixes the issue.
---
 .github/workflows/dependencies/dpcpp.sh | 5 ++++-
 .github/workflows/intel.yml             | 2 ++
 2 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/.github/workflows/dependencies/dpcpp.sh b/.github/workflows/dependencies/dpcpp.sh
index 3b146405b4b..2ca89e03d3f 100755
--- a/.github/workflows/dependencies/dpcpp.sh
+++ b/.github/workflows/dependencies/dpcpp.sh
@@ -29,13 +29,16 @@ df -h
 # https://github.com/ECP-WarpX/WarpX/pull/1566#issuecomment-790934878
 
 # try apt install up to five times, to avoid connection splits
+# FIXME install latest version of IntelLLVM, Intel MKL
+#       after conflicts with openPMD are resolved
 status=1
 for itry in {1..5}
 do
     sudo apt-get install -y --no-install-recommends \
         build-essential \
         cmake \
-        intel-oneapi-compiler-dpcpp-cpp intel-oneapi-mkl-devel \
+        intel-oneapi-compiler-dpcpp-cpp=2024.2.1-1079 \
+        intel-oneapi-mkl-devel=2024.2.1-103 \
         g++ gfortran \
         libopenmpi-dev \
         openmpi-bin \
diff --git a/.github/workflows/intel.yml b/.github/workflows/intel.yml
index 170008d0672..9b98c6e5990 100644
--- a/.github/workflows/intel.yml
+++ b/.github/workflows/intel.yml
@@ -112,6 +112,7 @@ jobs:
           set +e
           source /opt/intel/oneapi/setvars.sh
           set -e
+          export PATH=$PATH:/opt/intel/oneapi/compiler/2024.2/bin  # FIXME
           export CXX=$(which icpx)
           export CC=$(which icx)
@@ -176,6 +177,7 @@ jobs:
           set +e
           source /opt/intel/oneapi/setvars.sh
           set -e
+          export PATH=$PATH:/opt/intel/oneapi/compiler/2024.2/bin  # FIXME
           export CXX=$(which icpx)
           export CC=$(which icx)
           export CXXFLAGS="-fsycl ${CXXFLAGS}"

From 026e093ddf5165a3465dadbdd20336a3bdc0afa8 Mon Sep 17 00:00:00 2001
From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>
Date: Tue, 29 Oct 2024 06:23:01 -0700
Subject: [PATCH 061/278] CTest: fix bug with `WarpX_APP=OFF` and `WarpX_PYTHON=ON` (#5421)

Our `CMakeLists` to set up the `ctest` executable had a logic error when `WarpX_APP=OFF` and `WarpX_PYTHON=ON`, in that it was trying to install executable tests without an executable application.

The error message looked something like
```console
Error evaluating generator expression:

  $<TARGET_FILE:app_3d>

No target "app_3d"
```
---
 Examples/CMakeLists.txt | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/Examples/CMakeLists.txt b/Examples/CMakeLists.txt
index fe1da3d08e6..f36bcbb9973 100644
--- a/Examples/CMakeLists.txt
+++ b/Examples/CMakeLists.txt
@@ -93,6 +93,11 @@ function(add_warpx_test
         return()
     endif()
 
+    # cannot run executable tests w/o WarpX executable application
+    if(NOT python AND NOT WarpX_APP)
+        return()
+    endif()
+
     # set MPI executable
     set(THIS_MPI_TEST_EXE
         ${MPIEXEC_EXECUTABLE}

From e7641a21d85099131e6c0c112b6ee0880ebfa5ef Mon Sep 17 00:00:00 2001
From: Luca Fedeli
Date: Tue, 29 Oct 2024 16:55:22 +0100
Subject: [PATCH 062/278] Docs: update documentation for Adastra supercomputer (CINES) (#5423)

This PR updates the instructions to compile WarpX on the Adastra supercomputer (CINES, France)
---
 Tools/machines/adastra-cines/adastra_warpx.profile.example | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/Tools/machines/adastra-cines/adastra_warpx.profile.example b/Tools/machines/adastra-cines/adastra_warpx.profile.example
index 3cba4346421..8aaff6e4450 100644
--- a/Tools/machines/adastra-cines/adastra_warpx.profile.example
+++ b/Tools/machines/adastra-cines/adastra_warpx.profile.example
@@ -8,7 +8,9 @@ module load cpe/23.12
 module load craype-accel-amd-gfx90a craype-x86-trento
 module load PrgEnv-cray
 module load CCE-GPU-3.0.0
-module load amd-mixed/5.2.3
+module load amd-mixed/5.7.1
+module load develop
+module load cmake/3.27.9
 
 # optional: for PSATD in RZ geometry support
 export CMAKE_PREFIX_PATH=${SHAREDHOMEDIR}/sw/adastra/gpu/blaspp-2024.05.31:$CMAKE_PREFIX_PATH

From c19d0c2c22bc127b7b1be3f0d1c61c93256258f1 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Tue, 29 Oct 2024 17:49:43 +0000
Subject: [PATCH 063/278] [pre-commit.ci] pre-commit autoupdate (#5420)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/astral-sh/ruff-pre-commit: v0.7.0 → v0.7.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.0...v0.7.1)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 29f612a3ef6..27065ac5ca3 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -69,7 +69,7 @@ repos:
 # Python: Ruff linter & formatter
 #   https://docs.astral.sh/ruff/
 -   repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.7.0
+    rev: v0.7.1
     hooks:
     # Run the linter
     -   id: ruff

From bf905144acde28548a89ed1d415ace70e4d7d008 Mon Sep 17 00:00:00 2001
From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>
Date: Tue, 29 Oct 2024 10:50:14 -0700
Subject: [PATCH 064/278] AMReX/pyAMReX/PICSAR: weekly update (#5418)

- Weekly update to latest AMReX:
```console
./Tools/Release/updateAMReX.py
```
- Weekly update to latest pyAMReX:
```console
./Tools/Release/updatepyAMReX.py
```
- Weekly update to latest PICSAR (no changes):
```console
./Tools/Release/updatePICSAR.py
```
---
 .github/workflows/cuda.yml       | 2 +-
 cmake/dependencies/AMReX.cmake   | 2 +-
 cmake/dependencies/pyAMReX.cmake | 2 +-
 3 files changed, 3 insertions(+), 3 deletions(-)

diff --git
a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 1f70e7128bd..1a89f6668d5 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -137,7 +137,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 62c2a81eac7862d526e5861ef2befc00b7f5b759 && cd - + cd ../amrex && git checkout --detach 92679babfc2cc66ca06ee591a80001db57c89878 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 2c4976777e2..9854cbb0800 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -283,7 +283,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "62c2a81eac7862d526e5861ef2befc00b7f5b759" +set(WarpX_amrex_branch "92679babfc2cc66ca06ee591a80001db57c89878" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index 48dbebcc5c6..3236851d392 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "d96b4948cc5812be82dbff1df5d62927c866ae07" +set(WarpX_pyamrex_branch "1aa1db34a0d1bdc084bd6069a4fd97b26266af5c" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") From f8b3270f88cca922305e3882f6787a934c630c6b Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Tue, 29 Oct 2024 12:34:32 -0700 Subject: [PATCH 065/278] Add PICMI interface for injecting from embedded boundary (#5395) This adds the option to inject particles from the embedded boundary with PICMI. 
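A hedged usage sketch of the new flag follows; the flag name ``warpx_inject_from_embedded_boundary`` is taken from the diff below, while the flux values and the exact set of other PICMI-standard arguments are illustrative assumptions:

```python
# Sketch, not verbatim from this PR: values are illustrative and further
# PICMI-standard arguments (e.g. the momentum distribution) are omitted.
from pywarpx import picmi

flux_dist = picmi.UniformFluxDistribution(
    flux=1.0e12,        # injected flux [m^-2 s^-1] (assumed example value)
    flux_tmin=0.25e-8,  # optional start of the injection window [s]
    flux_tmax=0.75e-8,  # optional end of the injection window [s]
    # New WarpX-specific flag added by this PR; when True, the plane
    # parameters (flux_normal_axis, surface_flux_position, flux_direction)
    # are not emitted to the WarpX inputs, as shown in the diff below.
    warpx_inject_from_embedded_boundary=True,
)

electrons = picmi.Species(
    particle_type="electron",
    name="electrons",
    initial_distribution=flux_dist,
)
```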
--- Python/pywarpx/picmi.py | 28 +++++++++++++++++++++------- 1 file changed, 21 insertions(+), 7 deletions(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index c7a27f62df0..08c71ed02de 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -691,6 +691,11 @@ def setup_parse_momentum_functions( class UniformFluxDistribution( picmistandard.PICMI_UniformFluxDistribution, DensityDistributionBase ): + def init(self, kw): + self.inject_from_embedded_boundary = kw.pop( + "warpx_inject_from_embedded_boundary", False + ) + def distribution_initialize_inputs( self, species_number, layout, species, density_scale, source_name ): @@ -702,13 +707,22 @@ def distribution_initialize_inputs( species.add_new_group_attr(source_name, "flux", self.flux) if density_scale is not None: species.add_new_group_attr(source_name, "flux", density_scale) - species.add_new_group_attr( - source_name, "flux_normal_axis", self.flux_normal_axis - ) - species.add_new_group_attr( - source_name, "surface_flux_pos", self.surface_flux_position - ) - species.add_new_group_attr(source_name, "flux_direction", self.flux_direction) + + if not self.inject_from_embedded_boundary: + species.add_new_group_attr( + source_name, "flux_normal_axis", self.flux_normal_axis + ) + species.add_new_group_attr( + source_name, "surface_flux_pos", self.surface_flux_position + ) + species.add_new_group_attr( + source_name, "flux_direction", self.flux_direction + ) + else: + species.add_new_group_attr( + source_name, "inject_from_embedded_boundary", True + ) + species.add_new_group_attr(source_name, "flux_tmin", self.flux_tmin) species.add_new_group_attr(source_name, "flux_tmax", self.flux_tmax) From 057d403b1acee6513e7c0b49166b5643e5a3f9c5 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Thu, 31 Oct 2024 09:19:02 -0700 Subject: [PATCH 066/278] Fix bug where tmax was ignored in flux injection (#5430) There was a bug where WarpX would only read `flux_tmin`, `flux_tmax` for the injection from a plane, but not for the injection from the EB. This PR fixes the bug, and uses `tmin`/`tmax` in the CI test for the EB injection. 
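To see why honoring the time window matters, here is a back-of-the-envelope estimate of the expected injected particle number, mirroring the updated analysis script below; the sphere radius is an assumed placeholder, since it is not given in this excerpt:

```python
import numpy as np

flux = 1.0                 # electron.flux_function(x,y,z,t) = "1." in the test inputs [m^-2 s^-1]
t_inj = 0.75e-8 - 0.25e-8  # flux_tmax - flux_tmin: only this window contributes now
R = 2.0e-6                 # assumed EB sphere radius [m]; placeholder value

emission_surface = 4.0 * np.pi * R**2   # 3D spherical emitter area [m^2]
Ntot = flux * emission_surface * t_inj  # expected number of injected particles
print(f"expected injected number: {Ntot:.3e}")
```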
--- .../analysis_flux_injection_from_eb.py | 4 ++-- Examples/Tests/flux_injection/inputs_base_from_eb | 2 ++ .../test_2d_flux_injection_from_eb.json | 12 ++++++------ .../test_3d_flux_injection_from_eb.json | 14 +++++++------- .../test_rz_flux_injection_from_eb.json | 14 +++++++------- Source/Initialization/PlasmaInjector.cpp | 5 +++-- 6 files changed, 27 insertions(+), 24 deletions(-) diff --git a/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py b/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py index 36ff50bea06..c9e1c6df42c 100755 --- a/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py +++ b/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py @@ -32,7 +32,7 @@ fn = sys.argv[1] ds = yt.load(fn) ad = ds.all_data() -t_max = ds.current_time.item() # time of simulation +t_inj = 0.5e-8 # duration for which the flux injection was active # Extract the dimensionality of the simulation with open("./warpx_used_inputs", "r") as f: @@ -52,7 +52,7 @@ emission_surface = 4 * np.pi * R**2 # in m^2 elif dims == "2D": emission_surface = 2 * np.pi * R # in m -Ntot = flux * emission_surface * t_max +Ntot = flux * emission_surface * t_inj # Parameters of the histogram hist_bins = 50 diff --git a/Examples/Tests/flux_injection/inputs_base_from_eb b/Examples/Tests/flux_injection/inputs_base_from_eb index 3e32d8799b6..87b9c32592b 100644 --- a/Examples/Tests/flux_injection/inputs_base_from_eb +++ b/Examples/Tests/flux_injection/inputs_base_from_eb @@ -29,6 +29,8 @@ electron.inject_from_embedded_boundary = 1 electron.num_particles_per_cell = 100 electron.flux_profile = parse_flux_function electron.flux_function(x,y,z,t) = "1." +electron.flux_tmin = 0.25e-8 +electron.flux_tmax = 0.75e-8 electron.momentum_distribution_type = gaussianflux electron.ux_th = 0.01 electron.uy_th = 0.01 diff --git a/Regression/Checksum/benchmarks_json/test_2d_flux_injection_from_eb.json b/Regression/Checksum/benchmarks_json/test_2d_flux_injection_from_eb.json index dd489f16e05..da993c9ef4b 100644 --- a/Regression/Checksum/benchmarks_json/test_2d_flux_injection_from_eb.json +++ b/Regression/Checksum/benchmarks_json/test_2d_flux_injection_from_eb.json @@ -1,11 +1,11 @@ { "lev=0": {}, "electron": { - "particle_momentum_x": 6.990772711451971e-19, - "particle_momentum_y": 5.4131306169803364e-20, - "particle_momentum_z": 6.997294931789925e-19, - "particle_position_x": 35518.95120597846, - "particle_position_y": 35517.855675902414, - "particle_weight": 1.25355e-07 + "particle_momentum_x": 3.4911323396038835e-19, + "particle_momentum_y": 2.680312173420972e-20, + "particle_momentum_z": 3.4918430443688734e-19, + "particle_position_x": 17950.08139982036, + "particle_position_y": 17949.47183079554, + "particle_weight": 6.269e-08 } } diff --git a/Regression/Checksum/benchmarks_json/test_3d_flux_injection_from_eb.json b/Regression/Checksum/benchmarks_json/test_3d_flux_injection_from_eb.json index e947a8af07b..15b6c7b602c 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_flux_injection_from_eb.json +++ b/Regression/Checksum/benchmarks_json/test_3d_flux_injection_from_eb.json @@ -1,12 +1,12 @@ { "lev=0": {}, "electron": { - "particle_momentum_x": 4.371688233196277e-18, - "particle_momentum_y": 4.368885079657374e-18, - "particle_momentum_z": 4.367429424105371e-18, - "particle_position_x": 219746.94401890738, - "particle_position_y": 219690.7015248918, - "particle_position_z": 219689.45580938633, - "particle_weight": 4.954974999999999e-07 + "particle_momentum_x": 2.1855512033870577e-18, + 
"particle_momentum_y": 2.1826030840183147e-18, + "particle_momentum_z": 2.181852403122796e-18, + "particle_position_x": 111042.81925863726, + "particle_position_y": 111012.52928910403, + "particle_position_z": 111015.90903542604, + "particle_weight": 2.4775750000000003e-07 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_rz_flux_injection_from_eb.json b/Regression/Checksum/benchmarks_json/test_rz_flux_injection_from_eb.json index 23884de9725..fb7142afed0 100644 --- a/Regression/Checksum/benchmarks_json/test_rz_flux_injection_from_eb.json +++ b/Regression/Checksum/benchmarks_json/test_rz_flux_injection_from_eb.json @@ -1,12 +1,12 @@ { "lev=0": {}, "electron": { - "particle_momentum_x": 6.734984863106283e-19, - "particle_momentum_y": 6.786279785869023e-19, - "particle_momentum_z": 1.0527983828124758e-18, - "particle_position_x": 53309.270966506396, - "particle_position_y": 53302.3776094842, - "particle_theta": 58707.74469425615, - "particle_weight": 4.991396867417661e-07 + "particle_momentum_x": 3.3665608248716305e-19, + "particle_momentum_y": 3.392690322852239e-19, + "particle_momentum_z": 5.254577143779578e-19, + "particle_position_x": 26933.772112044953, + "particle_position_y": 26926.994273876346, + "particle_theta": 29492.77423173835, + "particle_weight": 2.4953304765944705e-07 } } \ No newline at end of file diff --git a/Source/Initialization/PlasmaInjector.cpp b/Source/Initialization/PlasmaInjector.cpp index 76bb7a5be42..468d9e7e336 100644 --- a/Source/Initialization/PlasmaInjector.cpp +++ b/Source/Initialization/PlasmaInjector.cpp @@ -305,6 +305,9 @@ void PlasmaInjector::setupNFluxPerCell (amrex::ParmParse const& pp_species) } #endif + utils::parser::queryWithParser(pp_species, source_name, "flux_tmin", flux_tmin); + utils::parser::queryWithParser(pp_species, source_name, "flux_tmax", flux_tmax); + // Check whether injection from the embedded boundary is requested utils::parser::queryWithParser(pp_species, source_name, "inject_from_embedded_boundary", m_inject_from_eb); if (m_inject_from_eb) { @@ -318,8 +321,6 @@ void PlasmaInjector::setupNFluxPerCell (amrex::ParmParse const& pp_species) // Parse the parameters of the plane (position, normal direction, etc.) utils::parser::getWithParser(pp_species, source_name, "surface_flux_pos", surface_flux_pos); - utils::parser::queryWithParser(pp_species, source_name, "flux_tmin", flux_tmin); - utils::parser::queryWithParser(pp_species, source_name, "flux_tmax", flux_tmax); std::string flux_normal_axis_string; utils::parser::get(pp_species, source_name, "flux_normal_axis", flux_normal_axis_string); flux_normal_axis = -1; From 20a79544daeaf2495fc4d2f37f85a29db32a4ee6 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Fri, 1 Nov 2024 12:21:25 -0700 Subject: [PATCH 067/278] Rigid injection: Center field scaling around the v push (#5389) In the rigid injection, the fields where scale by the fraction of time spent between `n*dt` and `(n+1)*dt` to the right of the injection plane. However, to be consistent with the leap-frog velocity update, this needs to be between `(n-1/2)*dt` and `(n+1/2)*dt` instead. As a side-effect of this PR, saving and re-setting `u` and `optical_depth` to their original value is not needed anymore since the scaling factor for E and B is 0 for particles to the left of the plane. 
--- .../Checksum/benchmarks_json/test_1d_fel.json | 18 +++--- .../test_2d_comoving_psatd_hybrid.json | 56 +++++++++---------- .../test_2d_galilean_psatd_hybrid.json | 56 +++++++++---------- .../test_2d_laser_acceleration_boosted.json | 52 ++++++++--------- Source/Particles/Gather/ScaleFields.H | 17 +++++- .../RigidInjectedParticleContainer.cpp | 44 --------------- 6 files changed, 106 insertions(+), 137 deletions(-) diff --git a/Regression/Checksum/benchmarks_json/test_1d_fel.json b/Regression/Checksum/benchmarks_json/test_1d_fel.json index 2bd9c1fad80..ffcc97fa057 100644 --- a/Regression/Checksum/benchmarks_json/test_1d_fel.json +++ b/Regression/Checksum/benchmarks_json/test_1d_fel.json @@ -1,31 +1,31 @@ { "lev=0": { "Bx": 0.0, - "By": 514.5044890273722, + "By": 473.98537926589177, "Bz": 0.0, - "Ex": 154245109024.33972, + "Ex": 142097845843.78326, "Ey": 0.0, "Ez": 0.0, - "jx": 1161126105.5594487, + "jx": 1260205974.7220135, "jy": 0.0, "jz": 0.0 }, "electrons": { "particle_position_x": 0.0, "particle_position_y": 0.0, - "particle_position_z": 13607.569953355982, - "particle_momentum_x": 3.095483353687591e-19, + "particle_position_z": 13607.572916093213, + "particle_momentum_x": 3.2646797960476606e-19, "particle_momentum_y": 0.0, - "particle_momentum_z": 1.5419514460764825e-16, + "particle_momentum_z": 1.541338620507345e-16, "particle_weight": 1349823909946836.0 }, "positrons": { "particle_position_x": 0.0, "particle_position_y": 0.0, - "particle_position_z": 13607.569953355982, - "particle_momentum_x": 3.095483353687591e-19, + "particle_position_z": 13607.572916093213, + "particle_momentum_x": 3.2646797960476606e-19, "particle_momentum_y": 0.0, - "particle_momentum_z": 1.5419514460764825e-16, + "particle_momentum_z": 1.541338620507345e-16, "particle_weight": 1349823909946836.0 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_2d_comoving_psatd_hybrid.json b/Regression/Checksum/benchmarks_json/test_2d_comoving_psatd_hybrid.json index 8b03899369b..930d48ed713 100644 --- a/Regression/Checksum/benchmarks_json/test_2d_comoving_psatd_hybrid.json +++ b/Regression/Checksum/benchmarks_json/test_2d_comoving_psatd_hybrid.json @@ -1,38 +1,38 @@ { "lev=0": { - "Bx": 1118808.3686978193, - "By": 3248970.5506422943, - "Bz": 280612.7921641442, - "Ex": 975536649649286.1, - "Ey": 402861835403418.1, - "Ez": 159049265640492.28, - "jx": 2.9996888133195436e+16, - "jy": 8.866654944519546e+16, - "jz": 3.164008885453435e+17, - "rho": 1059988299.6088305 - }, - "ions": { - "particle_momentum_x": 1.6150513873065298e-18, - "particle_momentum_y": 2.233426695677123e-18, - "particle_momentum_z": 4.279249529993671e-13, - "particle_position_x": 1.4883816864183497, - "particle_position_y": 16.452386504127254, - "particle_weight": 1.234867369440658e+18 + "Bx": 1118808.3708538802, + "By": 3248949.0437452313, + "Bz": 280612.7768961371, + "Ex": 975530336896144.1, + "Ey": 402861838033488.6, + "Ez": 159049784131625.12, + "jx": 2.9997142632475216e+16, + "jy": 8.866655055001146e+16, + "jz": 3.163953981093208e+17, + "rho": 1059970922.1974506 }, "electrons": { - "particle_momentum_x": 7.058167362825288e-19, - "particle_momentum_y": 2.204239326446281e-18, - "particle_momentum_z": 2.530521998715408e-16, - "particle_position_x": 1.5006581263609764, - "particle_position_y": 16.454388313398017, + "particle_momentum_x": 7.058252826278211e-19, + "particle_momentum_y": 2.204239315713169e-18, + "particle_momentum_z": 2.530521235191952e-16, + "particle_position_x": 1.5006579649318788, + "particle_position_y": 
16.454388304724286, "particle_weight": 1.234867020725368e+18 }, "beam": { - "particle_momentum_x": 6.869222298759882e-19, - "particle_momentum_y": 4.374719809060106e-19, - "particle_momentum_z": 6.4523206583503136e-18, - "particle_position_x": 0.001290816359726098, - "particle_position_y": 0.3586691102823157, + "particle_momentum_x": 6.88879318082965e-19, + "particle_momentum_y": 4.37466174746362e-19, + "particle_momentum_z": 6.4299296650127095e-18, + "particle_position_x": 0.0012936414423443238, + "particle_position_y": 0.3587414953163842, "particle_weight": 3120754537230.3823 + }, + "ions": { + "particle_momentum_x": 1.6150618501530563e-18, + "particle_momentum_y": 2.2334266731098355e-18, + "particle_momentum_z": 4.279249530957972e-13, + "particle_position_x": 1.488381686539698, + "particle_position_y": 16.4523865041322, + "particle_weight": 1.234867369440658e+18 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_2d_galilean_psatd_hybrid.json b/Regression/Checksum/benchmarks_json/test_2d_galilean_psatd_hybrid.json index dd56f8170a9..a163d063134 100644 --- a/Regression/Checksum/benchmarks_json/test_2d_galilean_psatd_hybrid.json +++ b/Regression/Checksum/benchmarks_json/test_2d_galilean_psatd_hybrid.json @@ -1,38 +1,38 @@ { "lev=0": { - "Bx": 1086729.9718613266, - "By": 2886554.482275311, - "Bz": 264259.55093734514, - "Ex": 867387781289915.2, - "Ey": 392666724461952.5, - "Ez": 146897592531660.03, - "jx": 2.702866174672266e+16, - "jy": 8.615938361747776e+16, - "jz": 2.7329155817806224e+17, - "rho": 915945723.7934376 + "Bx": 1086729.9879225595, + "By": 2886531.8361456757, + "Bz": 264259.55266959703, + "Ex": 867381192933999.0, + "Ey": 392666738858258.7, + "Ez": 146898030091111.84, + "jx": 2.702892158065604e+16, + "jy": 8.615938867870698e+16, + "jz": 2.7328506574305683e+17, + "rho": 915924567.6956444 + }, + "beam": { + "particle_momentum_x": 7.006049777955171e-19, + "particle_momentum_y": 4.374916846096741e-19, + "particle_momentum_z": 6.173292885825711e-18, + "particle_position_x": 0.0016046573777589298, + "particle_position_y": 0.35899824939059793, + "particle_weight": 3120754537230.3823 }, "ions": { - "particle_momentum_x": 1.4394902513923003e-18, - "particle_momentum_y": 1.5967629157922875e-18, - "particle_momentum_z": 4.287340658051679e-13, - "particle_position_x": 1.4911814217142487, - "particle_position_y": 16.521964978771, + "particle_momentum_x": 1.4395010524514718e-18, + "particle_momentum_y": 1.596762923413923e-18, + "particle_momentum_z": 4.2873406589510426e-13, + "particle_position_x": 1.4911814218338595, + "particle_position_y": 16.52196497877563, "particle_weight": 1.2372405194129536e+18 }, "electrons": { - "particle_momentum_x": 6.240933687389075e-19, - "particle_momentum_y": 1.5790611427694247e-18, - "particle_momentum_z": 2.5064357834741096e-16, - "particle_position_x": 1.501413766926399, - "particle_position_y": 16.523781713952324, + "particle_momentum_x": 6.241019323257125e-19, + "particle_momentum_y": 1.5790611706036782e-18, + "particle_momentum_z": 2.5064350576384073e-16, + "particle_position_x": 1.501413593465263, + "particle_position_y": 16.52378170448397, "particle_weight": 1.2372401466086835e+18 - }, - "beam": { - "particle_momentum_x": 7.000932845220306e-19, - "particle_momentum_y": 4.374936866729326e-19, - "particle_momentum_z": 6.194468548032543e-18, - "particle_position_x": 0.0016030835496557787, - "particle_position_y": 0.3589262705964349, - "particle_weight": 3120754537230.3823 } } \ No newline at end of file diff --git 
a/Regression/Checksum/benchmarks_json/test_2d_laser_acceleration_boosted.json b/Regression/Checksum/benchmarks_json/test_2d_laser_acceleration_boosted.json index dd59536ad37..b8592e87c82 100644 --- a/Regression/Checksum/benchmarks_json/test_2d_laser_acceleration_boosted.json +++ b/Regression/Checksum/benchmarks_json/test_2d_laser_acceleration_boosted.json @@ -1,38 +1,38 @@ { "lev=0": { - "Bx": 4818955.480797943, - "By": 1752.8025638791275, - "Bz": 14516.212782554387, - "Ex": 2366115503598.9224, - "Ey": 1446112025635674.2, - "Ez": 21864189507357.867, - "jx": 1996366349775593.5, - "jy": 5.312583827155926e+16, - "jz": 2.0491352624508764e+16, - "rho": 68443961.71852128 + "Bx": 4818955.485307051, + "By": 1752.8020185365554, + "Bz": 14516.212849649737, + "Ex": 2366115529014.2324, + "Ey": 1446112026998942.5, + "Ez": 21864189485739.55, + "jx": 1996366372981548.5, + "jy": 5.312583836344946e+16, + "jz": 2.049135259966133e+16, + "rho": 68443961.64027263 + }, + "beam": { + "particle_momentum_x": 3.535736052190267e-19, + "particle_momentum_y": 4.363217976210739e-19, + "particle_momentum_z": 5.658515465395611e-17, + "particle_position_x": 0.008314855161869274, + "particle_position_y": 1.170433573157185, + "particle_weight": 62415090744.60765 }, "electrons": { - "particle_momentum_x": 2.2135945391319113e-23, - "particle_momentum_y": 2.8224559499558413e-22, - "particle_momentum_z": 5.260626010214114e-22, - "particle_position_x": 0.010800577787628052, - "particle_position_y": 0.2111506062831815, + "particle_momentum_x": 2.213594541883545e-23, + "particle_momentum_y": 2.8224559261549207e-22, + "particle_momentum_z": 5.260626007410037e-22, + "particle_position_x": 0.010800577787636243, + "particle_position_y": 0.2111506062831794, "particle_weight": 4.121554826246186e+16 }, "ions": { - "particle_momentum_x": 6.248472277246885e-23, - "particle_momentum_y": 4.449097689427654e-22, - "particle_momentum_z": 5.768168724998047e-22, + "particle_momentum_x": 6.24847229412907e-23, + "particle_momentum_y": 4.449097671673176e-22, + "particle_momentum_z": 5.768168722032957e-22, "particle_position_x": 0.010800001678510512, "particle_position_y": 0.21114947608115425, "particle_weight": 4.121554826246186e+16 - }, - "beam": { - "particle_momentum_x": 3.5357456351701565e-19, - "particle_momentum_y": 4.363391839372122e-19, - "particle_momentum_z": 5.658606416951653e-17, - "particle_position_x": 0.008314723025211468, - "particle_position_y": 1.1704335743854242, - "particle_weight": 62415090744.60765 } } \ No newline at end of file diff --git a/Source/Particles/Gather/ScaleFields.H b/Source/Particles/Gather/ScaleFields.H index 5731bc047f4..a8c685eef8b 100644 --- a/Source/Particles/Gather/ScaleFields.H +++ b/Source/Particles/Gather/ScaleFields.H @@ -47,8 +47,21 @@ struct ScaleFields // This only approximates what should be happening. The particles // should be advanced a fraction of a time step instead. // Scaling the fields is much easier and may be good enough. - const amrex::Real dtscale = 1._rt - (m_z_plane_previous - zp)/(m_vz_ave_boosted + m_v_boost)/m_dt; - if (0._rt < dtscale && dtscale < 1._rt) + + // The scaling factor corresponds to the fraction of time that + // the particle spends to the right of the injection plane, + // between (n-1/2)*dt and (n+1/2)*dt, which is the interval + // over which the velocity is updated, in the leap-frog velocity push.
+ // (Note that here, `zp` is the particle position at time n*dt) + amrex::Real dtscale = 0.5_rt - (m_z_plane_previous - zp)/(m_vz_ave_boosted + m_v_boost)/m_dt; + // If the particle stays to the left of the plane during the + // whole push, simply set the scaling factor to 0, and thus + // the velocity push leaves the velocity unchanged. + if (dtscale < 0._rt) { + dtscale = 0; + } + // Scale the fields. + if (dtscale < 1._rt) { Exp *= dtscale; Eyp *= dtscale; diff --git a/Source/Particles/RigidInjectedParticleContainer.cpp b/Source/Particles/RigidInjectedParticleContainer.cpp index d1e1f48ab38..5d8b0111825 100644 --- a/Source/Particles/RigidInjectedParticleContainer.cpp +++ b/Source/Particles/RigidInjectedParticleContainer.cpp @@ -176,10 +176,6 @@ RigidInjectedParticleContainer::PushPX (WarpXParIter& pti, // Save the position, momentum and optical depth, making copies amrex::Gpu::DeviceVector xp_save, yp_save, zp_save; - amrex::Gpu::DeviceVector uxp_save, uyp_save, uzp_save; -#ifdef WARPX_QED - amrex::Gpu::DeviceVector optical_depth_save; -#endif const auto GetPosition = GetParticlePosition(pti, offset); auto SetPosition = SetParticlePosition(pti, offset); @@ -188,12 +184,6 @@ RigidInjectedParticleContainer::PushPX (WarpXParIter& pti, amrex::ParticleReal* const AMREX_RESTRICT uy = uyp.dataPtr() + offset; amrex::ParticleReal* const AMREX_RESTRICT uz = uzp.dataPtr() + offset; -#ifdef WARPX_QED - const bool loc_has_quantum_sync = has_quantum_sync(); - amrex::ParticleReal* AMREX_RESTRICT p_optical_depth = nullptr; - amrex::ParticleReal* AMREX_RESTRICT p_optical_depth_save = nullptr; -#endif - if (!done_injecting_lev) { // If the old values are not already saved, create copies here. @@ -201,27 +191,10 @@ RigidInjectedParticleContainer::PushPX (WarpXParIter& pti, yp_save.resize(np_to_push); zp_save.resize(np_to_push); - uxp_save.resize(np_to_push); - uyp_save.resize(np_to_push); - uzp_save.resize(np_to_push); - amrex::ParticleReal* const AMREX_RESTRICT xp_save_ptr = xp_save.dataPtr(); amrex::ParticleReal* const AMREX_RESTRICT yp_save_ptr = yp_save.dataPtr(); amrex::ParticleReal* const AMREX_RESTRICT zp_save_ptr = zp_save.dataPtr(); - amrex::ParticleReal* const AMREX_RESTRICT uxp_save_ptr = uxp_save.dataPtr(); - amrex::ParticleReal* const AMREX_RESTRICT uyp_save_ptr = uyp_save.dataPtr(); - amrex::ParticleReal* const AMREX_RESTRICT uzp_save_ptr = uzp_save.dataPtr(); - -#ifdef WARPX_QED - if(loc_has_quantum_sync){ - p_optical_depth = pti.GetAttribs(particle_comps["opticalDepthQSR"]).dataPtr() - + offset; - optical_depth_save.resize(np_to_push); - p_optical_depth_save = optical_depth_save.dataPtr(); - } -#endif - amrex::ParallelFor( np_to_push, [=] AMREX_GPU_DEVICE (long i) { amrex::ParticleReal xp, yp, zp; @@ -229,13 +202,6 @@ RigidInjectedParticleContainer::PushPX (WarpXParIter& pti, xp_save_ptr[i] = xp; yp_save_ptr[i] = yp; zp_save_ptr[i] = zp; - uxp_save_ptr[i] = ux[i]; - uyp_save_ptr[i] = uy[i]; - uzp_save_ptr[i] = uz[i]; -#ifdef WARPX_QED - if(loc_has_quantum_sync){ - p_optical_depth_save[i] = p_optical_depth[i];} -#endif }); } @@ -252,9 +218,6 @@ RigidInjectedParticleContainer::PushPX (WarpXParIter& pti, amrex::ParticleReal* AMREX_RESTRICT x_save = xp_save.dataPtr(); amrex::ParticleReal* AMREX_RESTRICT y_save = yp_save.dataPtr(); amrex::ParticleReal* AMREX_RESTRICT z_save = zp_save.dataPtr(); - amrex::ParticleReal* AMREX_RESTRICT ux_save = uxp_save.dataPtr(); - amrex::ParticleReal* AMREX_RESTRICT uy_save = uyp_save.dataPtr(); - amrex::ParticleReal* AMREX_RESTRICT uz_save = 
uzp_save.dataPtr(); // Undo the push for particles not injected yet. // The zp are advanced a fixed amount. @@ -267,9 +230,6 @@ RigidInjectedParticleContainer::PushPX (WarpXParIter& pti, amrex::ParticleReal xp, yp, zp; GetPosition(i, xp, yp, zp); if (zp <= z_plane_lev) { - ux[i] = ux_save[i]; - uy[i] = uy_save[i]; - uz[i] = uz_save[i]; xp = x_save[i]; yp = y_save[i]; if (rigid) { @@ -281,10 +241,6 @@ RigidInjectedParticleContainer::PushPX (WarpXParIter& pti, zp = z_save[i] + dt*uz[i]*gi; } SetPosition(i, xp, yp, zp); -#ifdef WARPX_QED - if(loc_has_quantum_sync){ - p_optical_depth[i] = p_optical_depth_save[i];} -#endif } }); } From 548a890e1995cf09b813bc9aff29f6e83bc68bfc Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 1 Nov 2024 16:46:35 -0700 Subject: [PATCH 068/278] Create Issue Templates (#5278) Create issue templates for: - [x] bugs - [x] installation issues - [x] feature requests - [x] blank - [ ] usage question -> link to [Discussions](https://github.com/ECP-WarpX/WarpX/discussions) --------- Co-authored-by: Edoardo Zoni Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> --- .github/ISSUE_TEMPLATE/bug_report.md | 58 ++++++++++++++++++++ .github/ISSUE_TEMPLATE/config.yml | 1 + .github/ISSUE_TEMPLATE/feature_request.md | 27 +++++++++ .github/ISSUE_TEMPLATE/installation-issue.md | 49 +++++++++++++++++ 4 files changed, 135 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md create mode 100644 .github/ISSUE_TEMPLATE/config.yml create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md create mode 100644 .github/ISSUE_TEMPLATE/installation-issue.md diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 00000000000..a545067b6d2 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,58 @@ +--- +name: Bug report +about: Report a bug or unexpected behavior. +labels: [bug] +--- + +_Please remove any sensitive information (e.g., passwords, API keys) from your submission. +Please check the relevant boxes and fill in the specific versions or details for the relevant items. +Thank you for taking the time to report this issue. We will respond as soon as possible._ + +## Description +A clear and concise description of the bug. + +## Expected behavior +What did you expect to happen when you encountered the issue? + +## How to reproduce +Please provide (if available): +- WarpX inputs files +- PICMI Python files +- Python post-processing scripts + +If you are unable to provide certain files or scripts, please describe the steps you took to encounter the issue. + +Please minimize your inputs/scripts to be concise and focused on the issue. +For instance, make the simulation scripts as small and fast to run as possible. + +## System information +Please check all relevant boxes and provide details. + +- Operating system (name and version): + - [ ] Linux: e.g., Ubuntu 22.04 LTS + - [ ] macOS: e.g., macOS Monterey 12.4 + - [ ] Windows: e.g., Windows 11 Pro +- Version of WarpX: e.g., latest, 24.10, etc. +- Installation method: + - [ ] Conda + - [ ] Spack + - [ ] PyPI + - [ ] Brew + - [ ] From source with CMake + - [ ] Module system on an HPC cluster +- Other dependencies: yes/no, describe +- Computational resources: + - [ ] MPI: e.g., 2 MPI processes + - [ ] OpenMP: e.g., 2 OpenMP threads + - [ ] CPU: e.g., 2 CPUs + - [ ] GPU: e.g., 2 GPUs (NVIDIA, AMD, etc.) 
+ +If you encountered the issue on an HPC cluster, please check our [HPC documentation](https://warpx.readthedocs.io/en/latest/install/hpc.html) to see if your HPC cluster is already supported. + +## Steps taken so far +What troubleshooting steps have you taken so far, and what were the results? + +Have you tried compiling and running in debug mode, following the instructions in our [debugging documentation](https://warpx.readthedocs.io/en/latest/usage/workflows/debugging.html)? + +## Additional information +If applicable, please add any additional information that may help explain the issue, such as log files (e.g., build logs, error logs, etc.), error messages (e.g., compiler errors, runtime errors, etc.), screenshots, or other relevant details. diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000000..0086358db1e --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1 @@ +blank_issues_enabled: true diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 00000000000..8e4630cc098 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,27 @@ +--- +name: Feature request +about: Suggest a new feature or enhancement. +labels: [enhancement] +--- + +_Please remove any sensitive information (e.g., passwords, API keys) from your submission. +Please check the relevant boxes and fill in the specific versions or details for the relevant items. +Thank you for taking the time to report this issue. We will respond as soon as possible._ + +## Context and motivation +Please provide a clear and concise description of the context that is prompting you to request a new feature. What problem are you trying to solve, and how will this feature help you achieve your goals? + +## Proposed feature +Describe the feature you would like to add to WarpX in detail. Please include: +- A clear and concise description of the feature +- Any relevant technical requirements or specifications +- How you envision the feature being used + +## Alternative solutions +Have you considered any alternative solutions or features that could achieve the same goal? If so, please describe them and explain why you believe the proposed feature is the best solution. + +## Additional information +If applicable, please provide any additional information that may be relevant to the feature request, such as: +- Links to existing codes or implementations +- References to relevant publications or research +- Any specific use cases or scenarios where the feature would be particularly useful diff --git a/.github/ISSUE_TEMPLATE/installation-issue.md b/.github/ISSUE_TEMPLATE/installation-issue.md new file mode 100644 index 00000000000..93ad0f1a5d8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/installation-issue.md @@ -0,0 +1,49 @@ +--- +name: Installation issue +about: Report an issue with installing or setting up WarpX +labels: [install] +--- + +_Please remove any sensitive information (e.g., passwords, API keys) from your submission. +Please check the relevant boxes and fill in the specific versions or details for the relevant items. +Thank you for taking the time to report this issue. We will respond as soon as possible._ + +## Description +A clear and concise description of the issue. 
+ +## System information +- Operating system (name and version): + - [ ] Linux: e.g., Ubuntu 22.04 LTS + - [ ] macOS: e.g., macOS Monterey 12.4 + - [ ] Windows: e.g., Windows 11 Pro +- Version of WarpX: e.g., latest, 24.10, etc. +- Installation method: + - [ ] Conda + - [ ] Spack + - [ ] PyPI + - [ ] Brew + - [ ] From source with CMake + - [ ] Module system on an HPC cluster +- Other dependencies: yes/no, describe +- Computational resources: + - [ ] CPU + - [ ] GPU: e.g., NVIDIA, AMD, etc. + +If you encountered the issue on an HPC cluster, please check our [HPC documentation](https://warpx.readthedocs.io/en/latest/install/hpc.html) to see if your HPC cluster is already supported. + +If you encountered the issue installing from source with CMake, please provide the output of the following steps: +1. buildsystem generation: output of `cmake --fresh -S . -B build` (include your specific build options, e.g., `-DWarpX_DIMS=3`) +2. project build: output of `cmake --build build` (include your specific build options, e.g., `-j 4`) + +If applicable, please add any additional information about your software environment: +- [ ] CMake: e.g., 3.24.0 +- [ ] C++ compiler: e.g., GNU 11.3 with NVCC 12.0.76 +- [ ] Python: e.g., CPython 3.12 +- [ ] MPI: e.g., OpenMPI 4.1.1 +- [ ] FFTW: e.g., 3.3.10 +- [ ] HDF5: e.g., 1.14.0 +- [ ] ADIOS2: e.g., 2.10.0 +- Other dependencies: yes/no, describe + +## Additional information +If applicable, please add any additional information that may help explain the issue, such as log files (e.g., build logs, error logs, etc.), error messages (e.g., compiler errors, runtime errors, etc.), screenshots, or other relevant details. From 78cf034088823d798b1641d47588f8c63aefa9b9 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 4 Nov 2024 10:56:39 -0800 Subject: [PATCH 069/278] Python: Warn old `warpx.multifab` Signature (#5326) Warn users that use the old `warpx.multifab("internal_name")` overload to use the new one that only requests a prefix, with `dir` and `level` as extra arguments. Follow-up to #5321. --- Source/Python/WarpX.cpp | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index 0b1ae49dfbc..1ce7959e7e4 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -114,6 +114,11 @@ void init_WarpX (py::module& m) ) .def("multifab", [](WarpX & wx, std::string internal_name) { + py::print("WARNING: WarpX' multifab('internal_name') signature is deprecated.\nPlease use:\n" + "- multifab('prefix', level=...) for scalar fields\n" + "- multifab('prefix', dir=..., level=...) for vector field components\n" + "where 'prefix' is the part of 'internal_name';' before the []", + py::arg("file") = py::module_::import("sys").attr("stderr")); if (wx.m_fields.internal_has(internal_name)) { return wx.m_fields.internal_get(internal_name); } else { From c1cd7ab012a1fbe599c3643121b5ef83a3b42b88 Mon Sep 17 00:00:00 2001 From: Debojyoti Ghosh Date: Mon, 4 Nov 2024 12:20:47 -0800 Subject: [PATCH 070/278] Implicit Field Solve Preconditioner based on Curl-Curl Operator (#5286) Implemented a preconditioner for the implicit E-field solve using the AMReX curl-curl operator and the MLMG solver. + Introduced a `Preconditioner` base class that defines the action of a preconditioner for the JFNK algorithm. + Implemented the `CurlCurlMLMGPC` that uses the multigrid solution for the curl-curl operator (implemented in `AMReX`) to precondition the E-field JFNK solve. 
Other changes needed for this: + Partially implemented a mapping between WarpX field boundary types and AMReX's linear operator boundary types. + Added some functionalities to the `ImplicitSolver` class that allow preconditioners to access `WarpX` info (like `Geometry`, boundaries, etc). Some preliminary wall times for: ``` Test: inputs_vandb_2d Grid: 160 X 160 dt: 0.125/wpe = 2.22e-18 (dt_CFL = 7.84e-19 s, CFL = 2.83) Time iterations: 20 Solver parameters: newton.max_iterations = 10 newton.relative_tolerance = 1.0e-12 newton.absolute_tolerance = 0.0 gmres.max_iterations = 1000 gmres.relative_tolerance = 1.0e-8 gmres.absolute_tolerance = 0.0 Avg GMRES iterations: ~3 (wPC), ~27 (noPC) ``` with `32^2` particles per cell: ``` Lassen (MPI + CUDA) ------------------- Box GPU Walltime (s) wPC noPC 1 1 2324.7 15004.1 4 1 2306.8 14356.8 4 4 758.9 3647.3 Dane (MPI + OMP) ---------------- Box CPU Threads Walltime (s) wPC noPC 1 1 1 6709.3 43200.0* 1 1 2 3279.1 22296.1 1 1 4 1696.3 11613.2 1 1 8 1085.0 6911.4 1 1 16 724.3 4729.0 4 1 1 5525.9 33288.8 16 1 1 4419.4 28467.8 4 4 1 1324.4 9121.1 16 16 1 524.9 3658.8 * 43200.0 seconds is 12 hours (max job duration on Dane); the simulation was almost done (started the 20th step). ``` with `10^2` particles per cell: ``` Lassen (MPI + CUDA) ------------------- Box GPU Walltime (s) wPC noPC 1 1 365.0 1443.5 4 1 254.1 927.8 4 4 133.1 301.5 Dane (MPI + OMP) ---------------- Box CPU Threads Walltime (s) wPC noPC 1 1 1 440.8 2360.5 1 1 2 241.7 1175.8 1 1 4 129.3 727.0 1 1 8 94.2 407.5 1 1 16 74.3 245.6 4 1 1 393.3 1932.5 16 1 1 337.6 1618.7 4 4 1 92.2 479.1 16 16 1 58.1 192.6 ``` --------- Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Co-authored-by: Remi Lehe Co-authored-by: Justin Angus Co-authored-by: Weiqun Zhang --- .../ImplicitSolvers/CMakeLists.txt | 1 + .../ImplicitSolvers/ImplicitSolver.H | 24 +- .../ImplicitSolvers/ImplicitSolver.cpp | 60 +++ .../FieldSolver/ImplicitSolvers/Make.package | 1 + .../ImplicitSolvers/ThetaImplicitEM.H | 5 +- .../ImplicitSolvers/ThetaImplicitEM.cpp | 4 +- .../ImplicitSolvers/WarpXSolverVec.H | 3 +- .../ImplicitSolvers/WarpXSolverVec.cpp | 2 + Source/NonlinearSolvers/CurlCurlMLMGPC.H | 355 ++++++++++++++++++ Source/NonlinearSolvers/JacobianFunctionMF.H | 51 ++- Source/NonlinearSolvers/NewtonSolver.H | 23 +- Source/NonlinearSolvers/Preconditioner.H | 100 +++++ Source/WarpX.H | 10 + 13 files changed, 615 insertions(+), 24 deletions(-) create mode 100644 Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp create mode 100644 Source/NonlinearSolvers/CurlCurlMLMGPC.H create mode 100644 Source/NonlinearSolvers/Preconditioner.H diff --git a/Source/FieldSolver/ImplicitSolvers/CMakeLists.txt b/Source/FieldSolver/ImplicitSolvers/CMakeLists.txt index 6e16f19084c..04abc9d3e91 100644 --- a/Source/FieldSolver/ImplicitSolvers/CMakeLists.txt +++ b/Source/FieldSolver/ImplicitSolvers/CMakeLists.txt @@ -2,6 +2,7 @@ foreach(D IN LISTS WarpX_DIMS) warpx_set_suffix_dims(SD ${D}) target_sources(lib_${SD} PRIVATE + ImplicitSolver.cpp SemiImplicitEM.cpp ThetaImplicitEM.cpp WarpXImplicitOps.cpp diff --git a/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.H b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.H index 88ad6a058fd..ea9af6e2298 100644 --- a/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.H +++ b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.H @@ -1,4 +1,4 @@ -/* Copyright 2024 Justin Angus +/* Copyright 2024 Justin Angus, Debojyoti Ghosh * * This file is part of WarpX.
* @@ -9,9 +9,11 @@ #include "FieldSolver/ImplicitSolvers/WarpXSolverVec.H" #include "NonlinearSolvers/NonlinearSolverLibrary.H" +#include "Utils/WarpXAlgorithmSelection.H" #include #include +#include /** * \brief Base class for implicit time solvers. The base functions are those @@ -85,6 +87,16 @@ public: int a_nl_iter, bool a_from_jacobian ) = 0; + [[nodiscard]] virtual amrex::Real theta () const { return 1.0; } + + [[nodiscard]] int numAMRLevels () const { return m_num_amr_levels; } + + [[nodiscard]] const amrex::Geometry& GetGeometry (int) const; + [[nodiscard]] const amrex::Array& GetFieldBoundaryLo () const; + [[nodiscard]] const amrex::Array& GetFieldBoundaryHi () const; + [[nodiscard]] amrex::Array GetLinOpBCLo () const; + [[nodiscard]] amrex::Array GetLinOpBCHi () const; + protected: /** @@ -94,6 +106,11 @@ protected: bool m_is_defined = false; + /** + * \brief Number of AMR levels + */ + int m_num_amr_levels = 1; + /** * \brief Nonlinear solver type and object */ @@ -140,6 +157,11 @@ protected: } + /** + * \brief Convert from WarpX FieldBoundaryType to amrex::LinOpBCType + */ + [[nodiscard]] amrex::Array convertFieldBCToLinOpBC ( const amrex::Array& ) const; + }; #endif diff --git a/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp new file mode 100644 index 00000000000..a6cbdfd307d --- /dev/null +++ b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp @@ -0,0 +1,60 @@ +#include "ImplicitSolver.H" +#include "WarpX.H" + +using namespace amrex; + +const Geometry& ImplicitSolver::GetGeometry (const int a_lvl) const +{ + AMREX_ASSERT((a_lvl >= 0) && (a_lvl < m_num_amr_levels)); + return m_WarpX->Geom(a_lvl); +} + +const Array& ImplicitSolver::GetFieldBoundaryLo () const +{ + return m_WarpX->GetFieldBoundaryLo(); +} + +const Array& ImplicitSolver::GetFieldBoundaryHi () const +{ + return m_WarpX->GetFieldBoundaryHi(); +} + +Array ImplicitSolver::GetLinOpBCLo () const +{ + return convertFieldBCToLinOpBC(m_WarpX->GetFieldBoundaryLo()); +} + +Array ImplicitSolver::GetLinOpBCHi () const +{ + return convertFieldBCToLinOpBC(m_WarpX->GetFieldBoundaryHi()); +} + +Array ImplicitSolver::convertFieldBCToLinOpBC (const Array& a_fbc) const +{ + Array lbc; + for (auto& bc : lbc) { bc = LinOpBCType::interior; } + for (int i = 0; i < AMREX_SPACEDIM; i++) { + if (a_fbc[i] == FieldBoundaryType::PML) { + WARPX_ABORT_WITH_MESSAGE("LinOpBCType not set for this FieldBoundaryType"); + } else if (a_fbc[i] == FieldBoundaryType::Periodic) { + lbc[i] = LinOpBCType::Periodic; + } else if (a_fbc[i] == FieldBoundaryType::PEC) { + WARPX_ABORT_WITH_MESSAGE("LinOpBCType not set for this FieldBoundaryType"); + } else if (a_fbc[i] == FieldBoundaryType::PMC) { + WARPX_ABORT_WITH_MESSAGE("LinOpBCType not set for this FieldBoundaryType"); + } else if (a_fbc[i] == FieldBoundaryType::Damped) { + WARPX_ABORT_WITH_MESSAGE("LinOpBCType not set for this FieldBoundaryType"); + } else if (a_fbc[i] == FieldBoundaryType::Absorbing_SilverMueller) { + WARPX_ABORT_WITH_MESSAGE("LinOpBCType not set for this FieldBoundaryType"); + } else if (a_fbc[i] == FieldBoundaryType::Neumann) { + lbc[i] = LinOpBCType::Neumann; + } else if (a_fbc[i] == FieldBoundaryType::None) { + WARPX_ABORT_WITH_MESSAGE("LinOpBCType not set for this FieldBoundaryType"); + } else if (a_fbc[i] == FieldBoundaryType::Open) { + WARPX_ABORT_WITH_MESSAGE("LinOpBCType not set for this FieldBoundaryType"); + } else { + WARPX_ABORT_WITH_MESSAGE("Invalid value for FieldBoundaryType"); + } + } + return lbc; +} diff 
--git a/Source/FieldSolver/ImplicitSolvers/Make.package b/Source/FieldSolver/ImplicitSolvers/Make.package index a4543f94dd3..16cd4003490 100644 --- a/Source/FieldSolver/ImplicitSolvers/Make.package +++ b/Source/FieldSolver/ImplicitSolvers/Make.package @@ -1,3 +1,4 @@ +CEXE_sources += ImplicitSolver.cpp CEXE_sources += SemiImplicitEM.cpp CEXE_sources += ThetaImplicitEM.cpp CEXE_sources += WarpXImplicitOps.cpp diff --git a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H index aba66782154..69d56c6ddc5 100644 --- a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H +++ b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H @@ -8,13 +8,12 @@ #define THETA_IMPLICIT_EM_H_ #include "FieldSolver/ImplicitSolvers/WarpXSolverVec.H" +#include "ImplicitSolver.H" #include #include #include -#include "ImplicitSolver.H" - /** @file * Theta-implicit electromagnetic time solver class. This is a fully implicit * algorithm where both the fields and particles are treated implicitly. @@ -79,7 +78,7 @@ public: int a_nl_iter, bool a_from_jacobian ) override; - [[nodiscard]] amrex::Real theta () const { return m_theta; } + [[nodiscard]] amrex::Real theta () const override { return m_theta; } private: diff --git a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp index 4cd5de4f24f..e5b8431a930 100644 --- a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp @@ -19,6 +19,7 @@ void ThetaImplicitEM::Define ( WarpX* const a_WarpX ) // Retain a pointer back to main WarpX class m_WarpX = a_WarpX; + m_num_amr_levels = 1; // Define E and Eold vectors m_E.Define( m_WarpX, "Efield_fp" ); @@ -26,8 +27,7 @@ void ThetaImplicitEM::Define ( WarpX* const a_WarpX ) // Define B_old MultiFabs using ablastr::fields::Direction; - const int num_levels = 1; - for (int lev = 0; lev < num_levels; ++lev) { + for (int lev = 0; lev < m_num_amr_levels; ++lev) { const auto& ba_Bx = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{0}, lev)->boxArray(); const auto& ba_By = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{1}, lev)->boxArray(); const auto& ba_Bz = m_WarpX->m_fields.get(FieldType::Bfield_fp, Direction{2}, lev)->boxArray(); diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H index 29c808b48cd..d864f239e42 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H +++ b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H @@ -75,6 +75,7 @@ public: void Define ( const WarpXSolverVec& a_solver_vec ) { assertIsDefined( a_solver_vec ); + m_num_amr_levels = a_solver_vec.m_num_amr_levels; Define( WarpXSolverVec::m_WarpX, a_solver_vec.getVectorType(), a_solver_vec.getScalarType() ); @@ -300,7 +301,7 @@ private: std::string m_scalar_type_name = "none"; static constexpr int m_ncomp = 1; - static constexpr int m_num_amr_levels = 1; + int m_num_amr_levels = 1; inline static bool m_warpx_ptr_defined = false; inline static WarpX* m_WarpX = nullptr; diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp index 6a0e6bb8a91..22c3b1d67c1 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp +++ b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp @@ -34,6 +34,8 @@ void WarpXSolverVec::Define ( WarpX* a_WarpX, m_warpx_ptr_defined = true; } + m_num_amr_levels = 1; + m_vector_type_name = a_vector_type_name; 
m_scalar_type_name = a_scalar_type_name; diff --git a/Source/NonlinearSolvers/CurlCurlMLMGPC.H b/Source/NonlinearSolvers/CurlCurlMLMGPC.H new file mode 100644 index 00000000000..47d7310995c --- /dev/null +++ b/Source/NonlinearSolvers/CurlCurlMLMGPC.H @@ -0,0 +1,355 @@ +/* Copyright 2024 Debojyoti Ghosh + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef CURL_CURL_MLMG_PC_H_ +#define CURL_CURL_MLMG_PC_H_ + +#include "Fields.H" +#include "Utils/WarpXConst.H" +#include "Preconditioner.H" + +#include + +#include +#include +#include +#include +#include +#ifndef WARPX_DIM_1D_Z // currently not implemented in 1D +#include +#include +#include +#include +#endif + +/** + * \brief Curl-curl Preconditioner + * + * Preconditioner that solves the curl-curl equation for the E-field, given + * a RHS. Uses AMReX's curl-curl linear operator and multigrid solver. + * + * The equation solves for Eg in: + * curl ( alpha * curl ( Eg ) ) + beta * Eg = b + * where + * + alpha is a scalar + * + beta can either be a scalar that is constant in space or a MultiFab + * + Eg is the electric field. + * + b is a specified RHS with the same layout as Eg + * + * This class is templated on a solution-type class T and an operator class Ops. + * + * The Ops class must have the following function: + * + Return number of AMR levels + * + Return the amrex::Geometry object given an AMR level + * + Return hi and lo linear operator boundaries + * + Return the time step factor (theta) for the time integration scheme + * + * The T class must have the following functions: + * + Return underlying vector of amrex::MultiFab arrays + */ + +template +class CurlCurlMLMGPC : public Preconditioner +{ + public: + + using RT = typename T::value_type; + + /** + * \brief Default constructor + */ + CurlCurlMLMGPC () = default; + + /** + * \brief Default destructor + */ + ~CurlCurlMLMGPC () override = default; + + // Prohibit move and copy operations + CurlCurlMLMGPC(const CurlCurlMLMGPC&) = delete; + CurlCurlMLMGPC& operator=(const CurlCurlMLMGPC&) = delete; + CurlCurlMLMGPC(CurlCurlMLMGPC&&) noexcept = delete; + CurlCurlMLMGPC& operator=(CurlCurlMLMGPC&&) noexcept = delete; + + /** + * \brief Define the preconditioner + */ + void Define (const T&, Ops*) override; + + /** + * \brief Update the preconditioner + */ + void Update (const T&) override; + + /** + * \brief Apply (solve) the preconditioner given a RHS + * + * Given a right-hand-side b, solve: + * A x = b + * where A is the linear operator, in this case, the curl-curl operator: + * A x = curl (alpha * curl (x) ) + beta * x + */ + void Apply (T&, const T&) override; + + /** + * \brief Print parameters + */ + void printParameters() const override; + + /** + * \brief Check if the nonlinear solver has been defined. 
+ */ + [[nodiscard]] inline bool IsDefined () const override { return m_is_defined; } + + protected: + + using MFArr = amrex::Array; + + bool m_is_defined = false; + + bool m_verbose = true; + bool m_bottom_verbose = false; + bool m_agglomeration = true; + bool m_consolidation = true; + bool m_use_gmres = false; + bool m_use_gmres_pc = true; + + int m_max_iter = 10; + int m_max_coarsening_level = 30; + + RT m_atol = 1.0e-16; + RT m_rtol = 1.0e-4; + + Ops* m_ops = nullptr; + + int m_num_amr_levels = 0; + amrex::Vector m_geom; + amrex::Vector m_grids; + amrex::Vector m_dmap; + amrex::IntVect m_gv; + +// currently not implemented in 1D +#ifndef WARPX_DIM_1D_Z + amrex::Array m_bc_lo; + amrex::Array m_bc_hi; + + std::unique_ptr m_info; + std::unique_ptr m_curl_curl; + std::unique_ptr> m_solver; + std::unique_ptr> m_gmres_solver; +#endif + + /** + * \brief Read parameters + */ + void readParameters(); + + private: + +}; + +template +void CurlCurlMLMGPC::printParameters() const +{ + using namespace amrex; + auto pc_name = getEnumNameString(PreconditionerType::pc_curl_curl_mlmg); + Print() << pc_name << " verbose: " << (m_verbose?"true":"false") << "\n"; + Print() << pc_name << " bottom verbose: " << (m_bottom_verbose?"true":"false") << "\n"; + Print() << pc_name << " max iter: " << m_max_iter << "\n"; + Print() << pc_name << " agglomeration: " << m_agglomeration << "\n"; + Print() << pc_name << " consolidation: " << m_consolidation << "\n"; + Print() << pc_name << " max_coarsening_level: " << m_max_coarsening_level << "\n"; + Print() << pc_name << " absolute tolerance: " << m_atol << "\n"; + Print() << pc_name << " relative tolerance: " << m_rtol << "\n"; + Print() << pc_name << " use GMRES: " << (m_use_gmres?"true":"false") << "\n"; + if (m_use_gmres) { + Print() << pc_name + << " use PC for GMRES: " + << (m_use_gmres_pc?"true":"false") << "\n"; + } +} + +template +void CurlCurlMLMGPC::readParameters() +{ + const amrex::ParmParse pp(amrex::getEnumNameString(PreconditionerType::pc_curl_curl_mlmg)); + pp.query("verbose", m_verbose); + pp.query("bottom_verbose", m_bottom_verbose); + pp.query("max_iter", m_max_iter); + pp.query("agglomeration", m_agglomeration); + pp.query("consolidation", m_consolidation); + pp.query("max_coarsening_level", m_max_coarsening_level); + pp.query("absolute_tolerance", m_atol); + pp.query("relative_tolerance", m_rtol); + pp.query("use_gmres", m_use_gmres); + pp.query("use_gmres_pc", m_use_gmres_pc); +} + +template +void CurlCurlMLMGPC::Define ( const T& a_U, + Ops* const a_ops ) +{ + using namespace amrex; + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !IsDefined(), + "CurlCurlMLMGPC::Define() called on defined object" ); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + (a_ops != nullptr), + "CurlCurlMLMGPC::Define(): a_ops is nullptr" ); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + a_U.getArrayVecType()==warpx::fields::FieldType::Efield_fp, + "CurlCurlMLMGPC::Define() must be called with Efield_fp type"); + + m_ops = a_ops; + // read preconditioner parameters + readParameters(); + +// currently not implemented in 1D +#ifdef WARPX_DIM_1D_Z + WARPX_ABORT_WITH_MESSAGE("CurlCurlMLMGPC not yet implemented for 1D"); +#else + // create info object for curl-curl op + m_info = std::make_unique(); + m_info->setAgglomeration(m_agglomeration); + m_info->setConsolidation(m_consolidation); + m_info->setMaxCoarseningLevel(m_max_coarsening_level); + + // Get data vectors from a_U + auto& u_mfarrvec = a_U.getArrayVec(); + + // Set number of AMR levels and create geometry, grids, and + // distribution mapping 
vectors. + m_num_amr_levels = m_ops->numAMRLevels(); + m_geom.resize(m_num_amr_levels); + m_grids.resize(m_num_amr_levels); + m_dmap.resize(m_num_amr_levels); + for (int n = 0; n < m_num_amr_levels; n++) { + m_geom[n] = m_ops->GetGeometry(n); + m_dmap[n] = u_mfarrvec[n][0]->DistributionMap(); + + BoxArray ba = u_mfarrvec[n][0]->boxArray(); + m_grids[n] = ba.enclosedCells(); + } + + // Construct the curl-curl linear operator and set its BCs + m_curl_curl = std::make_unique(m_geom, m_grids, m_dmap, *m_info); + m_curl_curl->setDomainBC(m_ops->GetLinOpBCLo(), m_ops->GetLinOpBCHi()); + + // Dummy value for alpha and beta to avoid abort due to degenerate matrix by MLMG solver + m_curl_curl->setScalars(1.0, 1.0); + + // Construct the MLMG solver + m_solver = std::make_unique>(*m_curl_curl); + m_solver->setMaxIter(m_max_iter); + m_solver->setFixedIter(m_max_iter); + m_solver->setVerbose(static_cast(m_verbose)); + m_solver->setBottomVerbose(static_cast(m_bottom_verbose)); + + // If using GMRES solver, construct it + if (m_use_gmres) { + m_gmres_solver = std::make_unique>(*m_solver); + m_gmres_solver->usePrecond(m_use_gmres_pc); + m_gmres_solver->setPrecondNumIters(m_max_iter); + m_gmres_solver->setVerbose(static_cast(m_verbose)); + } +#endif + + m_is_defined = true; +} + +template +void CurlCurlMLMGPC::Update (const T& a_U) +{ + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + IsDefined(), + "CurlCurlMLMGPC::Update() called on undefined object" ); + + // a_U is not needed for a linear operator + amrex::ignore_unused(a_U); + + // set the coefficients alpha and beta for curl-curl op + const RT alpha = (m_ops->theta()*this->m_dt*PhysConst::c) * (m_ops->theta()*this->m_dt*PhysConst::c); + const RT beta = RT(1.0); + +// currently not implemented in 1D +#ifndef WARPX_DIM_1D_Z + m_curl_curl->setScalars(alpha, beta); +#endif + + if (m_verbose) { + amrex::Print() << "Updating " << amrex::getEnumNameString(PreconditionerType::pc_curl_curl_mlmg) + << ": dt = " << this->m_dt << ", " + << " coefficients: " + << "alpha = " << alpha << ", " + << "beta = " << beta << "\n"; + } +} + +template +void CurlCurlMLMGPC::Apply (T& a_x, const T& a_b) +{ + // Given a right-hand-side b, solve: + // A x = b + // where A is the linear operator, in this case, the curl-curl + // operator: + // A x = curl (alpha * curl (x) ) + beta * x + + using namespace amrex; + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + IsDefined(), + "CurlCurlMLMGPC::Apply() called on undefined object" ); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + a_x.getArrayVecType()==warpx::fields::FieldType::Efield_fp, + "CurlCurlMLMGPC::Apply() - a_x must be Efield_fp type"); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + a_b.getArrayVecType()==warpx::fields::FieldType::Efield_fp, + "CurlCurlMLMGPC::Apply() - a_b must be Efield_fp type"); + + // Get the data vectors + auto& b_mfarrvec = a_b.getArrayVec(); + auto& x_mfarrvec = a_x.getArrayVec(); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + ((b_mfarrvec.size() == m_num_amr_levels) && (x_mfarrvec.size() == m_num_amr_levels)), + "Error in CurlCurlMLMGPC::Apply() - mismatch in number of levels." 
); + + for (int n = 0; n < m_num_amr_levels; n++) { + + // Copy initial guess to local object +#if defined(WARPX_DIM_1D_Z) + // Missing dimensions is x,y in WarpX and y,z in AMReX + WARPX_ABORT_WITH_MESSAGE("CurlCurlMLMGPC not yet implemented for 1D"); +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + // Missing dimension is y in WarpX and z in AMReX + Array solution { MultiFab(*x_mfarrvec[n][0], make_alias, 0, 1), + MultiFab(*x_mfarrvec[n][2], make_alias, 0, 1), + MultiFab(*x_mfarrvec[n][1], make_alias, 0, 1) }; + Array rhs { MultiFab(*b_mfarrvec[n][0], make_alias, 0, 1), + MultiFab(*b_mfarrvec[n][2], make_alias, 0, 1), + MultiFab(*b_mfarrvec[n][1], make_alias, 0, 1) }; +#elif defined(WARPX_DIM_3D) + Array solution { MultiFab(*x_mfarrvec[n][0], make_alias, 0, 1), + MultiFab(*x_mfarrvec[n][1], make_alias, 0, 1), + MultiFab(*x_mfarrvec[n][2], make_alias, 0, 1) }; + Array rhs { MultiFab(*b_mfarrvec[n][0], make_alias, 0, 1), + MultiFab(*b_mfarrvec[n][1], make_alias, 0, 1), + MultiFab(*b_mfarrvec[n][2], make_alias, 0, 1) }; +#endif + +// currently not implemented in 1D +#ifndef WARPX_DIM_1D_Z + m_curl_curl->prepareRHS({&rhs}); + if (m_use_gmres) { + m_gmres_solver->solve(solution, rhs, m_rtol, m_atol); + } else { + m_solver->solve({&solution}, {&rhs}, m_rtol, m_atol); + } +#endif + } +} + +#endif diff --git a/Source/NonlinearSolvers/JacobianFunctionMF.H b/Source/NonlinearSolvers/JacobianFunctionMF.H index d5c2b6cbac9..a3222214381 100644 --- a/Source/NonlinearSolvers/JacobianFunctionMF.H +++ b/Source/NonlinearSolvers/JacobianFunctionMF.H @@ -7,6 +7,9 @@ #ifndef JacobianFunctionMF_H_ #define JacobianFunctionMF_H_ +#include "CurlCurlMLMGPC.H" +#include + /** * \brief This is a linear function class for computing the action of a * Jacobian on a vector using a matrix-free finite-difference method. 
@@ -35,14 +38,18 @@ class JacobianFunctionMF inline void precond ( T& a_U, const T& a_X ) { - if (m_usePreCond) { a_U.zero(); } - else { a_U.Copy(a_X); } + if (m_usePreCond) { + a_U.zero(); + m_preCond->Apply(a_U, a_X); + } else { + a_U.Copy(a_X); + } } inline void updatePreCondMat ( const T& a_X ) { - amrex::ignore_unused(a_X); + if (m_usePreCond) { m_preCond->Update(a_X); } } inline @@ -133,15 +140,25 @@ class JacobianFunctionMF void curTime ( RT a_time ) { m_cur_time = a_time; + if (m_usePreCond) { m_preCond->CurTime(a_time); } } inline void curTimeStep ( RT a_dt ) { m_dt = a_dt; + if (m_usePreCond) { m_preCond->CurTimeStep(a_dt); } + } + + inline + void printParams () const + { + if (m_pc_type != PreconditionerType::none) { + m_preCond->printParameters(); + } } - void define( const T&, Ops* ); + void define( const T&, Ops*, const PreconditionerType& ); private: @@ -151,16 +168,18 @@ class JacobianFunctionMF RT m_epsJFNK = RT(1.0e-6); RT m_normY0; RT m_cur_time, m_dt; - std::string m_pc_type; - T m_Z, m_Y0, m_R0, m_R; - Ops* m_ops; + PreconditionerType m_pc_type = PreconditionerType::none; + T m_Z, m_Y0, m_R0, m_R; + Ops* m_ops = nullptr; + std::unique_ptr> m_preCond = nullptr; }; template -void JacobianFunctionMF::define ( const T& a_U, - Ops* a_ops ) +void JacobianFunctionMF::define ( const T& a_U, + Ops* a_ops, + const PreconditionerType& a_pc_type ) { m_Z.Define(a_U); m_Y0.Define(a_U); @@ -169,6 +188,20 @@ void JacobianFunctionMF::define ( const T& a_U, m_ops = a_ops; + m_usePreCond = (a_pc_type != PreconditionerType::none); + if (m_usePreCond) { + m_pc_type = a_pc_type; + if (m_pc_type == PreconditionerType::pc_curl_curl_mlmg) { + m_preCond = std::make_unique>(); + } else { + std::stringstream convergenceMsg; + convergenceMsg << "JacobianFunctionMF::define(): " << amrex::getEnumNameString(m_pc_type) + << " is not a valid preconditioner type."; + WARPX_ABORT_WITH_MESSAGE(convergenceMsg.str()); + } + m_preCond->Define(a_U, a_ops); + } + m_is_defined = true; } diff --git a/Source/NonlinearSolvers/NewtonSolver.H b/Source/NonlinearSolvers/NewtonSolver.H index 742e139a5f5..9c73c44e69e 100644 --- a/Source/NonlinearSolvers/NewtonSolver.H +++ b/Source/NonlinearSolvers/NewtonSolver.H @@ -9,10 +9,11 @@ #include "NonlinearSolver.H" #include "JacobianFunctionMF.H" +#include "Preconditioner.H" +#include "Utils/TextMsg.H" #include #include -#include "Utils/TextMsg.H" #include @@ -79,6 +80,9 @@ public: amrex::Print() << "GMRES max iterations: " << m_gmres_maxits << "\n"; amrex::Print() << "GMRES relative tolerance: " << m_gmres_rtol << "\n"; amrex::Print() << "GMRES absolute tolerance: " << m_gmres_atol << "\n"; + amrex::Print() << "Preconditioner type: " << amrex::getEnumNameString(m_pc_type) << "\n"; + + m_linear_function->printParams(); } private: @@ -138,9 +142,12 @@ private: */ int m_gmres_restart_length = 30; + /** + * \brief Preconditioner type + */ + PreconditionerType m_pc_type = PreconditionerType::none; + mutable amrex::Real m_cur_time, m_dt; - mutable bool m_update_pc = false; - mutable bool m_update_pc_init = false; /** * \brief The linear function used by GMRES to compute A*v. 
@@ -184,7 +191,7 @@ void NewtonSolver::Define ( const Vec& a_U, m_ops = a_ops; m_linear_function = std::make_unique>(); - m_linear_function->define(m_F, m_ops); + m_linear_function->define(m_F, m_ops, m_pc_type); m_linear_solver = std::make_unique>>(); m_linear_solver->define(*m_linear_function); @@ -212,6 +219,9 @@ void NewtonSolver::ParseParameters () pp_gmres.query("absolute_tolerance", m_gmres_atol); pp_gmres.query("relative_tolerance", m_gmres_rtol); pp_gmres.query("max_iterations", m_gmres_maxits); + + const amrex::ParmParse pp_jac("jacobian"); + pp_jac.query("pc_type", m_pc_type); } template @@ -330,10 +340,7 @@ void NewtonSolver::EvalResidual ( Vec& a_F, m_linear_function->setBaseRHS(m_R); // update preconditioner - if (m_update_pc || m_update_pc_init) { - m_linear_function->updatePreCondMat(a_U); - } - m_update_pc_init = false; + m_linear_function->updatePreCondMat(a_U); // Compute residual: F(U) = U - b - R(U) a_F.Copy(a_U); diff --git a/Source/NonlinearSolvers/Preconditioner.H b/Source/NonlinearSolvers/Preconditioner.H new file mode 100644 index 00000000000..191a48d00bc --- /dev/null +++ b/Source/NonlinearSolvers/Preconditioner.H @@ -0,0 +1,100 @@ +/* Copyright 2024 Debojyoti Ghosh + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef WARPX_PRECONDITIONER_H_ +#define WARPX_PRECONDITIONER_H_ + +#include + +/** + * \brief Types for preconditioners for field solvers + */ +AMREX_ENUM(PreconditionerType, pc_curl_curl_mlmg, none); + +/** + * \brief Base class for preconditioners + * + * This class is templated on a solution-type class T and an operator class Ops. + * + * The Ops class must have the following function: + * (this will depend on the specific preconditioners inheriting from this class) + * + * The T class must have the following functions: + * (this will depend on the specific preconditioners inheriting from this class) + */ + +template +class Preconditioner +{ + public: + + using RT = typename T::value_type; + + /** + * \brief Default constructor + */ + Preconditioner () = default; + + /** + * \brief Default destructor + */ + virtual ~Preconditioner () = default; + + // Default move and copy operations + Preconditioner(const Preconditioner&) = default; + Preconditioner& operator=(const Preconditioner&) = default; + Preconditioner(Preconditioner&&) noexcept = default; + Preconditioner& operator=(Preconditioner&&) noexcept = default; + + /** + * \brief Define the preconditioner + */ + virtual void Define (const T&, Ops*) = 0; + + /** + * \brief Update the preconditioner + */ + virtual void Update ( const T& ) = 0; + + /** + * \brief Apply (solve) the preconditioner given a RHS + * + * Given a right-hand-side b, solve: + * A x = b + * where A is a linear operator. + */ + virtual void Apply (T& a_x, const T& a_b) = 0; + + /** + * \brief Check if the nonlinear solver has been defined. + */ + [[nodiscard]] virtual bool IsDefined () const = 0; + + /** + * \brief Print parameters + */ + virtual void printParameters() const { } + + /** + * \brief Set the current time. + */ + inline void CurTime (const RT a_time) { m_time = a_time; } + + /** + * \brief Set the current time step size. 
+ */ + inline void CurTimeStep (const RT a_dt) { m_dt = a_dt; } + + protected: + + RT m_time = 0.0; + RT m_dt = 0.0; + + private: + +}; + +#endif diff --git a/Source/WarpX.H b/Source/WarpX.H index bad63cd44d9..a635196d044 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -113,6 +113,16 @@ public: [[nodiscard]] int Verbose () const { return verbose; } + [[nodiscard]] const amrex::Array& GetFieldBoundaryLo () const + { + return field_boundary_lo; + } + + [[nodiscard]] const amrex::Array& GetFieldBoundaryHi () const + { + return field_boundary_hi; + } + void InitData (); void Evolve (int numsteps = -1); From bae146f7e1b85f343ffa003601ae71cf7d5ea357 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Mon, 4 Nov 2024 14:27:19 -0800 Subject: [PATCH 071/278] Correct inaccurate comment in IGF code (#5438) I forgot to update the comment in this PR: https://github.com/ECP-WarpX/WarpX/pull/5335 --- Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp index 40b36740ae5..546326d7fe0 100644 --- a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp +++ b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp @@ -120,7 +120,7 @@ computePhiIGF ( amrex::MultiFab const & rho, tmp_G.setVal(0); BL_PROFILE_VAR_START(timer_pcopies); - // Copy from rho including its ghost cells to tmp_rho + // Copy from rho to tmp_rho tmp_rho.ParallelCopy( rho, 0, 0, 1, amrex::IntVect::TheZeroVector(), amrex::IntVect::TheZeroVector() ); BL_PROFILE_VAR_STOP(timer_pcopies); From c8c78f4bc61105175e75f1c0a94880a7911346e4 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 4 Nov 2024 14:36:08 -0800 Subject: [PATCH 072/278] CI: find and print backtraces (#5424) I think this should work to find and print backtraces after all CI tests have run. This is for Azure only. Local backtraces still need be inspected manually. After trying many solutions, the current strategy is: - avoid removing the backtrace files in the `cleanup` step of each test (we continue to remove all other files); - have a separate workflow step to find and print the backtrace files in the Azure job (this is executed always). The new Azure workflow step is labeled "Logs" and it comes right after the step labeled "Test". --- .azure-pipelines.yml | 23 +++++++++++++++++------ Examples/CMakeLists.txt | 2 +- Examples/test_cleanup.cmake | 7 +++++++ 3 files changed, 25 insertions(+), 7 deletions(-) create mode 100644 Examples/test_cleanup.cmake diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index bdcfe1c9864..1d5127ae5a1 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -77,7 +77,7 @@ jobs: displayName: Cache Python Libraries - bash: | - set -eu -o pipefail + set -o nounset errexit pipefail cat /proc/cpuinfo | grep "model name" | sort -u df -h echo 'Acquire::Retries "3";' | sudo tee /etc/apt/apt.conf.d/80-retries @@ -146,9 +146,10 @@ jobs: displayName: 'Install dependencies' - bash: | - set -eu -o pipefail + # set options + set -o nounset errexit pipefail + # display disk space usage df -h - # configure export AMReX_CMAKE_FLAGS="-DAMReX_ASSERTIONS=ON -DAMReX_TESTING=ON" cmake -S . 
-B build \ @@ -156,15 +157,25 @@ jobs: ${WARPX_CMAKE_FLAGS} \ -DWarpX_TEST_CLEANUP=ON \ -DWarpX_TEST_FPETRAP=ON - # build cmake --build build -j 2 + # display disk space usage df -h displayName: 'Build' - bash: | - set -eu -o pipefail - + # set options + set -o nounset errexit pipefail # run tests (exclude pytest.AMReX when running Python tests) ctest --test-dir build --output-on-failure -E AMReX displayName: 'Test' + + - bash: | + # set options + set -o nounset errexit pipefail + # find and print backtrace + find build/bin/ -type f -name "Backtrace*" \ + -exec echo -e "\nBacktrace\n---------\n{}\n---------" \; \ + -exec cat {} \; + displayName: 'Logs' + condition: always() diff --git a/Examples/CMakeLists.txt b/Examples/CMakeLists.txt index f36bcbb9973..728c2142932 100644 --- a/Examples/CMakeLists.txt +++ b/Examples/CMakeLists.txt @@ -198,7 +198,7 @@ function(add_warpx_test if(WarpX_TEST_CLEANUP) add_test( NAME ${name}.cleanup - COMMAND ${CMAKE_COMMAND} -E rm -rf ${THIS_WORKING_DIR} + COMMAND ${CMAKE_COMMAND} -P ${CMAKE_SOURCE_DIR}/Examples/test_cleanup.cmake ${THIS_WORKING_DIR} ) # test cleanup depends on test run set_property(TEST ${name}.cleanup APPEND PROPERTY DEPENDS "${name}.run") diff --git a/Examples/test_cleanup.cmake b/Examples/test_cleanup.cmake new file mode 100644 index 00000000000..b15e31e1f5d --- /dev/null +++ b/Examples/test_cleanup.cmake @@ -0,0 +1,7 @@ +# delete all test files except backtrace +file(GLOB test_files ${CMAKE_ARGV3}/*) +foreach(file ${test_files}) + if(NOT ${file} MATCHES "Backtrace*") + execute_process(COMMAND ${CMAKE_COMMAND} -E rm -r ${file}) + endif() +endforeach() From a62a227657bbb29926b93398cd7feca427be16da Mon Sep 17 00:00:00 2001 From: David Grote Date: Mon, 4 Nov 2024 14:40:18 -0800 Subject: [PATCH 073/278] Fix dt_update_interval argument to ElectrostaticSolver (#5434) The `warpx` prefix was left off of this argument. This addresses issues raised in #5431 and #5432 --- Python/pywarpx/picmi.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 08c71ed02de..afd28851f70 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1894,14 +1894,14 @@ class ElectrostaticSolver(picmistandard.PICMI_ElectrostaticSolver): warpx_self_fields_verbosity: integer, default=2 Level of verbosity for the lab frame solver - warpx_dt_update_interval: string, optional (default = -1) + warpx_dt_update_interval: integer, optional (default = -1) How frequently the timestep is updated. Adaptive timestepping is disabled when this is <= 0. warpx_cfl: float, optional - Fraction of the CFL condition for particle velocity vs grid size, used to set the timestep when `dt_update_interval > 0`. + Fraction of the CFL condition for particle velocity vs grid size, used to set the timestep when `warpx_dt_update_interval > 0`. warpx_max_dt: float, optional - The maximum allowable timestep when `dt_update_interval > 0`. + The maximum allowable timestep when `warpx_dt_update_interval > 0`. 
""" @@ -1911,7 +1911,7 @@ def init(self, kw): self.self_fields_verbosity = kw.pop("warpx_self_fields_verbosity", None) self.magnetostatic = kw.pop("warpx_magnetostatic", False) self.cfl = kw.pop("warpx_cfl", None) - self.dt_update_interval = kw.pop("dt_update_interval", None) + self.dt_update_interval = kw.pop("warpx_dt_update_interval", None) self.max_dt = kw.pop("warpx_max_dt", None) def solver_initialize_inputs(self): From ca171d247c78d10a6331d032c3fec9a749c4bcd2 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 4 Nov 2024 16:01:24 -0800 Subject: [PATCH 074/278] Release 24.11 (#5440) Prepare the November release of WarpX, following the documentation at Following this workflow: https://warpx.readthedocs.io/en/latest/maintenance/release.html: 1. Update to latest AMReX release: ```console ./Tools/Release/updateAMReX.py ``` 2. Update to latest pyAMReX release: ```console ./Tools/Release/updatepyAMReX.py ``` 3. Update to latest PICSAR release (no changes, still 24.09): ```console ./Tools/Release/updatePICSAR.py ``` 4. Update WarpX version number: ```console ./Tools/Release/newVersion.sh ``` --- .github/workflows/cuda.yml | 2 +- CMakeLists.txt | 2 +- Docs/source/conf.py | 4 ++-- Python/setup.py | 2 +- cmake/dependencies/AMReX.cmake | 4 ++-- cmake/dependencies/pyAMReX.cmake | 4 ++-- setup.py | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 1a89f6668d5..d0bcc10d72c 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -137,7 +137,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 92679babfc2cc66ca06ee591a80001db57c89878 && cd - + cd ../amrex && git checkout --detach 24.11 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/CMakeLists.txt b/CMakeLists.txt index c08c72489cb..66fe63230d9 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ # Preamble #################################################################### # cmake_minimum_required(VERSION 3.24.0) -project(WarpX VERSION 24.10) +project(WarpX VERSION 24.11) include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake) diff --git a/Docs/source/conf.py b/Docs/source/conf.py index c1ad43197c5..e081a490ee8 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -107,9 +107,9 @@ def __init__(self, *args, **kwargs): # built documents. # # The short X.Y version. -version = "24.10" +version = "24.11" # The full version, including alpha/beta/rc tags. -release = "24.10" +release = "24.11" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/Python/setup.py b/Python/setup.py index c0e38baced2..5ac5a950d99 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -65,7 +65,7 @@ setup( name="pywarpx", - version="24.10", + version="24.11", packages=["pywarpx"], package_dir={"pywarpx": "pywarpx"}, description="""Wrapper of WarpX""", diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 9854cbb0800..dd81554d607 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -260,7 +260,7 @@ macro(find_amrex) endif() set(COMPONENT_PRECISION ${WarpX_PRECISION} P${WarpX_PARTICLE_PRECISION}) - find_package(AMReX 24.10 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) + find_package(AMReX 24.11 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) # note: TINYP skipped because user-configured and optional # AMReX CMake helper scripts @@ -283,7 +283,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "92679babfc2cc66ca06ee591a80001db57c89878" +set(WarpX_amrex_branch "24.11" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index 3236851d392..1dbd5e9fde6 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -59,7 +59,7 @@ function(find_pyamrex) endif() elseif(NOT WarpX_pyamrex_internal) # TODO: MPI control - find_package(pyAMReX 24.10 CONFIG REQUIRED) + find_package(pyAMReX 24.11 CONFIG REQUIRED) message(STATUS "pyAMReX: Found version '${pyAMReX_VERSION}'") endif() endfunction() @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "1aa1db34a0d1bdc084bd6069a4fd97b26266af5c" +set(WarpX_pyamrex_branch "24.11" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") diff --git a/setup.py b/setup.py index f2bc72ff386..fc99b75f2f0 100644 --- a/setup.py +++ b/setup.py @@ -282,7 +282,7 @@ def build_extension(self, ext): setup( name="pywarpx", # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version="24.10", + version="24.11", packages=["pywarpx"], package_dir={"pywarpx": "Python/pywarpx"}, author="Jean-Luc Vay, David P. Grote, Maxence Thévenet, Rémi Lehe, Andrew Myers, Weiqun Zhang, Axel Huebl, et al.", From 499c67dfbd2361297e9893c77b78db460df2bb03 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Mon, 4 Nov 2024 16:03:19 -0800 Subject: [PATCH 075/278] Add Novatron paper in documentation (#5427) --- Docs/source/highlights.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst index 09156072cad..7f613625c55 100644 --- a/Docs/source/highlights.rst +++ b/Docs/source/highlights.rst @@ -191,6 +191,11 @@ Please see :ref:`this section `. Nuclear Fusion and Plasma Confinement ************************************* +#. Scheffel J. and Jäderberg J. and Bendtz K. and Holmberg R. and Lindvall K., + **Axial Confinement in the Novatron Mirror Machine**. 
+   arXiv 2410.20134
+   `DOI:10.48550/arXiv.2410.20134 <https://doi.org/10.48550/arXiv.2410.20134>`__
+
 #. Affolter M., Thompson R., Hepner S., Hayes E. C., Podolsky V., Borghei M., Carlsson J., Gargone A., Merthe D., McKee E., Langtry R.,
    **The Orbitron: A crossed-field device for co-confinement of high energy ions and electrons**.
    AIP Advances **14**, 085025, 2024.

From c803d3476fae8e4578d44b93dd7973aba5f276f2 Mon Sep 17 00:00:00 2001
From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>
Date: Mon, 4 Nov 2024 16:05:15 -0800
Subject: [PATCH 076/278] Update bug report issue template (#5436)

Just a tiny improvement left out of #5278.

I think the wording "compiling and running in debug mode" is a bit
reductive, given all the steps that we point to in the linked
documentation: I think "debugging the code" might be a better wording.
---
 .github/ISSUE_TEMPLATE/bug_report.md         | 2 +-
 .github/ISSUE_TEMPLATE/installation-issue.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md
index a545067b6d2..a5a64487646 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.md
+++ b/.github/ISSUE_TEMPLATE/bug_report.md
@@ -52,7 +52,7 @@ If you encountered the issue on an HPC cluster, please check our [HPC documentat

 ## Steps taken so far

 What troubleshooting steps have you taken so far, and what were the results?
-Have you tried compiling and running in debug mode, following the instructions in our [debugging documentation](https://warpx.readthedocs.io/en/latest/usage/workflows/debugging.html)?
+Have you tried debugging the code, following the instructions in our [debugging documentation](https://warpx.readthedocs.io/en/latest/usage/workflows/debugging.html)?

 ## Additional information

 If applicable, please add any additional information that may help explain the issue, such as log files (e.g., build logs, error logs, etc.), error messages (e.g., compiler errors, runtime errors, etc.), screenshots, or other relevant details.
diff --git a/.github/ISSUE_TEMPLATE/installation-issue.md b/.github/ISSUE_TEMPLATE/installation-issue.md
index 93ad0f1a5d8..7cc937d91c0 100644
--- a/.github/ISSUE_TEMPLATE/installation-issue.md
+++ b/.github/ISSUE_TEMPLATE/installation-issue.md
@@ -1,6 +1,6 @@
 ---
 name: Installation issue
-about: Report an issue with installing or setting up WarpX
+about: Report an issue with installing or setting up WarpX.
 labels: [install]
 ---

From a4d563147edf63b11834f2a8900b28b32dd80d10 Mon Sep 17 00:00:00 2001
From: David Grote
Date: Tue, 5 Nov 2024 13:25:11 -0800
Subject: [PATCH 077/278] Add `PECInsulator` boundary condition (#4943)

This PR adds a mixed PEC and insulator boundary condition. This allows
an insulator to be placed on a portion of the boundary. The rest of
that boundary will be PEC. Within the insulator portion, the tangential
fields can be specified on the boundary (as functions of space and
time). The normal fields and fields not specified are extrapolated to
the guard cells from the valid cells.

The fields are specified in pairs, the two tangential electric fields,
and the two tangential magnetic fields. In each pair, if one is set,
the other will be zeroed if not set.

A use case is the simulation of a dynamic pinch, driven by an external
current, represented as a time dependent B field on the boundary.
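
As a minimal usage sketch (condensed from the 2D regression test added
in this PR; the insulator area and the B-field ramp below are simply
the values used in that test):

```
# upper x boundary: PEC everywhere except where the insulator area is > 0
boundary.field_hi = PECInsulator periodic
insulator.area_x_hi(y,z) = (2.25e-2 <= z and z <= 2.75e-2)
# time-dependent tangential B field applied within the insulator region
insulator.By_x_hi(y,z,t) = min(t/1.0e-12,1)*1.e1*3.3e-4
```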
[PECinsulatorBC_warpX_summaryOnly.pdf](https://github.com/user-attachments/files/17637695/PECinsulatorBC_warpX_summaryOnly.pdf) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> --- Docs/source/usage/parameters.rst | 31 + Examples/Tests/pec/CMakeLists.txt | 10 + .../pec/inputs_test_2d_pec_field_insulator | 34 ++ .../test_2d_pec_field_insulator.json | 13 + Source/BoundaryConditions/CMakeLists.txt | 1 + Source/BoundaryConditions/Make.package | 1 + Source/BoundaryConditions/PEC_Insulator.H | 180 ++++++ Source/BoundaryConditions/PEC_Insulator.cpp | 561 ++++++++++++++++++ Source/BoundaryConditions/PEC_Insulator_fwd.H | 13 + .../WarpXFieldBoundaries.cpp | 63 ++ Source/Python/WarpX.cpp | 1 + Source/Utils/Parser/ParserUtils.H | 14 + Source/Utils/Parser/ParserUtils.cpp | 13 + Source/Utils/WarpXAlgorithmSelection.H | 1 + Source/WarpX.H | 4 + Source/WarpX.cpp | 4 + 16 files changed, 944 insertions(+) create mode 100644 Examples/Tests/pec/inputs_test_2d_pec_field_insulator create mode 100644 Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator.json create mode 100644 Source/BoundaryConditions/PEC_Insulator.H create mode 100644 Source/BoundaryConditions/PEC_Insulator.cpp create mode 100644 Source/BoundaryConditions/PEC_Insulator_fwd.H diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index af559aa1fba..1b5c3e7b186 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -493,6 +493,37 @@ Domain Boundary Conditions * ``pec``: This option can be used to set a Perfect Electric Conductor at the simulation boundary. Please see the :ref:`PEC theory section ` for more details. Note that PEC boundary is invalid at `r=0` for the RZ solver. Please use ``none`` option. This boundary condition does not work with the spectral solver. + * ``pec_insulator``: This option specifies a mixed perfect electric conductor and insulator boundary, where some part of the + boundary is PEC and some is insulator. In the insulator portion, the normal fields are extrapolated and the tangential fields + are either set to the specified value or extrapolated. The region that is insulator is specified using a spatially dependent expression with the insulator being in the area where the value of the expression is greater than zero. + The expressions are given for the low and high boundary on each axis, as listed below. The tangential fields are specified as + expressions that can depend on the location and time. The tangential fields are in two pairs, the electric fields and the + magnetic fields. In each pair, if one is specified, the other will be set to zero if not also specified. 
+ + * ``insulator.area_x_lo(y,z)``: For the lower x (or r) boundary, expression specifying the insulator location + + * ``insulator.area_x_hi(y,z)``: For the upper x (or r) boundary, expression specifying the insulator location + + * ``insulator.area_y_lo(x,z)``: For the lower y boundary, expression specifying the insulator location + + * ``insulator.area_y_hi(x,z)``: For the upper y boundary, expression specifying the insulator location + + * ``insulator.area_z_lo(x,y)``: For the lower z boundary, expression specifying the insulator location + + * ``insulator.area_z_hi(x,y)``: For the upper z boundary, expression specifying the insulator location + + * ``insulator.Ey_x_lo(y,z,t)``, ``insulator.Ez_x_lo(y,z,t)``, ``insulator.By_x_lo(y,z,t)``, ``insulator.Bz_x_lo(y,z,t)``: expressions of the tangential field values for the lower x (or r) boundary + + * ``insulator.Ey_x_hi(y,z,t)``, ``insulator.Ez_x_hi(y,z,t)``, ``insulator.By_x_hi(y,z,t)``, ``insulator.Bz_x_hi(y,z,t)``: expressions of the tangential field values for the upper x (or r) boundary + + * ``insulator.Ex_y_lo(x,z,t)``, ``insulator.Ez_y_lo(x,z,t)``, ``insulator.Bx_y_lo(x,z,t)``, ``insulator.Bz_y_lo(x,z,t)``: expressions of the tangential field values for the lower y boundary + + * ``insulator.Ex_y_hi(x,z,t)``, ``insulator.Ez_y_hi(x,z,t)``, ``insulator.Bx_y_hi(x,z,t)``, ``insulator.Bz_y_hi(x,z,t)``: expressions of the tangential field values for the upper y boundary + + * ``insulator.Ex_z_lo(x,y,t)``, ``insulator.Ey_z_lo(x,y,t)``, ``insulator.Bx_z_lo(x,y,t)``, ``insulator.By_z_lo(x,y,t)``: expressions of the tangential field values for the lower z boundary + + * ``insulator.Ex_z_hi(x,y,t)``, ``insulator.Ey_z_hi(x,y,t)``, ``insulator.Bx_z_hi(x,y,t)``, ``insulator.By_z_hi(x,y,t)``: expressions of the tangential field values for the upper z boundary + * ``none``: No boundary condition is applied to the fields with the electromagnetic solver. This option must be used for the RZ-solver at `r=0`. * ``neumann``: For the electrostatic multigrid solver, a Neumann boundary condition (with gradient of the potential equal to 0) will be applied on the specified boundary. diff --git a/Examples/Tests/pec/CMakeLists.txt b/Examples/Tests/pec/CMakeLists.txt index ec710f7d919..e0bab40d058 100644 --- a/Examples/Tests/pec/CMakeLists.txt +++ b/Examples/Tests/pec/CMakeLists.txt @@ -30,3 +30,13 @@ add_warpx_test( diags/diag1000020 # output OFF # dependency ) + +add_warpx_test( + test_2d_pec_field_insulator # name + 2 # dims + 2 # nprocs + inputs_test_2d_pec_field_insulator # inputs + analysis_default_regression.py # analysis + diags/diag1000010 # output + OFF # dependency +) diff --git a/Examples/Tests/pec/inputs_test_2d_pec_field_insulator b/Examples/Tests/pec/inputs_test_2d_pec_field_insulator new file mode 100644 index 00000000000..68a8df1b600 --- /dev/null +++ b/Examples/Tests/pec/inputs_test_2d_pec_field_insulator @@ -0,0 +1,34 @@ +# Maximum number of time steps +max_step = 10 + +# number of grid points +amr.n_cell = 32 32 +amr.blocking_factor = 16 + +# Maximum level in hierarchy (for now must be 0, i.e., one level in total) +amr.max_level = 0 + +# Geometry +geometry.dims = 2 +geometry.prob_lo = 0. 
2.e-2 # physical domain +geometry.prob_hi = 1.e-2 3.e-2 + +# Boundary condition +boundary.field_lo = neumann periodic +boundary.field_hi = PECInsulator periodic + +warpx.serialize_initial_conditions = 1 + +# Verbosity +warpx.verbose = 1 + +# CFL +warpx.cfl = 1.0 + +insulator.area_x_hi(y,z) = (2.25e-2 <= z and z <= 2.75e-2) +insulator.By_x_hi(y,z,t) = min(t/1.0e-12,1)*1.e1*3.3e-4 + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 10 +diag1.diag_type = Full diff --git a/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator.json b/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator.json new file mode 100644 index 00000000000..ca6f38977ae --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator.json @@ -0,0 +1,13 @@ +{ + "lev=0": { + "Bx": 0.0, + "By": 0.34938851065132936, + "Bz": 0.0, + "Ex": 31871402.236828588, + "Ey": 0.0, + "Ez": 104908439.18998256, + "jx": 0.0, + "jy": 0.0, + "jz": 0.0 + } +} \ No newline at end of file diff --git a/Source/BoundaryConditions/CMakeLists.txt b/Source/BoundaryConditions/CMakeLists.txt index 751e52abdd9..c560d121385 100644 --- a/Source/BoundaryConditions/CMakeLists.txt +++ b/Source/BoundaryConditions/CMakeLists.txt @@ -2,6 +2,7 @@ foreach(D IN LISTS WarpX_DIMS) warpx_set_suffix_dims(SD ${D}) target_sources(lib_${SD} PRIVATE + PEC_Insulator.cpp PML.cpp WarpXEvolvePML.cpp WarpXFieldBoundaries.cpp diff --git a/Source/BoundaryConditions/Make.package b/Source/BoundaryConditions/Make.package index 43d18425ffc..452c9c18b7e 100644 --- a/Source/BoundaryConditions/Make.package +++ b/Source/BoundaryConditions/Make.package @@ -1,3 +1,4 @@ +CEXE_sources += PEC_Insulator.cpp CEXE_sources += PML.cpp WarpXEvolvePML.cpp CEXE_sources += WarpXFieldBoundaries.cpp WarpX_PEC.cpp diff --git a/Source/BoundaryConditions/PEC_Insulator.H b/Source/BoundaryConditions/PEC_Insulator.H new file mode 100644 index 00000000000..5cfdf6488f0 --- /dev/null +++ b/Source/BoundaryConditions/PEC_Insulator.H @@ -0,0 +1,180 @@ +#ifndef PEC_INSULATOR_H_ +#define PEC_INSULATOR_H_ + +#include "Utils/WarpXAlgorithmSelection.H" + +#include +#include +#include + +#include + +#include +#include + +class PEC_Insulator +{ +public: + + PEC_Insulator(); + + /** + * \brief Apply either the PEC or insulator boundary condition on the boundary and in the + * guard cells. + * In the PEC, the nodal fields (in a Yee mesh) are made even relative to the boundary, + * the non-nodal fields are made odd. + * In the insulator, the tangential fields are set to the value if specified, otherwise unchanged, + * and the normal fields extrapolated from the valid cells. 
+ * + * \param[in,out] Efield + * \param[in] field_boundary_lo lower field boundary conditions + * \param[in] field_boundary_hi upper field boundary conditions + * \param[in] ng_fieldgather number of guard cells used by field gather + * \param[in] geom geometry object of level "lev" + * \param[in] lev level of the Multifab + * \param[in] patch_type coarse or fine + * \param[in] ref_ratios vector containing the refinement ratios of the refinement levels + * \param[in] time current time of the simulation + * \param[in] split_pml_field whether pml the multifab is the regular Efield or + * split pml field + */ + void ApplyPEC_InsulatortoEfield (std::array Efield, + amrex::Array const & field_boundary_lo, + amrex::Array const & field_boundary_hi, + amrex::IntVect const & ng_fieldgather, amrex::Geometry const & geom, + int lev, PatchType patch_type, amrex::Vector const & ref_ratios, + amrex::Real time, + bool split_pml_field = false); + /** + * \brief Apply either the PEC or insulator boundary condition on the boundary and in the + * guard cells. + * In the PEC, the nodal fields (in a Yee mesh) are made even relative to the boundary, + * the non-nodal fields are made odd. + * In the insulator, the tangential fields are set to the value if specified, otherwise unchanged, + * and the normal fields extrapolated from the valid cells. + * + * \param[in,out] Bfield + * \param[in] field_boundary_lo lower field boundary conditions + * \param[in] field_boundary_hi upper field boundary conditions + * \param[in] ng_fieldgather number of guard cells used by field gather + * \param[in] geom geometry object of level "lev" + * \param[in] lev level of the Multifab + * \param[in] patch_type coarse or fine + * \param[in] ref_ratios vector containing the refinement ratios of the refinement levels + * \param[in] time current time of the simulation + */ + void ApplyPEC_InsulatortoBfield (std::array Bfield, + amrex::Array const & field_boundary_lo, + amrex::Array const & field_boundary_hi, + amrex::IntVect const & ng_fieldgather, amrex::Geometry const & geom, + int lev, PatchType patch_type, amrex::Vector const & ref_ratios, + amrex::Real time); + + /** + * \brief The work routine applying the boundary condition + * + * \param[in,out] field + * \param[in] field_boundary_lo lower field boundary conditions + * \param[in] field_boundary_hi upper field boundary conditions + * \param[in] ng_fieldgather number of guard cells used by field gather + * \param[in] geom geometry object of level "lev" + * \param[in] lev level of the Multifab + * \param[in] patch_type coarse or fine + * \param[in] ref_ratios vector containing the refinement ratios of the refinement levels + * \param[in] time current time of the simulation + * \param[in] split_pml_field whether pml the multifab is the regular Efield or + * split pml field + * \param[in] E_like whether the field is E like or B like + * \param[in] set_F_x_lo whether the tangential field at the boundary was specified + * \param[in] set_F_x_hi whether the tangential field at the boundary was specified + * \param[in] a_Fy_x_lo the parser for the tangential field at the boundary + * \param[in] a_Fz_x_lo the parser for the tangential field at the boundary + * \param[in] a_Fy_x_hi the parser for the tangential field at the boundary + * \param[in] a_Fz_x_hi the parser for the tangential field at the boundary + * \param[in] set_F_y_lo whether the tangential field at the boundary was specified + * \param[in] set_F_y_hi whether the tangential field at the boundary was specified + * \param[in] 
a_Fx_y_lo the parser for the tangential field at the boundary + * \param[in] a_Fz_y_lo the parser for the tangential field at the boundary + * \param[in] a_Fx_y_hi the parser for the tangential field at the boundary + * \param[in] a_Fz_y_hi the parser for the tangential field at the boundary + * \param[in] set_F_z_lo whether the tangential field at the boundary was specified + * \param[in] set_F_z_hi whether the tangential field at the boundary was specified + * \param[in] a_Fx_z_lo the parser for the tangential field at the boundary + * \param[in] a_Fy_z_lo the parser for the tangential field at the boundary + * \param[in] a_Fx_z_hi the parser for the tangential field at the boundary + * \param[in] a_Fy_z_hi the parser for the tangential field at the boundary + */ + void + ApplyPEC_InsulatortoField (std::array field, + amrex::Array const & field_boundary_lo, + amrex::Array const & field_boundary_hi, + amrex::IntVect const & ng_fieldgather, amrex::Geometry const & geom, + int lev, PatchType patch_type, amrex::Vector const & ref_ratios, + amrex::Real time, + bool split_pml_field, + bool E_like, +#if (AMREX_SPACEDIM > 1) + bool set_F_x_lo, bool set_F_x_hi, + std::unique_ptr const & a_Fy_x_lo, std::unique_ptr const & a_Fz_x_lo, + std::unique_ptr const & a_Fy_x_hi, std::unique_ptr const & a_Fz_x_hi, +#endif +#if defined(WARPX_DIM_3D) + bool set_F_y_lo, bool set_F_y_hi, + std::unique_ptr const & a_Fx_y_lo, std::unique_ptr const & a_Fz_y_lo, + std::unique_ptr const & a_Fx_y_hi, std::unique_ptr const & a_Fz_y_hi, +#endif + bool set_F_z_lo, bool set_F_z_hi, + std::unique_ptr const & a_Fx_z_lo, std::unique_ptr const & a_Fy_z_lo, + std::unique_ptr const & a_Fx_z_hi, std::unique_ptr const & a_Fy_z_hi); + +private: + + /* \brief Reads in the parsers for the tangential fields, returning whether + * the input parameter was specified. 
+ * \param[in] pp_insulator ParmParse instance + * \param[out] parser the parser generated from the input + * \param[in] input_name the name of the input parameter + * \param[in] coord1 the first coordinate in the plane + * \param[in] coord2 the second coordinate in the plane + */ + bool ReadTangentialFieldParser (amrex::ParmParse const & pp_insulator, + std::unique_ptr & parser, + std::string const & input_name, + std::string const & coord1, + std::string const & coord2); + + std::vector> m_insulator_area_lo; + std::vector> m_insulator_area_hi; + +#if (AMREX_SPACEDIM > 1) + bool m_set_B_x_lo = false, m_set_B_x_hi = false; + std::unique_ptr m_By_x_lo, m_Bz_x_lo; + std::unique_ptr m_By_x_hi, m_Bz_x_hi; +#endif +#if defined(WARPX_DIM_3D) + bool m_set_B_y_lo = false, m_set_B_y_hi = false; + std::unique_ptr m_Bx_y_lo, m_Bz_y_lo; + std::unique_ptr m_Bx_y_hi, m_Bz_y_hi; +#endif + bool m_set_B_z_lo = false, m_set_B_z_hi = false; + std::unique_ptr m_Bx_z_lo, m_By_z_lo; + std::unique_ptr m_Bx_z_hi, m_By_z_hi; + + +#if (AMREX_SPACEDIM > 1) + bool m_set_E_x_lo = false, m_set_E_x_hi = false; + std::unique_ptr m_Ey_x_lo, m_Ez_x_lo; + std::unique_ptr m_Ey_x_hi, m_Ez_x_hi; +#endif +#if defined(WARPX_DIM_3D) + bool m_set_E_y_lo = false, m_set_E_y_hi = false; + std::unique_ptr m_Ex_y_lo, m_Ez_y_lo; + std::unique_ptr m_Ex_y_hi, m_Ez_y_hi; +#endif + bool m_set_E_z_lo = false, m_set_E_z_hi = false; + std::unique_ptr m_Ex_z_lo, m_Ey_z_lo; + std::unique_ptr m_Ex_z_hi, m_Ey_z_hi; + + +}; +#endif // PEC_INSULATOR_H_ diff --git a/Source/BoundaryConditions/PEC_Insulator.cpp b/Source/BoundaryConditions/PEC_Insulator.cpp new file mode 100644 index 00000000000..df411f8e908 --- /dev/null +++ b/Source/BoundaryConditions/PEC_Insulator.cpp @@ -0,0 +1,561 @@ +#include "BoundaryConditions/PEC_Insulator.H" +#include "Utils/Parser/ParserUtils.H" +#include "WarpX.H" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace +{ + /** + * \brief At the specified grid location, apply either the PEC or insulator boundary condition if + * the cell is on the boundary or in the guard cells. + * + * \param[in] icomp component of the field being updated + * (0=x, 1=y, 2=z in Cartesian) + * (0=r, 1=theta, 2=z in RZ) + * \param[in] dom_lo index value of the lower domain boundary (cell-centered) + * \param[in] dom_hi index value of the higher domain boundary (cell-centered) + * \param[in] ijk_vec indices along the x(i), y(j), z(k) of field Array4 + * \param[in] n index of the MultiFab component being updated + * \param[in] field field data to be updated if (ijk) is at the boundary + * or a guard cell + * \param[in] E_like whether the field behaves like E field or B field + * \param[in] is_nodal staggering of the field data being updated. 
+ * \param[in] is_insulator_lo Specifies whether lower boundaries are insulators + * \param[in] is_insulator_hi Specifies whether upper boundaries are insulators + * \param[in] field_lo the values of the field for the lower insulator boundary cell + * \param[in] field_hi the values of the field for the upper insulator boundary cell + * \param[in] set_field_lo whether to set the field for the direction on the lower boundary + * \param[in] set_field_hi whether to set the field for the direction on the upper boundary + * \param[in] fbndry_lo specified values of the field at the lower boundaries in the insulator + * \param[in] fbndry_hi specified values of the field at the upper boundaries in the insulator + */ + AMREX_GPU_DEVICE AMREX_FORCE_INLINE + void SetFieldOnPEC_Insulator (int icomp, + amrex::IntVect const & dom_lo, + amrex::IntVect const & dom_hi, + amrex::IntVect const & ijk_vec, int n, + amrex::Array4 const & field, + bool const E_like, + amrex::IntVect const & is_nodal, + amrex::IntVect const & is_insulator_lo, + amrex::IntVect const & is_insulator_hi, + amrex::RealVect const & field_lo, + amrex::RealVect const & field_hi, + amrex::IntVect const & set_field_lo, + amrex::IntVect const & set_field_hi, + amrex::GpuArray const fbndry_lo, + amrex::GpuArray const fbndry_hi) + { + using namespace amrex::literals; + amrex::IntVect ijk_mirror = ijk_vec; + amrex::IntVect ijk_mirrorp1 = ijk_vec; + bool OnBoundary = false; + bool GuardCell = false; + bool isInsulatorBoundary = false; + amrex::Real sign = +1._rt; + bool is_normal_to_boundary; + amrex::Real field_value = 0._rt; + bool set_field = false; + // Loop over all dimensions + for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { + // Loop over sides, iside = -1 (lo), iside = +1 (hi) + for (int iside = -1; iside <= +1; iside += 2) { + bool const isPEC_InsulatorBoundary = ( (iside == -1) + ? fbndry_lo[idim] == FieldBoundaryType::PECInsulator + : fbndry_hi[idim] == FieldBoundaryType::PECInsulator ); + if (isPEC_InsulatorBoundary) { + isInsulatorBoundary = ( (iside == -1) + ? is_insulator_lo[idim] == 1 + : is_insulator_hi[idim] == 1 ); + } + if (isPEC_InsulatorBoundary) { + // Calculates the number of grid points ijk_vec is beyond the + // domain boundary i.e. a value of +1 means the current cell is + // outside of the simulation domain by 1 cell. Note that the high + // side domain boundary is between cell dom_hi and dom_hi+1 for cell + // centered grids and on cell dom_hi+1 for nodal grid. This is why + // (dom_hi[idim] + is_nodal[idim]) is used. + int const ig = ((iside == -1) ? (dom_lo[idim] - ijk_vec[idim]) + : (ijk_vec[idim] - (dom_hi[idim] + is_nodal[idim]))); + +#if (defined WARPX_DIM_XZ) || (defined WARPX_DIM_RZ) + // For 2D : for icomp==1, (Fy in XZ, Ftheta in RZ), + // icomp=1 is not normal to x or z boundary + // The logic below ensures that the flags are set right for 2D + is_normal_to_boundary = (icomp == (2*idim)); +#elif (defined WARPX_DIM_1D_Z) + // For 1D : icomp=0 and icomp=1 (Fx and Fy are not normal to the z boundary) + // The logic below ensures that the flags are set right for 1D + is_normal_to_boundary = (icomp == 2); +#else + is_normal_to_boundary = (icomp == idim); +#endif + + if (ig == 0) { + // Check if field is on the boundary + if (is_nodal[idim] == 1) { + OnBoundary = true; + } + } else if (ig > 0) { + GuardCell = true; + + // Mirror location inside the domain by "ig" number of cells + ijk_mirror[idim] = ( (iside == -1) + ? 
(dom_lo[idim] + ig - (1 - is_nodal[idim])) + : (dom_hi[idim] + 1 - ig)); + // Location twice as far in, for extrapolation + ijk_mirrorp1[idim] = 2*ijk_mirror[idim] - ijk_vec[idim]; + + // Check for components with even symmetry. + // True for E_like and tangential, and B_like and normal + if (E_like ^ is_normal_to_boundary) { sign *= -1._rt; } + + field_value = ( (iside == -1) ? field_lo[idim] : field_hi[idim] ); + set_field = ( (iside == -1) ? set_field_lo[idim]==1 : set_field_hi[idim]==1 ); + +#if (defined WARPX_DIM_RZ) + if (idim == 0 && iside == +1) { + // Upper radial boundary + amrex::Real const rguard = ijk_vec[idim] + 0.5_rt*(1._rt - is_nodal[idim]); + if (icomp == 0) { + // Add radial scale so that the divergence, drFr/dr, is 0. + // This only works for the first guard cell and with + // Fr cell centered in r. + amrex::Real const rmirror = ijk_mirror[idim] + 0.5_rt*(1._rt - is_nodal[idim]); + // Calculate radial scale factor + sign *= rmirror/rguard; + } + if (isInsulatorBoundary) { + // Apply radial scale factor + field_value *= dom_hi[idim]/rguard; + } + } +#endif + } + } // is pec_insulator boundary + } // loop over iside + } // loop over dimensions + + if (isInsulatorBoundary) { + if (is_normal_to_boundary) { + // The value on the boundary is left unmodified + // The values in the guard cells are extrapolated + if (GuardCell) { + field(ijk_vec, n) = 2._rt*field(ijk_mirror, n) - field(ijk_mirrorp1, n); + } + } else if ((OnBoundary || GuardCell) && set_field) { + field(ijk_vec, n) = field_value; + } else if (GuardCell) { + field(ijk_vec, n) = 2._rt*field(ijk_mirror, n) - field(ijk_mirrorp1, n); + } + } else { + if (OnBoundary && (E_like ^ is_normal_to_boundary)) { + // If ijk_vec is on a boundary, set to zero if + // E_like and tangential or B_like and normal + field(ijk_vec,n) = 0._rt; + } else if (GuardCell) { + // Fnormal and Ftangential is set opposite and equal to the value + // in the mirror location, respectively. 
+ field(ijk_vec,n) = sign * field(ijk_mirror,n); + } + } + } +} + + +bool +PEC_Insulator::ReadTangentialFieldParser (amrex::ParmParse const & pp_insulator, + std::unique_ptr & parser, + std::string const & input_name, + std::string const & coord1, + std::string const & coord2) +{ + std::string str = "0"; + bool const specified = utils::parser::Query_parserString(pp_insulator, input_name, str); + parser = std::make_unique(utils::parser::makeParser(str, {coord1, coord2, "t"})); + return specified; +} + +PEC_Insulator::PEC_Insulator () +{ + + amrex::ParmParse const pp_insulator("insulator"); + +#if (AMREX_SPACEDIM > 1) + std::string str_area_x_lo = "0"; + std::string str_area_x_hi = "0"; + utils::parser::Query_parserString( pp_insulator, "area_x_lo(y,z)", str_area_x_lo); + utils::parser::Query_parserString( pp_insulator, "area_x_hi(y,z)", str_area_x_hi); + m_insulator_area_lo.push_back( + std::make_unique(utils::parser::makeParser(str_area_x_lo, {"y", "z"}))); + m_insulator_area_hi.push_back( + std::make_unique(utils::parser::makeParser(str_area_x_hi, {"y", "z"}))); + + m_set_B_x_lo |= ReadTangentialFieldParser(pp_insulator, m_By_x_lo, "By_x_lo(y,z,t)", "y", "z"); + m_set_B_x_lo |= ReadTangentialFieldParser(pp_insulator, m_Bz_x_lo, "Bz_x_lo(y,z,t)", "y", "z"); + m_set_B_x_hi |= ReadTangentialFieldParser(pp_insulator, m_By_x_hi, "By_x_hi(y,z,t)", "y", "z"); + m_set_B_x_hi |= ReadTangentialFieldParser(pp_insulator, m_Bz_x_hi, "Bz_x_hi(y,z,t)", "y", "z"); + + m_set_E_x_lo |= ReadTangentialFieldParser(pp_insulator, m_Ey_x_lo, "Ey_x_lo(y,z,t)", "y", "z"); + m_set_E_x_lo |= ReadTangentialFieldParser(pp_insulator, m_Ez_x_lo, "Ez_x_lo(y,z,t)", "y", "z"); + m_set_E_x_hi |= ReadTangentialFieldParser(pp_insulator, m_Ey_x_hi, "Ey_x_hi(y,z,t)", "y", "z"); + m_set_E_x_hi |= ReadTangentialFieldParser(pp_insulator, m_Ez_x_hi, "Ez_x_hi(y,z,t)", "y", "z"); +#endif +#if defined(WARPX_DIM_3D) + std::string str_area_y_lo = "0"; + std::string str_area_y_hi = "0"; + utils::parser::Query_parserString( pp_insulator, "area_y_lo(x,z)", str_area_y_lo); + utils::parser::Query_parserString( pp_insulator, "area_y_hi(x,z)", str_area_y_hi); + m_insulator_area_lo.push_back( + std::make_unique(utils::parser::makeParser(str_area_y_lo, {"x", "z"}))); + m_insulator_area_hi.push_back( + std::make_unique(utils::parser::makeParser(str_area_y_hi, {"x", "z"}))); + + m_set_B_y_lo |= ReadTangentialFieldParser(pp_insulator, m_Bx_y_lo, "Bx_y_lo(x,z,t)", "x", "z"); + m_set_B_y_lo |= ReadTangentialFieldParser(pp_insulator, m_Bz_y_lo, "Bz_y_lo(x,z,t)", "x", "z"); + m_set_B_y_hi |= ReadTangentialFieldParser(pp_insulator, m_Bx_y_hi, "Bx_y_hi(x,z,t)", "x", "z"); + m_set_B_y_hi |= ReadTangentialFieldParser(pp_insulator, m_Bz_y_hi, "Bz_y_hi(x,z,t)", "x", "z"); + + m_set_E_y_lo |= ReadTangentialFieldParser(pp_insulator, m_Ex_y_lo, "Ex_y_lo(x,z,t)", "x", "z"); + m_set_E_y_lo |= ReadTangentialFieldParser(pp_insulator, m_Ez_y_lo, "Ez_y_lo(x,z,t)", "x", "z"); + m_set_E_y_hi |= ReadTangentialFieldParser(pp_insulator, m_Ex_y_hi, "Ex_y_hi(x,z,t)", "x", "z"); + m_set_E_y_hi |= ReadTangentialFieldParser(pp_insulator, m_Ez_y_hi, "Ez_y_hi(x,z,t)", "x", "z"); +#endif + + std::string str_area_z_lo = "0"; + std::string str_area_z_hi = "0"; + utils::parser::Query_parserString( pp_insulator, "area_z_lo(x,y)", str_area_z_lo); + utils::parser::Query_parserString( pp_insulator, "area_z_hi(x,y)", str_area_z_hi); + m_insulator_area_lo.push_back( + std::make_unique(utils::parser::makeParser(str_area_z_lo, {"x", "y"}))); + m_insulator_area_hi.push_back( + 
std::make_unique(utils::parser::makeParser(str_area_z_hi, {"x", "y"}))); + + m_set_B_z_lo |= ReadTangentialFieldParser(pp_insulator, m_Bx_z_lo, "Bx_z_lo(x,y,t)", "x", "y"); + m_set_B_z_lo |= ReadTangentialFieldParser(pp_insulator, m_By_z_lo, "By_z_lo(x,y,t)", "x", "y"); + m_set_B_z_hi |= ReadTangentialFieldParser(pp_insulator, m_Bx_z_hi, "Bx_z_hi(x,y,t)", "x", "y"); + m_set_B_z_hi |= ReadTangentialFieldParser(pp_insulator, m_By_z_hi, "By_z_hi(x,y,t)", "x", "y"); + + m_set_E_z_lo |= ReadTangentialFieldParser(pp_insulator, m_Ex_z_lo, "Ex_z_lo(x,y,t)", "x", "y"); + m_set_E_z_lo |= ReadTangentialFieldParser(pp_insulator, m_Ey_z_lo, "Ey_z_lo(x,y,t)", "x", "y"); + m_set_E_z_hi |= ReadTangentialFieldParser(pp_insulator, m_Ex_z_hi, "Ex_z_hi(x,y,t)", "x", "y"); + m_set_E_z_hi |= ReadTangentialFieldParser(pp_insulator, m_Ey_z_hi, "Ey_z_hi(x,y,t)", "x", "y"); + +} + +void +PEC_Insulator::ApplyPEC_InsulatortoEfield ( + std::array Efield, + amrex::Array const & field_boundary_lo, + amrex::Array const & field_boundary_hi, + amrex::IntVect const & ng_fieldgather, amrex::Geometry const & geom, + int lev, PatchType patch_type, amrex::Vector const & ref_ratios, + amrex::Real time, + bool split_pml_field) +{ + bool const E_like = true; + ApplyPEC_InsulatortoField(Efield, field_boundary_lo, field_boundary_hi, ng_fieldgather, geom, + lev, patch_type, ref_ratios, time, split_pml_field, + E_like, +#if (AMREX_SPACEDIM > 1) + m_set_E_x_lo, m_set_E_x_hi, + m_Ey_x_lo, m_Ez_x_lo, m_Ey_x_hi, m_Ez_x_hi, +#endif +#if defined(WARPX_DIM_3D) + m_set_E_y_lo, m_set_E_y_hi, + m_Ex_y_lo, m_Ez_y_lo, m_Ex_y_hi, m_Ez_y_hi, +#endif + m_set_E_z_lo, m_set_E_z_hi, + m_Ex_z_lo, m_Ey_z_lo, m_Ex_z_hi, m_Ey_z_hi); +} + + +void +PEC_Insulator::ApplyPEC_InsulatortoBfield ( + std::array Bfield, + amrex::Array const & field_boundary_lo, + amrex::Array const & field_boundary_hi, + amrex::IntVect const & ng_fieldgather, amrex::Geometry const & geom, + int lev, PatchType patch_type, amrex::Vector const & ref_ratios, + amrex::Real time) +{ + bool const E_like = false; + bool const split_pml_field = false; + ApplyPEC_InsulatortoField(Bfield, field_boundary_lo, field_boundary_hi, ng_fieldgather, geom, + lev, patch_type, ref_ratios, time, split_pml_field, + E_like, +#if (AMREX_SPACEDIM > 1) + m_set_B_x_lo, m_set_B_x_hi, + m_By_x_lo, m_Bz_x_lo, m_By_x_hi, m_Bz_x_hi, +#endif +#if defined(WARPX_DIM_3D) + m_set_B_y_lo, m_set_B_y_hi, + m_Bx_y_lo, m_Bz_y_lo, m_Bx_y_hi, m_Bz_y_hi, +#endif + m_set_B_z_lo, m_set_B_z_hi, + m_Bx_z_lo, m_By_z_lo, m_Bx_z_hi, m_By_z_hi); +} + + +void +PEC_Insulator::ApplyPEC_InsulatortoField ( + std::array field, + amrex::Array const & field_boundary_lo, + amrex::Array const & field_boundary_hi, + amrex::IntVect const & ng_fieldgather, amrex::Geometry const & geom, + int lev, PatchType patch_type, amrex::Vector const & ref_ratios, + amrex::Real time, + bool split_pml_field, + bool E_like, +#if (AMREX_SPACEDIM > 1) + bool set_F_x_lo, bool set_F_x_hi, + std::unique_ptr const & a_Fy_x_lo, std::unique_ptr const & a_Fz_x_lo, + std::unique_ptr const & a_Fy_x_hi, std::unique_ptr const & a_Fz_x_hi, +#endif +#if defined(WARPX_DIM_3D) + bool set_F_y_lo, bool set_F_y_hi, + std::unique_ptr const & a_Fx_y_lo, std::unique_ptr const & a_Fz_y_lo, + std::unique_ptr const & a_Fx_y_hi, std::unique_ptr const & a_Fz_y_hi, +#endif + bool set_F_z_lo, bool set_F_z_hi, + std::unique_ptr const & a_Fx_z_lo, std::unique_ptr const & a_Fy_z_lo, + std::unique_ptr const & a_Fx_z_hi, std::unique_ptr const & a_Fy_z_hi) +{ + using namespace amrex::literals; + 
amrex::Box domain_box = geom.Domain(); + if (patch_type == PatchType::coarse && (lev > 0)) { + domain_box.coarsen(ref_ratios[lev-1]); + } + amrex::IntVect const domain_lo = domain_box.smallEnd(); + amrex::IntVect const domain_hi = domain_box.bigEnd(); + amrex::GpuArray fbndry_lo; + amrex::GpuArray fbndry_hi; + for (int idim=0; idim < AMREX_SPACEDIM; ++idim) { + fbndry_lo[idim] = field_boundary_lo[idim]; + fbndry_hi[idim] = field_boundary_hi[idim]; + } + +#if (AMREX_SPACEDIM > 1) + amrex::ParserExecutor<2> const area_parsers_x_lo = m_insulator_area_lo[0]->compile<2>(); + amrex::ParserExecutor<2> const area_parsers_x_hi = m_insulator_area_hi[0]->compile<2>(); +#endif +#if defined(WARPX_DIM_3D) + amrex::ParserExecutor<2> const area_parsers_y_lo = m_insulator_area_lo[1]->compile<2>(); + amrex::ParserExecutor<2> const area_parsers_y_hi = m_insulator_area_hi[1]->compile<2>(); +#endif + amrex::ParserExecutor<2> const area_parsers_z_lo = m_insulator_area_lo[WARPX_ZINDEX]->compile<2>(); + amrex::ParserExecutor<2> const area_parsers_z_hi = m_insulator_area_hi[WARPX_ZINDEX]->compile<2>(); + +#if (AMREX_SPACEDIM > 1) + amrex::ParserExecutor<3> const Fy_x_lo_parser = a_Fy_x_lo->compile<3>(); + amrex::ParserExecutor<3> const Fz_x_lo_parser = a_Fz_x_lo->compile<3>(); + amrex::ParserExecutor<3> const Fy_x_hi_parser = a_Fy_x_hi->compile<3>(); + amrex::ParserExecutor<3> const Fz_x_hi_parser = a_Fz_x_hi->compile<3>(); +#endif +#if defined(WARPX_DIM_3D) + amrex::ParserExecutor<3> const Fx_y_lo_parser = a_Fx_y_lo->compile<3>(); + amrex::ParserExecutor<3> const Fz_y_lo_parser = a_Fz_y_lo->compile<3>(); + amrex::ParserExecutor<3> const Fx_y_hi_parser = a_Fx_y_hi->compile<3>(); + amrex::ParserExecutor<3> const Fz_y_hi_parser = a_Fz_y_hi->compile<3>(); +#endif + amrex::ParserExecutor<3> const Fx_z_lo_parser = a_Fx_z_lo->compile<3>(); + amrex::ParserExecutor<3> const Fy_z_lo_parser = a_Fy_z_lo->compile<3>(); + amrex::ParserExecutor<3> const Fx_z_hi_parser = a_Fx_z_hi->compile<3>(); + amrex::ParserExecutor<3> const Fy_z_hi_parser = a_Fy_z_hi->compile<3>(); + + amrex::IntVect const Fx_nodal = field[0]->ixType().toIntVect(); + amrex::IntVect const Fy_nodal = field[1]->ixType().toIntVect(); + amrex::IntVect const Fz_nodal = field[2]->ixType().toIntVect(); + // For each field multifab, apply boundary condition to ncomponents + // If not split field, the boundary condition is applied to the regular field used in Maxwell's eq. + // If split_pml_field is true, then boundary condition is applied to all the split field components. + int const nComp_x = field[0]->nComp(); + int const nComp_y = field[1]->nComp(); + int const nComp_z = field[2]->nComp(); + + std::array const & dx = WarpX::CellSize(lev); + +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for (amrex::MFIter mfi(*field[0], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { + // Extract field data + amrex::Array4 const & Fx = field[0]->array(mfi); + amrex::Array4 const & Fy = field[1]->array(mfi); + amrex::Array4 const & Fz = field[2]->array(mfi); + + // Extract tileboxes for which to loop + // if split field, the box includes nodal flag + // For E-field used in Maxwell's update, nodal flag plus cells that particles + // gather fields from in the guard-cell region are included. + // Note that for simulations without particles or laser, ng_field_gather is 0 + // and the guard-cell values of the E-field multifab will not be modified. + amrex::Box const & tex = (split_pml_field) ? 
mfi.tilebox(field[0]->ixType().toIntVect()) + : mfi.tilebox(field[0]->ixType().toIntVect(), ng_fieldgather); + amrex::Box const & tey = (split_pml_field) ? mfi.tilebox(field[1]->ixType().toIntVect()) + : mfi.tilebox(field[1]->ixType().toIntVect(), ng_fieldgather); + amrex::Box const & tez = (split_pml_field) ? mfi.tilebox(field[2]->ixType().toIntVect()) + : mfi.tilebox(field[2]->ixType().toIntVect(), ng_fieldgather); + + const amrex::XDim3 xyzmin_x = WarpX::LowerCorner(tex, lev, 0._rt); + const amrex::XDim3 xyzmin_y = WarpX::LowerCorner(tey, lev, 0._rt); + const amrex::XDim3 xyzmin_z = WarpX::LowerCorner(tez, lev, 0._rt); + amrex::IntVect const lo_x = tex.smallEnd(); + amrex::IntVect const lo_y = tey.smallEnd(); + amrex::IntVect const lo_z = tez.smallEnd(); + + // loop over cells and update fields + amrex::ParallelFor( + tex, nComp_x, + [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { + amrex::ignore_unused(j, k); + + amrex::IntVect const iv(AMREX_D_DECL(i, j, k)); + amrex::Real const x = (AMREX_SPACEDIM > 1 ? xyzmin_x.x + (iv[0] - lo_x[0])*dx[0] : 0._rt); + amrex::Real const y = (AMREX_SPACEDIM == 3 ? xyzmin_x.y + (iv[1] - lo_x[1])*dx[1] : 0._rt); +#if (AMREX_SPACEDIM > 1) + amrex::Real const z = xyzmin_x.z + (iv[WARPX_ZINDEX] - lo_x[WARPX_ZINDEX])*dx[2]; +#endif + + amrex::IntVect is_insulator_lo; + amrex::IntVect is_insulator_hi; + amrex::RealVect F_lo, F_hi; + amrex::IntVect set_field_lo; + amrex::IntVect set_field_hi; +#if (AMREX_SPACEDIM > 1) + is_insulator_lo[0] = (area_parsers_x_lo(y, z) > 0._rt); + is_insulator_hi[0] = (area_parsers_x_hi(y, z) > 0._rt); + F_lo[0] = 0._rt; // Will be unused + F_hi[0] = 0._rt; // Will be unused + set_field_lo[0] = 0; // Will be unused + set_field_hi[0] = 0; // Will be unused +#endif +#if defined(WARPX_DIM_3D) + is_insulator_lo[1] = (area_parsers_y_lo(x, z) > 0._rt); + is_insulator_hi[1] = (area_parsers_y_hi(x, z) > 0._rt); + F_lo[1] = (set_F_y_lo ? Fx_y_lo_parser(x, z, time) : 0._rt); + F_hi[1] = (set_F_y_hi ? Fx_y_hi_parser(x, z, time) : 0._rt); + set_field_lo[1] = set_F_y_lo; + set_field_hi[1] = set_F_y_hi; +#endif + is_insulator_lo[WARPX_ZINDEX] = (area_parsers_z_lo(x, y) > 0._rt); + is_insulator_hi[WARPX_ZINDEX] = (area_parsers_z_hi(x, y) > 0._rt); + F_lo[WARPX_ZINDEX] = (set_F_z_lo ? Fx_z_lo_parser(x, y, time) : 0._rt); + F_hi[WARPX_ZINDEX] = (set_F_z_hi ? Fx_z_hi_parser(x, y, time) : 0._rt); + set_field_lo[WARPX_ZINDEX] = set_F_z_lo; + set_field_hi[WARPX_ZINDEX] = set_F_z_hi; + + int const icomp = 0; + ::SetFieldOnPEC_Insulator(icomp, domain_lo, domain_hi, iv, n, + Fx, E_like, Fx_nodal, is_insulator_lo, is_insulator_hi, + F_lo, F_hi, set_field_lo, set_field_hi, + fbndry_lo, fbndry_hi); + }, + tey, nComp_y, + [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { + amrex::ignore_unused(j, k); + + amrex::IntVect const iv(AMREX_D_DECL(i, j, k)); + amrex::Real const x = (AMREX_SPACEDIM > 1 ? xyzmin_y.x + (iv[0] - lo_y[0])*dx[0] : 0._rt); + amrex::Real const y = (AMREX_SPACEDIM == 3 ? xyzmin_y.y + (iv[1] - lo_y[1])*dx[1] : 0._rt); +#if (AMREX_SPACEDIM > 1) + amrex::Real const z = xyzmin_y.z + (iv[WARPX_ZINDEX] - lo_y[WARPX_ZINDEX])*dx[2]; +#endif + + amrex::IntVect is_insulator_lo; + amrex::IntVect is_insulator_hi; + amrex::RealVect F_lo, F_hi; + amrex::IntVect set_field_lo; + amrex::IntVect set_field_hi; +#if (AMREX_SPACEDIM > 1) + is_insulator_lo[0] = (area_parsers_x_lo(y, z) > 0._rt); + is_insulator_hi[0] = (area_parsers_x_hi(y, z) > 0._rt); + F_lo[0] = (set_F_x_lo ? Fy_x_lo_parser(y, z, time) : 0._rt); + F_hi[0] = (set_F_x_hi ? 
Fy_x_hi_parser(y, z, time) : 0._rt); + set_field_lo[0] = set_F_x_lo; + set_field_hi[0] = set_F_x_hi; +#endif +#if defined(WARPX_DIM_3D) + is_insulator_lo[1] = (area_parsers_y_lo(x, z) > 0._rt); + is_insulator_hi[1] = (area_parsers_y_hi(x, z) > 0._rt); + F_lo[1] = 0._rt; // Will be unused + F_hi[1] = 0._rt; // Will be unused + set_field_lo[1] = 0; // Will be unused + set_field_hi[1] = 0; // Will be unused +#endif + is_insulator_lo[WARPX_ZINDEX] = (area_parsers_z_lo(x, y) > 0._rt); + is_insulator_hi[WARPX_ZINDEX] = (area_parsers_z_hi(x, y) > 0._rt); + F_lo[WARPX_ZINDEX] = (set_F_z_lo ? Fy_z_lo_parser(x, y, time) : 0._rt); + F_hi[WARPX_ZINDEX] = (set_F_z_hi ? Fy_z_hi_parser(x, y, time) : 0._rt); + set_field_lo[WARPX_ZINDEX] = set_F_z_lo; + set_field_hi[WARPX_ZINDEX] = set_F_z_hi; + + int const icomp = 1; + ::SetFieldOnPEC_Insulator(icomp, domain_lo, domain_hi, iv, n, + Fy, E_like, Fy_nodal, is_insulator_lo, is_insulator_hi, + F_lo, F_hi, set_field_lo, set_field_hi, + fbndry_lo, fbndry_hi); + }, + tez, nComp_z, + [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { + amrex::ignore_unused(j, k); + + amrex::IntVect const iv(AMREX_D_DECL(i, j, k)); + amrex::Real const x = (AMREX_SPACEDIM > 1 ? xyzmin_z.x + (iv[0] - lo_z[0])*dx[0] : 0._rt); + amrex::Real const y = (AMREX_SPACEDIM == 3 ? xyzmin_z.y + (iv[1] - lo_z[1])*dx[1] : 0._rt); +#if (AMREX_SPACEDIM > 1) + amrex::Real const z = xyzmin_z.z + (iv[WARPX_ZINDEX] - lo_z[WARPX_ZINDEX])*dx[2]; +#endif + + amrex::IntVect is_insulator_lo; + amrex::IntVect is_insulator_hi; + amrex::RealVect F_lo, F_hi; + amrex::IntVect set_field_lo; + amrex::IntVect set_field_hi; +#if (AMREX_SPACEDIM > 1) + is_insulator_lo[0] = (area_parsers_x_lo(y, z) > 0._rt); + is_insulator_hi[0] = (area_parsers_x_hi(y, z) > 0._rt); + F_lo[0] = (set_F_x_lo ? Fz_x_lo_parser(y, z, time) : 0._rt); + F_hi[0] = (set_F_x_hi ? Fz_x_hi_parser(y, z, time) : 0._rt); + set_field_lo[0] = set_F_x_lo; + set_field_hi[0] = set_F_x_hi; +#endif +#if defined(WARPX_DIM_3D) + is_insulator_lo[1] = (area_parsers_y_lo(x, z) > 0._rt); + is_insulator_hi[1] = (area_parsers_y_hi(x, z) > 0._rt); + F_lo[1] = (set_F_y_lo ? Fz_y_lo_parser(x, z, time) : 0._rt); + F_hi[1] = (set_F_y_hi ? Fz_y_hi_parser(x, z, time) : 0._rt); + set_field_lo[1] = set_F_y_lo; + set_field_hi[1] = set_F_y_hi; +#endif + is_insulator_lo[WARPX_ZINDEX] = (area_parsers_z_lo(x, y) > 0._rt); + is_insulator_hi[WARPX_ZINDEX] = (area_parsers_z_hi(x, y) > 0._rt); + F_lo[WARPX_ZINDEX] = 0._rt; // Will be unused + F_hi[WARPX_ZINDEX] = 0._rt; // Will be unused + set_field_lo[WARPX_ZINDEX] = 0; // Will be unused + set_field_hi[WARPX_ZINDEX] = 0; // Will be unused + + int const icomp = 2; + ::SetFieldOnPEC_Insulator(icomp, domain_lo, domain_hi, iv, n, + Fz, E_like, Fz_nodal, is_insulator_lo, is_insulator_hi, + F_lo, F_hi, set_field_lo, set_field_hi, + fbndry_lo, fbndry_hi); + } + ); + } +} diff --git a/Source/BoundaryConditions/PEC_Insulator_fwd.H b/Source/BoundaryConditions/PEC_Insulator_fwd.H new file mode 100644 index 00000000000..9b2c1b05307 --- /dev/null +++ b/Source/BoundaryConditions/PEC_Insulator_fwd.H @@ -0,0 +1,13 @@ +/* Copyright 2024 David Grote + * + * This file is part of WarpX. 
+ * + * License: BSD-3-Clause-LBNL + */ + +#ifndef PEC_INSULATOR_FWD_H +#define PEC_INSULATOR_FWD_H + +class PEC_Insulator; + +#endif /* PEC_INSULATOR_FWD_H */ diff --git a/Source/BoundaryConditions/WarpXFieldBoundaries.cpp b/Source/BoundaryConditions/WarpXFieldBoundaries.cpp index dc41e95f40f..7566979557e 100644 --- a/Source/BoundaryConditions/WarpXFieldBoundaries.cpp +++ b/Source/BoundaryConditions/WarpXFieldBoundaries.cpp @@ -1,4 +1,5 @@ #include "WarpX.H" +#include "BoundaryConditions/PEC_Insulator.H" #include "BoundaryConditions/PML.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" @@ -92,6 +93,47 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) } } + if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { + amrex::Real const tnew = gett_new(lev); + if (patch_type == PatchType::fine) { + pec_insulator_boundary->ApplyPEC_InsulatortoEfield( + {m_fields.get(FieldType::Efield_fp,Direction{0},lev), + m_fields.get(FieldType::Efield_fp,Direction{1},lev), + m_fields.get(FieldType::Efield_fp,Direction{2},lev)}, + field_boundary_lo, field_boundary_hi, + get_ng_fieldgather(), Geom(lev), + lev, patch_type, ref_ratio, tnew); + if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { + // apply pec on split E-fields in PML region + const bool split_pml_field = true; + pec_insulator_boundary->ApplyPEC_InsulatortoEfield( + m_fields.get_alldirs(FieldType::pml_E_fp, lev), + field_boundary_lo, field_boundary_hi, + get_ng_fieldgather(), Geom(lev), + lev, patch_type, ref_ratio, tnew, + split_pml_field); + } + } else { + pec_insulator_boundary->ApplyPEC_InsulatortoEfield( + {m_fields.get(FieldType::Efield_cp,Direction{0},lev), + m_fields.get(FieldType::Efield_cp,Direction{1},lev), + m_fields.get(FieldType::Efield_cp,Direction{2},lev)}, + field_boundary_lo, field_boundary_hi, + get_ng_fieldgather(), Geom(lev), + lev, patch_type, ref_ratio, tnew); + if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { + // apply pec on split E-fields in PML region + const bool split_pml_field = true; + pec_insulator_boundary->ApplyPEC_InsulatortoEfield( + m_fields.get_alldirs(FieldType::pml_E_cp, lev), + field_boundary_lo, field_boundary_hi, + get_ng_fieldgather(), Geom(lev), + lev, patch_type, ref_ratio, tnew, + split_pml_field); + } + } + } + #ifdef WARPX_DIM_RZ if (patch_type == PatchType::fine) { ApplyFieldBoundaryOnAxis(m_fields.get(FieldType::Efield_fp, Direction{0}, lev), @@ -129,6 +171,27 @@ void WarpX::ApplyBfieldBoundary (const int lev, PatchType patch_type, DtType a_d } } + if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { + amrex::Real const tnew = gett_new(lev); + if (patch_type == PatchType::fine) { + pec_insulator_boundary->ApplyPEC_InsulatortoBfield( + {m_fields.get(FieldType::Bfield_fp,Direction{0},lev), + m_fields.get(FieldType::Bfield_fp,Direction{1},lev), + m_fields.get(FieldType::Bfield_fp,Direction{2},lev)}, + field_boundary_lo, field_boundary_hi, + get_ng_fieldgather(), Geom(lev), + lev, patch_type, ref_ratio, tnew); + } else { + pec_insulator_boundary->ApplyPEC_InsulatortoBfield( + {m_fields.get(FieldType::Bfield_cp,Direction{0},lev), + m_fields.get(FieldType::Bfield_cp,Direction{1},lev), + m_fields.get(FieldType::Bfield_cp,Direction{2},lev)}, + field_boundary_lo, field_boundary_hi, + get_ng_fieldgather(), Geom(lev), + lev, patch_type, ref_ratio, tnew); + } + } + // Silver-Mueller boundaries are only applied on the first half-push of B // This is 
because the formula used for Silver-Mueller assumes that // E and B are staggered in time, which is only true after the first half-push diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index 1ce7959e7e4..932304d5009 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -7,6 +7,7 @@ #include // see WarpX.cpp - full includes for _fwd.H headers +#include #include #include #include diff --git a/Source/Utils/Parser/ParserUtils.H b/Source/Utils/Parser/ParserUtils.H index 96937abdbbe..19f976c3a6c 100644 --- a/Source/Utils/Parser/ParserUtils.H +++ b/Source/Utils/Parser/ParserUtils.H @@ -88,6 +88,20 @@ namespace utils::parser std::string& stored_string); + /** + * \brief If the input is provided, parse the string (typically a mathematical expression) from the + * input file and store it into a variable, replacing its contents. + * + * \param pp used to read the query_string `pp.=string` + * \param query_string ParmParse.query will look for this string + * \param stored_string variable in which the string to parse is stored + */ + bool Query_parserString( + amrex::ParmParse const& pp, + std::string const& query_string, + std::string& stored_string); + + /** Parse a string and return as a double precision floating point number * * In case the string cannot be interpreted as a double, diff --git a/Source/Utils/Parser/ParserUtils.cpp b/Source/Utils/Parser/ParserUtils.cpp index 0339b766e38..d017a6e019c 100644 --- a/Source/Utils/Parser/ParserUtils.cpp +++ b/Source/Utils/Parser/ParserUtils.cpp @@ -51,6 +51,19 @@ void utils::parser::Store_parserString( } } +bool utils::parser::Query_parserString( + amrex::ParmParse const& pp, + std::string const& query_string, + std::string& stored_string) +{ + bool const input_specified = pp.contains(query_string.c_str()); + if (input_specified) { + stored_string.clear(); + utils::parser::Store_parserString(pp, query_string, stored_string); + } + return input_specified; +} + int utils::parser::query (const amrex::ParmParse& a_pp, std::string const& group, char const * str, std::string& val) { const bool is_specified_without_group = a_pp.contains(str); diff --git a/Source/Utils/WarpXAlgorithmSelection.H b/Source/Utils/WarpXAlgorithmSelection.H index f67aeddadd0..088ef295364 100644 --- a/Source/Utils/WarpXAlgorithmSelection.H +++ b/Source/Utils/WarpXAlgorithmSelection.H @@ -132,6 +132,7 @@ AMREX_ENUM(FieldBoundaryType, Open, // Used in the Integrated Green Function Poisson solver // Note that the solver implicitely assumes open BCs: // no need to enforce them separately + PECInsulator, // Mixed boundary with PEC and insulator Default = PML); /** Particle boundary conditions at the domain boundary diff --git a/Source/WarpX.H b/Source/WarpX.H index a635196d044..4dc3ab8c8be 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -12,6 +12,7 @@ #ifndef WARPX_H_ #define WARPX_H_ +#include "BoundaryConditions/PEC_Insulator_fwd.H" #include "BoundaryConditions/PML_fwd.H" #include "Diagnostics/MultiDiagnostics_fwd.H" #include "Diagnostics/ReducedDiags/MultiReducedDiags_fwd.H" @@ -1435,6 +1436,9 @@ private: #endif amrex::Real v_particle_pml; + // Insulator boundary conditions + std::unique_ptr pec_insulator_boundary; + // External fields parameters std::unique_ptr m_p_ext_field_params; diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index d1e3108e32a..cb46f3129c8 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -11,6 +11,7 @@ */ #include "WarpX.H" +#include "BoundaryConditions/PEC_Insulator.H" #include "BoundaryConditions/PML.H" #include 
"Diagnostics/MultiDiagnostics.H" #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" @@ -1685,6 +1686,9 @@ WarpX::ReadParameters () } } + // Setup pec_insulator boundary conditions + pec_insulator_boundary = std::make_unique(); + // for slice generation // { const ParmParse pp_slice("slice"); From 48bb4da13e2ac27f97002ed8ebbdd9c2a788e9cb Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Wed, 6 Nov 2024 19:23:08 +0000 Subject: [PATCH 078/278] [pre-commit.ci] pre-commit autoupdate (#5442) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.1 → v0.7.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.1...v0.7.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 27065ac5ca3..0ee981588e3 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.1 + rev: v0.7.2 hooks: # Run the linter - id: ruff From f8a6701b7e202ef8a2d6451beba7f3ad28221145 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Wed, 6 Nov 2024 13:56:56 -0800 Subject: [PATCH 079/278] Install `lasy` on Perlmutter by default (#5439) This modifies Perlmutter's installation instructions so that `lasy` is installed by default. --- Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh b/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh index da48d9543a0..c77f075a3a8 100755 --- a/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh +++ b/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh @@ -177,7 +177,7 @@ python3 -m pip install --upgrade cupy-cuda12x # CUDA 12 compatible wheel # optimas (based on libEnsemble & ax->botorch->gpytorch->pytorch) python3 -m pip install --upgrade torch # CUDA 12 compatible wheel python3 -m pip install --upgrade optimas[all] - +python3 -m pip install --upgrade lasy # remove build temporary directory rm -rf ${build_dir} From 3d68665f6e2c2bf53c0007a2f1b4f13f6f57396f Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Thu, 7 Nov 2024 08:57:26 -0800 Subject: [PATCH 080/278] CI: build tests with `-g1` compile option (#5443) Build and run CI tests with more debug information by adding the `-g1` compile option. --- .azure-pipelines.yml | 10 +++++----- CMakeLists.txt | 7 +++++++ 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index 1d5127ae5a1..62d8a0a424d 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -152,11 +152,11 @@ jobs: df -h # configure export AMReX_CMAKE_FLAGS="-DAMReX_ASSERTIONS=ON -DAMReX_TESTING=ON" - cmake -S . -B build \ - ${AMReX_CMAKE_FLAGS} \ - ${WARPX_CMAKE_FLAGS} \ - -DWarpX_TEST_CLEANUP=ON \ - -DWarpX_TEST_FPETRAP=ON + export WARPX_TEST_FLAGS="-DWarpX_TEST_CLEANUP=ON -DWarpX_TEST_FPETRAP=ON -DWarpX_TEST_DEBUG=ON" + cmake -S . 
-B build \ + ${AMReX_CMAKE_FLAGS} \ + ${WARPX_CMAKE_FLAGS} \ + ${WARPX_TEST_FLAGS} # build cmake --build build -j 2 # display disk space usage diff --git a/CMakeLists.txt b/CMakeLists.txt index 66fe63230d9..8ff14bacfa6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -90,6 +90,13 @@ mark_as_advanced(WarpX_TEST_CLEANUP) option(WarpX_TEST_FPETRAP "Run CI tests with FPE-trapping runtime parameters" OFF) mark_as_advanced(WarpX_TEST_FPETRAP) +# Advanced option to run CI tests with the -g compile option +option(WarpX_TEST_DEBUG "Run CI tests with the -g compile option" OFF) +mark_as_advanced(WarpX_TEST_DEBUG) +if(WarpX_TEST_DEBUG) + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g1") +endif() + set(WarpX_DIMS_VALUES 1 2 3 RZ) set(WarpX_DIMS 3 CACHE STRING "Simulation dimensionality <1;2;3;RZ>") list(REMOVE_DUPLICATES WarpX_DIMS) From c36d6aac0a3d109f86c3ff4bbcdc2d25841821c1 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Thu, 7 Nov 2024 19:40:47 -0800 Subject: [PATCH 081/278] AMReX/pyAMReX/PICSAR: weekly update (#5445) - Weekly update to latest AMReX: ```console ./Tools/Release/updateAMReX.py ``` - Weekly update to latest pyAMReX (no changes): ```console ./Tools/Release/updatepyAMReX.py ``` - Weekly update to latest PICSAR (no changes): ```console ./Tools/Release/updatePICSAR.py ``` Slightly off schedule to merge a bug fix from AMReX. --- .github/workflows/cuda.yml | 2 +- cmake/dependencies/AMReX.cmake | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index d0bcc10d72c..a10306789cb 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -137,7 +137,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 24.11 && cd - + cd ../amrex && git checkout --detach 4b703fec6c2ff983e465c8cef0cc4947231edb07 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index dd81554d607..e1072d03014 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -283,7 +283,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "24.11" +set(WarpX_amrex_branch "4b703fec6c2ff983e465c8cef0cc4947231edb07" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") From 9476692839160aed8e3f389006d06f1fa9272776 Mon Sep 17 00:00:00 2001 From: David Grote Date: Mon, 11 Nov 2024 08:38:58 -0800 Subject: [PATCH 082/278] Implicit add filtering (#5086) This adds filtering to the implicit solver, replacing PR #4600. It is a simple change. All that is needed is adding a call to filter the `Efield_fp` just before the particles are pushed. The current density is already filtered in `SyncCurrentAndRho`. The name of the routine `ApplyFilterJ` was changed to `ApplyFilterMF` since it now has a more general usage. 
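For reference, the bilinear filter being applied to `Efield_fp` here is, along each direction, a binomial (1/4, 1/2, 1/4) smoothing stencil applied once per pass. The sketch below is illustration only, not WarpX code: `bilinear_pass` is an invented name standing in for one directional pass of `bilinear_filter`, and periodic wrap-around is assumed in place of the real guard-cell exchanges.

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

// One pass of the (1/4, 1/2, 1/4) binomial smoothing with periodic wrap-around.
std::vector<double> bilinear_pass (std::vector<double> const& f)
{
    std::size_t const n = f.size();
    std::vector<double> g(n);
    for (std::size_t i = 0; i < n; ++i) {
        g[i] = 0.25*f[(i + n - 1) % n] + 0.5*f[i] + 0.25*f[(i + 1) % n];
    }
    return g;
}

int main ()
{
    // A single-cell spike: the shortest-wavelength (Nyquist) component is
    // damped most strongly, which is the point of filtering E before the
    // particle push in the implicit solver.
    std::vector<double> e(8, 0.0);
    e[4] = 1.0;
    for (int pass = 0; pass < 2; ++pass) { e = bilinear_pass(e); }
    for (double v : e) { std::printf("%.4f ", v); }
    std::printf("\n");
    return 0;
}
```

Each extra pass widens the effective stencil, which is why the number of guard cells needed grows with the number of filter passes.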
--- Docs/source/developers/fields.rst | 2 +- Examples/Tests/implicit/CMakeLists.txt | 10 ++ ...test_2d_theta_implicit_jfnk_vandb_filtered | 115 ++++++++++++++ ...2d_theta_implicit_jfnk_vandb_filtered.json | 31 +++++ Source/Evolve/WarpXEvolve.cpp | 6 +- .../ImplicitSolvers/WarpXImplicitOps.cpp | 3 + Source/Parallelization/WarpXComm.cpp | 34 +++--- Source/WarpX.H | 8 +- 8 files changed, 184 insertions(+), 25 deletions(-) create mode 100644 Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb_filtered create mode 100644 Regression/Checksum/benchmarks_json/test_2d_theta_implicit_jfnk_vandb_filtered.json diff --git a/Docs/source/developers/fields.rst b/Docs/source/developers/fields.rst index 9d980119814..bd6a886ae2a 100644 --- a/Docs/source/developers/fields.rst +++ b/Docs/source/developers/fields.rst @@ -119,7 +119,7 @@ Bilinear filter The multi-pass bilinear filter (applied on the current density) is implemented in ``Source/Filter/``, and class ``WarpX`` holds an instance of this class in member variable ``WarpX::bilinear_filter``. For performance reasons (to avoid creating too many guard cells), this filter is directly applied in communication routines, see ``WarpX::AddCurrentFromFineLevelandSumBoundary`` above and -.. doxygenfunction:: WarpX::ApplyFilterJ(const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> &current, int lev, int idim) +.. doxygenfunction:: WarpX::ApplyFilterMF(const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> &mfvec, int lev, int idim) .. doxygenfunction:: WarpX::SumBoundaryJ(const amrex::Vector<std::array<std::unique_ptr<amrex::MultiFab>, 3>> &current, int lev, int idim, const amrex::Periodicity &period) diff --git a/Examples/Tests/implicit/CMakeLists.txt b/Examples/Tests/implicit/CMakeLists.txt index dabd4de66b8..bf378631e16 100644 --- a/Examples/Tests/implicit/CMakeLists.txt +++ b/Examples/Tests/implicit/CMakeLists.txt @@ -31,6 +31,16 @@ add_warpx_test( OFF # dependency ) +add_warpx_test( + test_2d_theta_implicit_jfnk_vandb_filtered # name + 2 # dims + 2 # nprocs + inputs_test_2d_theta_implicit_jfnk_vandb_filtered # inputs + analysis_vandb_jfnk_2d.py # analysis + diags/diag1000020 # output + OFF # dependency +) + add_warpx_test( test_2d_theta_implicit_jfnk_vandb_picmi # name 2 # dims diff --git a/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb_filtered b/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb_filtered new file mode 100644 index 00000000000..4849a5e30a3 --- /dev/null +++ b/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb_filtered @@ -0,0 +1,115 @@ +################################# +########## CONSTANTS ############ +################################# + +my_constants.n0 = 1.e30 # m^-3 +my_constants.Ti = 100. # eV +my_constants.Te = 100.
# eV +my_constants.wpe = q_e*sqrt(n0/(m_e*epsilon0)) +my_constants.de0 = clight/wpe +my_constants.nppcz = 10 # number of particles/cell in z +my_constants.dt = 0.1/wpe # s + +################################# +####### GENERAL PARAMETERS ###### +################################# +max_step = 20 +amr.n_cell = 40 40 +amr.max_grid_size = 8 +amr.blocking_factor = 8 +amr.max_level = 0 +geometry.dims = 2 +geometry.prob_lo = 0.0 0.0 # physical domain +geometry.prob_hi = 10.0*de0 10.0*de0 + +################################# +####### Boundary condition ###### +################################# +boundary.field_lo = periodic periodic +boundary.field_hi = periodic periodic + +################################# +############ NUMERICS ########### +################################# +warpx.abort_on_warning_threshold = high +warpx.serialize_initial_conditions = 1 +warpx.verbose = 1 +warpx.const_dt = dt +#warpx.cfl = 0.5656 +warpx.use_filter = 1 + +algo.maxwell_solver = Yee +algo.evolve_scheme = "theta_implicit_em" +#algo.evolve_scheme = "semi_implicit_em" + +implicit_evolve.theta = 0.5 +implicit_evolve.max_particle_iterations = 21 +implicit_evolve.particle_tolerance = 1.0e-12 + +#implicit_evolve.nonlinear_solver = "picard" +#picard.verbose = true +#picard.max_iterations = 25 +#picard.relative_tolerance = 0.0 #1.0e-12 +#picard.absolute_tolerance = 0.0 #1.0e-24 +#picard.require_convergence = false + +implicit_evolve.nonlinear_solver = "newton" +newton.verbose = true +newton.max_iterations = 20 +newton.relative_tolerance = 1.0e-12 +newton.absolute_tolerance = 0.0 +newton.require_convergence = false + +gmres.verbose_int = 2 +gmres.max_iterations = 1000 +gmres.relative_tolerance = 1.0e-8 +gmres.absolute_tolerance = 0.0 + +algo.particle_pusher = "boris" +#algo.particle_pusher = "higuera" + +algo.particle_shape = 2 +#algo.current_deposition = "direct" +#algo.current_deposition = "esirkepov" +algo.current_deposition = "villasenor" + +################################# +############ PLASMA ############# +################################# +particles.species_names = electrons protons + +electrons.charge = -q_e +electrons.mass = m_e +electrons.injection_style = "NUniformPerCell" +electrons.num_particles_per_cell_each_dim = nppcz nppcz +electrons.profile = constant +electrons.density = 1.e30 # number per m^3 +electrons.momentum_distribution_type = "gaussian" +electrons.ux_th = sqrt(Te*q_e/m_e)/clight +electrons.uy_th = sqrt(Te*q_e/m_e)/clight +electrons.uz_th = sqrt(Te*q_e/m_e)/clight + +protons.charge = q_e +protons.mass = m_p +protons.injection_style = "NUniformPerCell" +protons.num_particles_per_cell_each_dim = nppcz nppcz +protons.profile = constant +protons.density = 1.e30 # number per m^3 +protons.momentum_distribution_type = "gaussian" +protons.ux_th = sqrt(Ti*q_e/m_p)/clight +protons.uy_th = sqrt(Ti*q_e/m_p)/clight +protons.uz_th = sqrt(Ti*q_e/m_p)/clight + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 20 +diag1.diag_type = Full +diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho divE +diag1.electrons.variables = x z w ux uy uz +diag1.protons.variables = x z w ux uy uz + +warpx.reduced_diags_names = particle_energy field_energy +particle_energy.type = ParticleEnergy +particle_energy.intervals = 1 +field_energy.type = FieldEnergy +field_energy.intervals = 1 diff --git a/Regression/Checksum/benchmarks_json/test_2d_theta_implicit_jfnk_vandb_filtered.json b/Regression/Checksum/benchmarks_json/test_2d_theta_implicit_jfnk_vandb_filtered.json new file mode 100644 index 00000000000..d342c49e2fd --- 
/dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_theta_implicit_jfnk_vandb_filtered.json @@ -0,0 +1,31 @@ +{ + "lev=0": { + "Bx": 65625.24877705125, + "By": 71913.65275407257, + "Bz": 59768.79247890749, + "Ex": 56341360261928.086, + "Ey": 13926508614721.855, + "Ez": 56508162715968.17, + "divE": 5.5816922509658905e+22, + "jx": 1.8114330881270456e+19, + "jy": 2.0727708668063334e+19, + "jz": 1.7843765469944717e+19, + "rho": 494213515033.04443 + }, + "electrons": { + "particle_momentum_x": 4.888781979240524e-19, + "particle_momentum_y": 4.879904653089102e-19, + "particle_momentum_z": 4.878388335258947e-19, + "particle_position_x": 0.0042514822919144084, + "particle_position_y": 0.0042515394083575886, + "particle_weight": 2823958719279159.5 + }, + "protons": { + "particle_momentum_x": 2.0873319751377048e-17, + "particle_momentum_y": 2.0858882863041667e-17, + "particle_momentum_z": 2.0877426824914187e-17, + "particle_position_x": 0.004251275869325256, + "particle_position_y": 0.0042512738905204584, + "particle_weight": 2823958719279159.5 + } +} diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index a685afd28e7..e9540be3da7 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -616,7 +616,7 @@ void WarpX::SyncCurrentAndRho () // TODO This works only without mesh refinement const int lev = 0; if (use_filter) { - ApplyFilterJ(m_fields.get_mr_levels_alldirs(FieldType::current_fp_vay, finest_level), lev); + ApplyFilterMF(m_fields.get_mr_levels_alldirs(FieldType::current_fp_vay, finest_level), lev); } } } @@ -875,7 +875,7 @@ WarpX::OneStep_sub1 (Real cur_time) m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level), fine_lev); RestrictRhoFromFineToCoarsePatch(fine_lev); if (use_filter) { - ApplyFilterJ( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), fine_lev); + ApplyFilterMF( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), fine_lev); } SumBoundaryJ( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), @@ -953,7 +953,7 @@ WarpX::OneStep_sub1 (Real cur_time) m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level), fine_lev); RestrictRhoFromFineToCoarsePatch(fine_lev); if (use_filter) { - ApplyFilterJ( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), fine_lev); + ApplyFilterMF( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), fine_lev); } SumBoundaryJ( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), fine_lev, Geom(fine_lev).periodicity()); ApplyFilterandSumBoundaryRho( diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp index 806c3412990..3cf42f18456 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp +++ b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp @@ -52,8 +52,11 @@ WarpX::ImplicitPreRHSOp ( amrex::Real a_cur_time, bool a_from_jacobian ) { using namespace amrex::literals; + using warpx::fields::FieldType; amrex::ignore_unused( a_full_dt, a_nl_iter, a_from_jacobian ); + if (use_filter) { ApplyFilterMF(m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level), 0); } + // Advance the particle positions by 1/2 dt, // particle velocities by dt, then take average of old and new v, // deposit currents, giving J at n+1/2 diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp index d64632d964a..a0ae7ed67e9 100644 --- a/Source/Parallelization/WarpXComm.cpp +++ 
b/Source/Parallelization/WarpXComm.cpp @@ -1195,7 +1195,7 @@ WarpX::SyncCurrent (const std::string& current_fp_string) ablastr::fields::MultiLevelVectorField const& J_cp = m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level); if (use_filter) { - ApplyFilterJ(J_cp, lev+1, idim); + ApplyFilterMF(J_cp, lev+1, idim); } SumBoundaryJ(J_cp, lev+1, idim, period); } @@ -1232,7 +1232,7 @@ WarpX::SyncCurrent (const std::string& current_fp_string) if (use_filter) { - ApplyFilterJ(J_fp, lev, idim); + ApplyFilterMF(J_fp, lev, idim); } SumBoundaryJ(J_fp, lev, idim, period); } @@ -1354,32 +1354,32 @@ void WarpX::RestrictCurrentFromFineToCoarsePatch ( ablastr::coarsen::average::Coarsen(*crse[2], *fine[2], refinement_ratio ); } -void WarpX::ApplyFilterJ ( - const ablastr::fields::MultiLevelVectorField& current, +void WarpX::ApplyFilterMF ( + const ablastr::fields::MultiLevelVectorField& mfvec, const int lev, const int idim) { using ablastr::fields::Direction; - amrex::MultiFab& J = *current[lev][Direction{idim}]; + amrex::MultiFab& mf = *mfvec[lev][Direction{idim}]; - const int ncomp = J.nComp(); - const amrex::IntVect ngrow = J.nGrowVect(); - amrex::MultiFab Jf(J.boxArray(), J.DistributionMap(), ncomp, ngrow); - bilinear_filter.ApplyStencil(Jf, J, lev); + const int ncomp = mf.nComp(); + const amrex::IntVect ngrow = mf.nGrowVect(); + amrex::MultiFab mf_filtered(mf.boxArray(), mf.DistributionMap(), ncomp, ngrow); + bilinear_filter.ApplyStencil(mf_filtered, mf, lev); const int srccomp = 0; const int dstcomp = 0; - amrex::MultiFab::Copy(J, Jf, srccomp, dstcomp, ncomp, ngrow); + amrex::MultiFab::Copy(mf, mf_filtered, srccomp, dstcomp, ncomp, ngrow); } -void WarpX::ApplyFilterJ ( - const ablastr::fields::MultiLevelVectorField& current, +void WarpX::ApplyFilterMF ( + const ablastr::fields::MultiLevelVectorField& mfvec, const int lev) { for (int idim=0; idim<3; ++idim) { - ApplyFilterJ(current, lev, idim); + ApplyFilterMF(mfvec, lev, idim); } } @@ -1457,7 +1457,7 @@ void WarpX::AddCurrentFromFineLevelandSumBoundary ( if (use_filter) { - ApplyFilterJ(J_fp, lev); + ApplyFilterMF(J_fp, lev); } SumBoundaryJ(J_fp, lev, period); @@ -1476,8 +1476,8 @@ void WarpX::AddCurrentFromFineLevelandSumBoundary ( if (use_filter && J_buffer[lev+1][idim]) { - ApplyFilterJ(J_cp, lev+1, idim); - ApplyFilterJ(J_buffer, lev+1, idim); + ApplyFilterMF(J_cp, lev+1, idim); + ApplyFilterMF(J_buffer, lev+1, idim); MultiFab::Add( *J_buffer[lev+1][idim], *J_cp[lev+1][idim], @@ -1491,7 +1491,7 @@ void WarpX::AddCurrentFromFineLevelandSumBoundary ( } else if (use_filter) // but no buffer { - ApplyFilterJ(J_cp, lev+1, idim); + ApplyFilterMF(J_cp, lev+1, idim); ablastr::utils::communication::ParallelAdd( mf, *J_cp[lev+1][idim], 0, 0, diff --git a/Source/WarpX.H b/Source/WarpX.H index 4dc3ab8c8be..da1a4b5a269 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -1216,12 +1216,12 @@ private: int lev); void StoreCurrent (int lev); void RestoreCurrent (int lev); - void ApplyFilterJ ( - const ablastr::fields::MultiLevelVectorField& current, + void ApplyFilterMF ( + const ablastr::fields::MultiLevelVectorField& mfvec, int lev, int idim); - void ApplyFilterJ ( - const ablastr::fields::MultiLevelVectorField& current, + void ApplyFilterMF ( + const ablastr::fields::MultiLevelVectorField& mfvec, int lev); void SumBoundaryJ ( const ablastr::fields::MultiLevelVectorField& current, From 1e287b7775dbd05b1d5ab7a949a2827499fe9d2e Mon Sep 17 00:00:00 2001 From: "S. 
Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Mon, 11 Nov 2024 15:52:23 -0800 Subject: [PATCH 083/278] =?UTF-8?q?Fixing=20some=20bugs=20that=20lead=20to?= =?UTF-8?q?=20non-convergence.=20Relaxing=20tolerance=20whi=E2=80=A6=20(#5?= =?UTF-8?q?446)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit …ch is currently hard coded, and adding an assert to force EB to be enabled in order to avoid a segfault in AMReX when computing the gradient solution. This PR partially addresses https://github.com/ECP-WarpX/WarpX/issues/5444. This PR adds semi-coarsening in 3D and then adds an assert to keep the magnetostatic solver from being run without an EB. This is required since in AMReX MLMG->getGradSolution will segfault when not using an EB. It should also be noted that https://github.com/ECP-WarpX/WarpX/pull/5175 will use a different scheme around the embedded boundaries to compute gradients, and will likely mitigate these issues. A workaround in RZ to use the outer edge is to enable the embedded boundary and set the boundary radius larger than the outer grid radius. This works like it would without an embedded boundary and can be used until either the refactor or the bugfix in AMReX for getGradSolution. --------- Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../MagnetostaticSolver.cpp | 32 +++++++++++++--- Source/ablastr/fields/VectorPoissonSolver.H | 37 +++++++++++++++++-- 2 files changed, 60 insertions(+), 9 deletions(-) diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp index 5c28ff1f3c7..c3acf8edd84 100644 --- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp +++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp @@ -65,7 +65,16 @@ WarpX::ComputeMagnetostaticField() // Fields have been reset in Electrostatic solver for this time step, these fields // are added into the B fields after electrostatic solve - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(this->max_level == 0, "Magnetostatic solver not implemented with mesh refinement."); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(this->max_level == 0, + "Magnetostatic solver not implemented with mesh refinement."); + +#if defined(AMREX_USE_EB) + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(EB::enabled(), + "Magnetostatic Solver currently requires an embedded boundary to be installed for " + "compatibility with AMReX when compiling with EB support. " + "Current workaround is to install an EB outside of domain or recompile with EB support off." + "Workaround for https://github.com/AMReX-Codes/amrex/issues/4223"); +#endif AddMagnetostaticFieldLabFrame(); } @@ -128,7 +137,13 @@ WarpX::AddMagnetostaticFieldLabFrame() // const amrex::Real magnetostatic_absolute_tolerance = self_fields_absolute_tolerance*PhysConst::c; // temporary fix!!!
const amrex::Real magnetostatic_absolute_tolerance = 0.0; - const amrex::Real self_fields_required_precision = 1e-12; + amrex::Real self_fields_required_precision; + if constexpr (std::is_same<amrex::Real, float>::value) { + self_fields_required_precision = 1e-5; + } + else { + self_fields_required_precision = 1e-11; + } const int self_fields_max_iters = 200; const int self_fields_verbosity = 2; @@ -187,11 +202,16 @@ WarpX::computeVectorPotential (ablastr::fields::MultiLevelVectorField const& cur }); #if defined(AMREX_USE_EB) - amrex::Vector<amrex::EBFArrayBoxFactory const *> factories; - for (int lev = 0; lev <= finest_level; ++lev) { - factories.push_back(&WarpX::fieldEBFactory(lev)); + std::optional<amrex::Vector<amrex::EBFArrayBoxFactory const *> > eb_farray_box_factory; + auto &warpx = WarpX::GetInstance(); + + if (EB::enabled()) { + amrex::Vector<amrex::EBFArrayBoxFactory const *> factories; + for (int lev = 0; lev <= finest_level; ++lev) { + factories.push_back(&warpx.fieldEBFactory(lev)); + } + eb_farray_box_factory = factories; } - const std::optional<amrex::Vector<amrex::EBFArrayBoxFactory const *> > eb_farray_box_factory({factories}); #else + const std::optional<amrex::Vector<amrex::EBFArrayBoxFactory const *> > eb_farray_box_factory; #endif diff --git a/Source/ablastr/fields/VectorPoissonSolver.H b/Source/ablastr/fields/VectorPoissonSolver.H index f6dd2a99cf1..a41d242e2c2 100644 --- a/Source/ablastr/fields/VectorPoissonSolver.H +++ b/Source/ablastr/fields/VectorPoissonSolver.H @@ -1,4 +1,4 @@ -/* Copyright 2022 S. Eric Clark, LLNL +/* Copyright 2022-2024 S. Eric Clark (Helion Energy, formerly LLNL) * * This file is part of WarpX. * @@ -137,10 +137,41 @@ computeVectorPotential ( amrex::Vector > co ); } - const amrex::LPInfo& info = amrex::LPInfo(); - // Loop over dimensions of A to solve each component individually for (int lev=0; lev<=finest_level; lev++) { + amrex::LPInfo info; + +#ifdef WARPX_DIM_RZ + constexpr bool is_rz = true; +#else + constexpr bool is_rz = false; +#endif + + amrex::Array<amrex::Real,AMREX_SPACEDIM> const dx + {AMREX_D_DECL(geom[lev].CellSize(0), + geom[lev].CellSize(1), + geom[lev].CellSize(2))}; + + + if (!eb_enabled && !is_rz) { + // Determine whether to use semi-coarsening + int max_semicoarsening_level = 0; + int semicoarsening_direction = -1; + const auto min_dir = static_cast<int>(std::distance(dx.begin(), + std::min_element(dx.begin(), dx.end()))); + const auto max_dir = static_cast<int>(std::distance(dx.begin(), + std::max_element(dx.begin(), dx.end()))); + if (dx[max_dir] > dx[min_dir]) { + semicoarsening_direction = max_dir; + max_semicoarsening_level = static_cast<int>(std::log2(dx[max_dir] / dx[min_dir])); + } + if (max_semicoarsening_level > 0) { + info.setSemicoarsening(true); + info.setMaxSemicoarseningLevel(max_semicoarsening_level); + info.setSemicoarseningDirection(semicoarsening_direction); + } + } + amrex::MLEBNodeFDLaplacian linopx, linopy, linopz; if (eb_enabled) { #ifdef AMREX_USE_EB From 1b270717a3f56a3e433d19bcb3f047a9c1c1ea0f Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 12 Nov 2024 00:52:54 +0000 Subject: [PATCH 084/278] [pre-commit.ci] pre-commit autoupdate (#5450) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.2 → v0.7.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.2...v0.7.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0ee981588e3..d9a0a8bfdea 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@
repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.2 + rev: v0.7.3 hooks: # Run the linter - id: ruff From 4a2590e940efbfd7fe73d85c5986b228bdb82b9e Mon Sep 17 00:00:00 2001 From: David Grote Date: Tue, 12 Nov 2024 12:40:12 -0800 Subject: [PATCH 085/278] Set use_filter false for implicit evolve schemes (#5453) This PR sets the `use_filter` input parameter to false by default for the implicit evolve schemes. Note that this does not affect any of the related CI tests since the parameter is explicitly specified in all cases. --- Docs/source/usage/parameters.rst | 9 ++++++--- Source/WarpX.cpp | 7 ++++++- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 1b5c3e7b186..37b0e1f6656 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2186,10 +2186,13 @@ Time step Filtering ^^^^^^^^^ -* ``warpx.use_filter`` (`0` or `1`; default: `1`, except for RZ FDTD) - Whether to smooth the charge and currents on the mesh, after depositing them from the macro-particles. +* ``warpx.use_filter`` (`0` or `1`) + Whether to use filtering in the simulation. + With the explicit evolve scheme, the filtering is turned on by default, except for RZ FDTD. + With the implicit evolve schemes, the filtering is turned off by default. + The filtering smoothes the charge and currents on the mesh, after depositing them from the macro-particles. + With implicit schemes, the electric field is also filtered (to maintain consistency for energy conservation). This uses a bilinear filter (see the :ref:`filtering section `). - The default is `1` in all cases, except for simulations in RZ geometry using the FDTD solver. With the RZ PSATD solver, the filtering is done in :math:`k`-space. .. 
warning:: diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index cb46f3129c8..5c2f16f317d 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -488,6 +488,7 @@ WarpX::ReadParameters () if (electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT && !EB::enabled()) { throw std::runtime_error("ECP Solver requires to enable embedded boundaries at runtime."); } + pp_algo.query_enum_sloppy("evolve_scheme", evolve_scheme, "-_"); } { @@ -706,6 +707,11 @@ WarpX::ReadParameters () pp_warpx.queryarr("dt_update_interval", dt_interval_vec); dt_update_interval = utils::parser::IntervalsParser(dt_interval_vec); + // Filter defaults to true for the explicit scheme, and false for the implicit schemes + if (evolve_scheme != EvolveScheme::Explicit) { + use_filter = false; + } + // Filter currently not working with FDTD solver in RZ geometry: turn OFF by default // (see https://github.com/ECP-WarpX/WarpX/issues/1943) #ifdef WARPX_DIM_RZ @@ -1113,7 +1119,6 @@ WarpX::ReadParameters () pp_algo.query_enum_sloppy("current_deposition", current_deposition_algo, "-_"); pp_algo.query_enum_sloppy("charge_deposition", charge_deposition_algo, "-_"); pp_algo.query_enum_sloppy("particle_pusher", particle_pusher_algo, "-_"); - pp_algo.query_enum_sloppy("evolve_scheme", evolve_scheme, "-_"); // check for implicit evolve scheme if (evolve_scheme == EvolveScheme::SemiImplicitEM) { From b81317a7083e3d90203064a065eed1a546bd5e56 Mon Sep 17 00:00:00 2001 From: David Grote Date: Tue, 12 Nov 2024 13:57:59 -0800 Subject: [PATCH 086/278] Add strang implicit spectral em redo (#5027) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This implements use of the PSATD field advance coupled with the implicit solver, using a Strang split advance. - Advect Maxwell using PSATD with no J, ½ step - Advance particles along with dE/dt = -J implicitly, full step, iterating - Advect Maxwell using PSATD with no J, ½ step This requires the input parameter psatd.update_with_rho = 0. With psatd.periodic_single_box_fft = 1, exact energy conservation is obtained. Otherwise good conservation is seen, but not exact (will depend on parameters). Convergence is found for wpedt <= 1.9 (compared to wpedt < 0.25 for FDTD). This PR replaces PR #4662. A task for a future PR would be to implement specialized source free spectral advance routines (as noted in source comments). 
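The half/full/half pattern above is standard Strang (symmetric operator) splitting, which is second-order accurate in the time step. A minimal standalone sketch of why (toy problem only, none of this is WarpX code): split the harmonic oscillator x' = v, v' = -x into a position "drift" and a velocity "kick", with the drift playing the role of the source-free spectral field advance and the kick playing the role of the implicit particle-plus-longitudinal-E advance.

```cpp
#include <cmath>
#include <cstdio>

int main ()
{
    double const pi = std::acos(-1.0);
    double const t_end = 2.0*pi;  // one period: the exact solution returns to (x, v) = (1, 0)
    for (int nsteps : {25, 50, 100, 200}) {
        double const dt = t_end/nsteps;
        double x = 1.0, v = 0.0;
        for (int step = 0; step < nsteps; ++step) {
            x += 0.5*dt*v;  // half step of operator A ("drift")
            v -= dt*x;      // full step of operator B ("kick")
            x += 0.5*dt*v;  // half step of operator A ("drift")
        }
        std::printf("nsteps=%4d  error=%.3e\n", nsteps, std::hypot(x - 1.0, v));
    }
    return 0;
}
```

Halving the step size cuts the printed error by roughly a factor of four; the same ordering argument is what lets the scheme here retain second-order accuracy while the two half field advances are done spectrally and source free.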
--- Docs/source/usage/parameters.rst | 14 +- Examples/Tests/implicit/CMakeLists.txt | 12 ++ Examples/Tests/implicit/analysis_2d_psatd.py | 41 ++++++ ...inputs_test_2d_theta_implicit_strang_psatd | 98 +++++++++++++ .../test_2d_theta_implicit_strang_psatd.json | 31 ++++ .../ImplicitSolvers/CMakeLists.txt | 1 + .../ImplicitSolvers/ImplicitSolverLibrary.H | 1 + .../FieldSolver/ImplicitSolvers/Make.package | 1 + .../StrangImplicitSpectralEM.H | 107 ++++++++++++++ .../StrangImplicitSpectralEM.cpp | 138 ++++++++++++++++++ .../ImplicitSolvers/WarpXImplicitOps.cpp | 38 +++++ .../ImplicitSolvers/WarpXSolverVec.H | 3 +- .../ImplicitSolvers/WarpXSolverVec.cpp | 11 +- Source/Utils/WarpXAlgorithmSelection.H | 1 + Source/WarpX.H | 1 + Source/WarpX.cpp | 27 +++- 16 files changed, 512 insertions(+), 13 deletions(-) create mode 100755 Examples/Tests/implicit/analysis_2d_psatd.py create mode 100644 Examples/Tests/implicit/inputs_test_2d_theta_implicit_strang_psatd create mode 100644 Regression/Checksum/benchmarks_json/test_2d_theta_implicit_strang_psatd.json create mode 100644 Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.H create mode 100644 Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 37b0e1f6656..7e513f4484d 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -93,6 +93,14 @@ Overall simulation parameters The PS-JFNK method is described in `Angus et al., An implicit particle code with exact energy and charge conservation for electromagnetic studies of dense plasmas `__ . (The version implemented in WarpX is an updated version that includes the relativistic gamma factor for the particles.) Also see `Chen et al., An energy- and charge-conserving, implicit, electrostatic particle-in-cell algorithm. `__ . Exact energy conservation requires that the interpolation stencil used for the field gather match that used for the current deposition. ``algo.current_deposition = direct`` must be used with ``interpolation.galerkin_scheme = 0``, and ``algo.current_deposition = Esirkepov`` must be used with ``interpolation.galerkin_scheme = 1``. If using ``algo.current_deposition = villasenor``, the corresponding field gather routine will automatically be selected and the ``interpolation.galerkin_scheme`` flag does not need to be specified. The Esirkepov and villasenor deposition schemes are charge-conserving.
+ * ``strang_implicit_spectral_em``: Use a fully implicit electromagnetic solver. All of the comments for ``theta_implicit_em`` + above apply here as well (except that theta is fixed to 0.5 and that charge will not be conserved). + In this version, the advance is Strang split, with a half advance of the source free Maxwell's equations (with a spectral solver), a full advance of the particles plus longitudinal E field, and a second half advance of the source free Maxwell's equations. + The advantage of this method is that with the spectral advance of the fields, it is dispersionless. + Note that exact energy conservation is achieved only with one grid block and ``psatd.periodic_single_box_fft == 1``. Otherwise, + the energy conservation is spoiled because of the inconsistency of the periodic assumption of the spectral solver and the + non-periodic behavior of the individual blocks. + * ``semi_implicit_em``: Use an approximately energy conserving semi-implicit electromagnetic solver. Choices for the nonlinear solver include a Picard iteration scheme and particle-suppressed JFNK. Note that this method has the CFL limitation :math:`\Delta t < c/\sqrt( \sum_i 1/\Delta x_i^2 )`. The Picard solver for this method can only be expected to work well when :math:`\omega_{pe} \Delta t` is less than one. The method is described in `Chen et al., A semi-implicit, energy- and charge-conserving particle-in-cell algorithm for the relativistic Vlasov-Maxwell equations `__. @@ -105,16 +113,16 @@ Overall simulation parameters exactly energy conserving, but the solver may perform better. * ``implicit_evolve.nonlinear_solver`` (`string`, default: None) - When `algo.evolve_scheme` is either `theta_implicit_em` or `semi_implicit_em`, this sets the nonlinear solver used + When `algo.evolve_scheme` is either `theta_implicit_em`, `strang_implicit_spectral_em`, or `semi_implicit_em`, this sets the nonlinear solver used to advance the field-particle system in time. Options are `picard` or `newton`. * ``implicit_evolve.max_particle_iterations`` (`integer`, default: 21) - When `algo.evolve_scheme` is either `theta_implicit_em` or `semi_implicit_em` and `implicit_evolve.nonlinear_solver = newton` + When `algo.evolve_scheme` is either `theta_implicit_em`, `strang_implicit_spectral_em`, or `semi_implicit_em` and `implicit_evolve.nonlinear_solver = newton` , this sets the maximum number of iterations for the method used to obtain a self-consistent update of the particles at each iteration in the JFNK process. * ``implicit_evolve.particle_tolerance`` (`float`, default: 1.e-10) - When `algo.evolve_scheme` is either `theta_implicit_em` or `semi_implicit_em` and `implicit_evolve.nonlinear_solver = newton` + When `algo.evolve_scheme` is either `theta_implicit_em`, `strang_implicit_spectral_em`, or `semi_implicit_em` and `implicit_evolve.nonlinear_solver = newton` , this sets the relative tolerance for the iterative method used to obtain a self-consistent update of the particles at each iteration in the JFNK process. diff --git a/Examples/Tests/implicit/CMakeLists.txt b/Examples/Tests/implicit/CMakeLists.txt index bf378631e16..eeb1ff87804 100644 --- a/Examples/Tests/implicit/CMakeLists.txt +++ b/Examples/Tests/implicit/CMakeLists.txt @@ -50,3 +50,15 @@ add_warpx_test( diags/diag1000020 # output OFF # dependency ) + +if(WarpX_FFT) + add_warpx_test( + test_2d_theta_implicit_strang_psatd # name + 2 # dims + 2 # nprocs + inputs_test_2d_theta_implicit_strang_psatd # inputs + analysis_2d_psatd.py # analysis + diags/diag1000020 # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/implicit/analysis_2d_psatd.py b/Examples/Tests/implicit/analysis_2d_psatd.py new file mode 100755 index 00000000000..3ccc3880189 --- /dev/null +++ b/Examples/Tests/implicit/analysis_2d_psatd.py @@ -0,0 +1,41 @@ +#!/usr/bin/env python3 + +# Copyright 2024 Justin Angus, David Grote +# +# +# This file is part of WarpX. +# +# License: BSD-3-Clause-LBNL +# +# This is a script that analyses the simulation results from the script `inputs_vandb_2d`. +# This simulates a 2D periodic plasma using the implicit solver +# with the Villasenor deposition using shape factor 2.
+import os +import sys + +import numpy as np + +sys.path.insert(1, "../../../../warpx/Regression/Checksum/") +import checksumAPI + +# this will be the name of the plot file +fn = sys.argv[1] + +field_energy = np.loadtxt("diags/reducedfiles/field_energy.txt", skiprows=1) +particle_energy = np.loadtxt("diags/reducedfiles/particle_energy.txt", skiprows=1) + +total_energy = field_energy[:, 2] + particle_energy[:, 2] + +delta_E = (total_energy - total_energy[0]) / total_energy[0] +max_delta_E = np.abs(delta_E).max() + +# This case should have near machine precision conservation of energy +tolerance_rel_energy = 2.1e-14 + +print(f"max change in energy: {max_delta_E}") +print(f"tolerance: {tolerance_rel_energy}") + +assert max_delta_E < tolerance_rel_energy + +test_name = os.path.split(os.getcwd())[1] +checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/implicit/inputs_test_2d_theta_implicit_strang_psatd b/Examples/Tests/implicit/inputs_test_2d_theta_implicit_strang_psatd new file mode 100644 index 00000000000..f68d1d324ac --- /dev/null +++ b/Examples/Tests/implicit/inputs_test_2d_theta_implicit_strang_psatd @@ -0,0 +1,98 @@ +################################# +########## CONSTANTS ############ +################################# + +my_constants.n0 = 1.e30 # m^-3 +my_constants.nz = 40 +my_constants.Ti = 100. # eV +my_constants.Te = 100. # eV +my_constants.wpe = q_e*sqrt(n0/(m_e*epsilon0)) +my_constants.de0 = clight/wpe +my_constants.nppcz = 10 # number of particles/cell in z +my_constants.dt = 0.1/wpe # s + +################################# +####### GENERAL PARAMETERS ###### +################################# +max_step = 20 +amr.n_cell = nz nz +amr.max_grid_size = nz +amr.max_level = 0 +geometry.dims = 2 +geometry.prob_lo = 0.0 0.0 # physical domain +geometry.prob_hi = 10.0*de0 10.0*de0 + +################################# +####### Boundary condition ###### +################################# +boundary.field_lo = periodic periodic +boundary.field_hi = periodic periodic + +################################# +############ NUMERICS ########### +################################# +warpx.serialize_initial_conditions = 1 +warpx.verbose = 1 +warpx.const_dt = dt +#warpx.cfl = 0.5656 +warpx.use_filter = 0 + +algo.maxwell_solver = psatd +algo.evolve_scheme = strang_implicit_spectral_em +implicit_evolve.nonlinear_solver = "picard" + +picard.verbose = true +picard.max_iterations = 9 +picard.relative_tolerance = 0.0 +picard.absolute_tolerance = 0.0 +picard.require_convergence = false + +algo.particle_pusher = "boris" + +algo.particle_shape = 2 +algo.current_deposition = direct +algo.charge_deposition = standard +algo.field_gathering = energy-conserving +interpolation.galerkin_scheme = 0 + +psatd.periodic_single_box_fft = 1 +psatd.update_with_rho = 0 + +################################# +############ PLASMA ############# +################################# +particles.species_names = electrons protons + +electrons.species_type = electron +electrons.injection_style = "NUniformPerCell" +electrons.num_particles_per_cell_each_dim = nppcz nppcz +electrons.profile = constant +electrons.density = n0 +electrons.momentum_distribution_type = gaussian +electrons.ux_th = sqrt(Te*q_e/m_e)/clight +electrons.uy_th = sqrt(Te*q_e/m_e)/clight +electrons.uz_th = sqrt(Te*q_e/m_e)/clight + +protons.species_type = proton +protons.injection_style = "NUniformPerCell" +protons.num_particles_per_cell_each_dim = nppcz nppcz +protons.profile = constant +protons.density = n0 +protons.momentum_distribution_type = gaussian 
+protons.ux_th = sqrt(Ti*q_e/m_p)/clight +protons.uy_th = sqrt(Ti*q_e/m_p)/clight +protons.uz_th = sqrt(Ti*q_e/m_p)/clight + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 20 +diag1.diag_type = Full +diag1.fields_to_plot = Ex Ey Ez Bx By Bz jx jy jz rho divE +diag1.electrons.variables = x z w ux uy uz +diag1.protons.variables = x z w ux uy uz + +warpx.reduced_diags_names = particle_energy field_energy +particle_energy.type = ParticleEnergy +particle_energy.intervals = 1 +field_energy.type = FieldEnergy +field_energy.intervals = 1 diff --git a/Regression/Checksum/benchmarks_json/test_2d_theta_implicit_strang_psatd.json b/Regression/Checksum/benchmarks_json/test_2d_theta_implicit_strang_psatd.json new file mode 100644 index 00000000000..5281804abba --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_theta_implicit_strang_psatd.json @@ -0,0 +1,31 @@ +{ + "lev=0": { + "Bx": 60642.062637340816, + "By": 89855.09371265332, + "Bz": 54561.47120738846, + "Ex": 81536346169528.28, + "Ey": 13888711042388.54, + "Ez": 86853122458391.0, + "divE": 9.492653438830812e+22, + "jx": 2.5941826848709296e+19, + "jy": 2.9929071160915993e+19, + "jz": 2.692985701872205e+19, + "rho": 851978517887.51 + }, + "electrons": { + "particle_momentum_x": 4.864385990952573e-19, + "particle_momentum_y": 4.879723483907468e-19, + "particle_momentum_z": 4.865564630727981e-19, + "particle_position_x": 0.004250851253052539, + "particle_position_y": 0.0042513622554793, + "particle_weight": 2823958719279159.5 + }, + "protons": { + "particle_momentum_x": 2.0934469726422704e-17, + "particle_momentum_y": 2.0929630794865952e-17, + "particle_momentum_z": 2.093085625201003e-17, + "particle_position_x": 0.004251276208274589, + "particle_position_y": 0.004251274670600805, + "particle_weight": 2823958719279159.5 + } +} diff --git a/Source/FieldSolver/ImplicitSolvers/CMakeLists.txt b/Source/FieldSolver/ImplicitSolvers/CMakeLists.txt index 04abc9d3e91..529336c4d7c 100644 --- a/Source/FieldSolver/ImplicitSolvers/CMakeLists.txt +++ b/Source/FieldSolver/ImplicitSolvers/CMakeLists.txt @@ -5,6 +5,7 @@ foreach(D IN LISTS WarpX_DIMS) ImplicitSolver.cpp SemiImplicitEM.cpp ThetaImplicitEM.cpp + StrangImplicitSpectralEM.cpp WarpXImplicitOps.cpp WarpXSolverVec.cpp ) diff --git a/Source/FieldSolver/ImplicitSolvers/ImplicitSolverLibrary.H b/Source/FieldSolver/ImplicitSolvers/ImplicitSolverLibrary.H index 423957ef061..586c7163742 100644 --- a/Source/FieldSolver/ImplicitSolvers/ImplicitSolverLibrary.H +++ b/Source/FieldSolver/ImplicitSolvers/ImplicitSolverLibrary.H @@ -9,5 +9,6 @@ #include "SemiImplicitEM.H" // IWYU pragma: export #include "ThetaImplicitEM.H" // IWYU pragma: export +#include "StrangImplicitSpectralEM.H" // IWYU pragma: export #endif diff --git a/Source/FieldSolver/ImplicitSolvers/Make.package b/Source/FieldSolver/ImplicitSolvers/Make.package index 16cd4003490..8f39824d875 100644 --- a/Source/FieldSolver/ImplicitSolvers/Make.package +++ b/Source/FieldSolver/ImplicitSolvers/Make.package @@ -1,6 +1,7 @@ CEXE_sources += ImplicitSolver.cpp CEXE_sources += SemiImplicitEM.cpp CEXE_sources += ThetaImplicitEM.cpp +CEXE_sources += StrangImplicitSpectralEM.cpp CEXE_sources += WarpXImplicitOps.cpp CEXE_sources += WarpXSolverVec.cpp diff --git a/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.H b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.H new file mode 100644 index 00000000000..a674dd6de76 --- /dev/null +++ b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.H @@ -0,0 +1,107 @@ +/* 
Copyright 2024 David Grote + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef STRANG_IMPLICIT_SPECTRALEM_H_ +#define STRANG_IMPLICIT_SPECTRALEM_H_ + +#include "FieldSolver/ImplicitSolvers/WarpXSolverVec.H" + +#include +#include +#include + +#include "ImplicitSolver.H" + +/** @file + * Implicit spectral electromagnetic time solver class. This is a fully implicit + * algorithm where both the fields and particles are treated implicitly. + * + * The time stencil is + * Advance (Eg^n, Bg^n) -> (Eg^{n+1/2}, Bg^{n+1/2}) source free // E transverse + * Iterate: + * Eg^{n+1} = Eg^n + c^2*dt*( - mu0*Jg^{n+1/2} ) // E longitudinal + * xp^{n+1} = xp^n + dt*up^{n+1/2}/(0.5*(gammap^n + gammap^{n+1})) + * up^{n+1} = up^n + dt*qp/mp*(Ep^{n+1/2} + up^{n+1/2}/gammap^{n+1/2} x Bp^{n+1/2}) + * Advance (Eg^n+1/2, Bg^n+1/2) -> (Eg^{n+1}, Bg^{n+1}) source free // E transverse + * + * The algorithm is exactly energy conserving only with a single box, periodic fft (psatd.periodic_single_box_fft = 1). + * With multiple boxes, energy is not conserved since the ffts in each box assume periodicity in the box, which + * is not consistent with the current. + * The algorithm is numerically stable for any time step. + * I.e., the CFL condition for light waves does not + * have to be satisfied and the time step is not limited by the plasma period. However, how + * efficiently the algorithm can use large time steps depends strongly on the nonlinear solver. + * Furthermore, the time step should always be such that particles do not travel outside the + * ghost region of the box they live in, which is an MPI-related limitation. The time step + * is always limited by the need to resolve the appropriate physics. + * + */ + +class StrangImplicitSpectralEM : public ImplicitSolver +{ +public: + + StrangImplicitSpectralEM() = default; + + ~StrangImplicitSpectralEM() override = default; + + // Prohibit Move and Copy operations + StrangImplicitSpectralEM(const StrangImplicitSpectralEM&) = delete; + StrangImplicitSpectralEM& operator=(const StrangImplicitSpectralEM&) = delete; + StrangImplicitSpectralEM(StrangImplicitSpectralEM&&) = delete; + StrangImplicitSpectralEM& operator=(StrangImplicitSpectralEM&&) = delete; + + void Define ( WarpX* a_WarpX ) override; + + void PrintParameters () const override; + + void OneStep ( amrex::Real a_time, + amrex::Real a_dt, + int a_step ) override; + + void ComputeRHS ( WarpXSolverVec& a_RHS, + const WarpXSolverVec& a_E, + amrex::Real a_time, + amrex::Real a_dt, + int a_nl_iter, + bool a_from_jacobian ) override; + +private: + + /** + * \brief Solver vectors to be used in the nonlinear solver to solve for the + * electric field E. The main logic for determining which variables should be + * WarpXSolverVec type is that they must have the same size and have the same + * centering of the data as the variable being solved for, which is E here. + * For example, if using a Yee grid then a container for curlB could be a + * WarpXSolverVec, but magnetic field B should not be. + */ + WarpXSolverVec m_E, m_Eold; + + /** + * \brief B is a derived variable from E. Need to save Bold to update B during + * the iterative nonlinear solve for E. Bold is owned here, but only used by WarpX. + * It is not used directly by the nonlinear solver, nor is it the same size as the + * solver vector (size E), and so it should not be WarpXSolverVec type.
+ */ + amrex::Vector<std::array< std::unique_ptr<amrex::MultiFab>, 3 > > m_Bold; + + /** + * \brief Update the E and B fields owned by WarpX + */ + void UpdateWarpXFields ( WarpXSolverVec const& a_E, + amrex::Real a_time, + amrex::Real a_dt ); + + /** + * \brief Nonlinear solver is for the time-centered values of E. After + * the solver, need to use m_E and m_Eold to compute E^{n+1} + */ + void FinishFieldUpdate ( amrex::Real a_new_time ); + +}; + +#endif diff --git a/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp new file mode 100644 index 00000000000..1d463bcb365 --- /dev/null +++ b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp @@ -0,0 +1,138 @@ +/* Copyright 2024 David Grote + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#include "Fields.H" +#include "StrangImplicitSpectralEM.H" +#include "WarpX.H" + +using namespace warpx::fields; +using namespace amrex::literals; + +void StrangImplicitSpectralEM::Define ( WarpX* const a_WarpX ) +{ + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !m_is_defined, + "StrangImplicitSpectralEM object is already defined!"); + + // Retain a pointer back to main WarpX class + m_WarpX = a_WarpX; + + // Define E and Eold vectors + m_E.Define( m_WarpX, "Efield_fp" ); + m_Eold.Define( m_E ); + + + // Parse nonlinear solver parameters + const amrex::ParmParse pp_implicit_evolve("implicit_evolve"); + parseNonlinearSolverParams( pp_implicit_evolve ); + + // Define the nonlinear solver + m_nlsolver->Define(m_E, this); + m_is_defined = true; + +} + +void StrangImplicitSpectralEM::PrintParameters () const +{ + if (!m_WarpX->Verbose()) { return; } + amrex::Print() << "\n"; + amrex::Print() << "------------------------------------------------------------------------" << "\n"; + amrex::Print() << "----------- STRANG SPLIT IMPLICIT SPECTRAL EM SOLVER PARAMETERS --------" << "\n"; + amrex::Print() << "------------------------------------------------------------------------" << "\n"; + amrex::Print() << "max particle iterations: " << m_max_particle_iterations << "\n"; + amrex::Print() << "particle tolerance: " << m_particle_tolerance << "\n"; + if (m_nlsolver_type==NonlinearSolverType::Picard) { + amrex::Print() << "Nonlinear solver type: Picard\n"; + } + else if (m_nlsolver_type==NonlinearSolverType::Newton) { + amrex::Print() << "Nonlinear solver type: Newton\n"; + } + m_nlsolver->PrintParams(); + amrex::Print() << "-----------------------------------------------------------\n\n"; +} + +void StrangImplicitSpectralEM::OneStep ( amrex::Real a_time, + amrex::Real a_dt, + int a_step ) +{ + amrex::ignore_unused(a_step); + + // Fields have E^{n} and B^{n} + // Particles have p^{n} and x^{n}.
+ + // Save the values at the start of the time step, + m_WarpX->SaveParticlesAtImplicitStepStart(); + + // Advance the fields to time n+1/2 source free + m_WarpX->SpectralSourceFreeFieldAdvance(); + + // Save the fields at the start of the step + m_Eold.Copy( FieldType::Efield_fp ); + m_E.Copy(m_Eold); // initial guess for E + + amrex::Real const half_time = a_time + 0.5_rt*a_dt; + + // Solve nonlinear system for E at t_{n+1/2} + // Particles will be advanced to t_{n+1/2} + m_nlsolver->Solve( m_E, m_Eold, half_time, a_dt ); + + // Update WarpX owned Efield_fp and Bfield_fp to t_{n+1/2} + UpdateWarpXFields( m_E, half_time, a_dt ); + + // Advance particles from time n+1/2 to time n+1 + m_WarpX->FinishImplicitParticleUpdate(); + + // Advance E and B fields from time n+1/2 to time n+1 + amrex::Real const new_time = a_time + a_dt; + FinishFieldUpdate( new_time ); + + // Advance the fields to time n+1 source free + m_WarpX->SpectralSourceFreeFieldAdvance(); + +} + +void StrangImplicitSpectralEM::ComputeRHS ( WarpXSolverVec& a_RHS, + WarpXSolverVec const & a_E, + amrex::Real a_time, + amrex::Real a_dt, + int a_nl_iter, + bool a_from_jacobian ) +{ + // Update WarpX-owned Efield_fp and Bfield_fp using current state of + // E from the nonlinear solver at time n+1/2 + UpdateWarpXFields( a_E, a_time, a_dt ); + + // Self consistently update particle positions and velocities using the + // current state of the fields E and B. Deposit current density at time n+1/2. + m_WarpX->ImplicitPreRHSOp( a_time, a_dt, a_nl_iter, a_from_jacobian ); + + // For Strang split implicit PSATD, the RHS = -dt*mu*c**2*J + bool const allow_type_mismatch = true; + a_RHS.Copy(FieldType::current_fp, warpx::fields::FieldType::None, allow_type_mismatch); + amrex::Real constexpr coeff = PhysConst::c * PhysConst::c * PhysConst::mu0; + a_RHS.scale(-coeff * 0.5_rt*a_dt); + +} + +void StrangImplicitSpectralEM::UpdateWarpXFields (WarpXSolverVec const & a_E, + amrex::Real /*a_time*/, + amrex::Real /*a_dt*/) +{ + + // Update Efield_fp owned by WarpX + m_WarpX->SetElectricFieldAndApplyBCs( a_E ); + +} + +void StrangImplicitSpectralEM::FinishFieldUpdate ( amrex::Real /*a_new_time*/ ) +{ + // Eg^{n+1} = 2*E_g^{n+1/2} - E_g^n + amrex::Real const c0 = 1._rt/0.5_rt; + amrex::Real const c1 = 1._rt - c0; + m_E.linComb( c0, m_E, c1, m_Eold ); + m_WarpX->SetElectricFieldAndApplyBCs( m_E ); + +} diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp index 3cf42f18456..fe854881ea3 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp +++ b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp @@ -121,6 +121,44 @@ WarpX::ApplyMagneticFieldBCs() ApplyBfieldBoundary(0, PatchType::fine, DtType::Full); } +void +WarpX::SpectralSourceFreeFieldAdvance () +{ + using namespace amrex::literals; + using warpx::fields::FieldType; + // Do the first piece of the Strang splitting, source free advance of E and B + // It would be more efficient to write a specialized PSATD advance that does not use J, + // but this works for now. 
+ + // Create temporary MultiFabs to hold J + int const lev = 0; + ablastr::fields::VectorField current_fp = m_fields.get_alldirs(FieldType::current_fp, lev); + amrex::MultiFab* rho_fp = m_fields.get(FieldType::rho_fp, lev); + amrex::MultiFab j0(current_fp[0]->boxArray(), current_fp[0]->DistributionMap(), + current_fp[0]->nComp(), current_fp[0]->nGrowVect()); + amrex::MultiFab j1(current_fp[1]->boxArray(), current_fp[1]->DistributionMap(), + current_fp[1]->nComp(), current_fp[1]->nGrowVect()); + amrex::MultiFab j2(current_fp[2]->boxArray(), current_fp[2]->DistributionMap(), + current_fp[2]->nComp(), current_fp[2]->nGrowVect()); + amrex::MultiFab::Copy(j0, *(current_fp[0]), 0, 0, current_fp[0]->nComp(), current_fp[0]->nGrowVect()); + amrex::MultiFab::Copy(j1, *(current_fp[1]), 0, 0, current_fp[1]->nComp(), current_fp[1]->nGrowVect()); + amrex::MultiFab::Copy(j2, *(current_fp[2]), 0, 0, current_fp[2]->nComp(), current_fp[2]->nGrowVect()); + + current_fp[0]->setVal(0._rt); + current_fp[1]->setVal(0._rt); + current_fp[2]->setVal(0._rt); + if (rho_fp) { rho_fp->setVal(0._rt); } + PushPSATD(); // Note that this does dt/2 + FillBoundaryE(guard_cells.ng_alloc_EB, WarpX::sync_nodal_points); + FillBoundaryB(guard_cells.ng_alloc_EB, WarpX::sync_nodal_points); + + // Restore the current_fp MultiFab. Note that this is only needed for diagnostics when + // J is being written out (since current_fp is not otherwise used). + amrex::MultiFab::Copy(*(current_fp[0]), j0, 0, 0, current_fp[0]->nComp(), current_fp[0]->nGrowVect()); + amrex::MultiFab::Copy(*(current_fp[1]), j1, 0, 0, current_fp[1]->nComp(), current_fp[1]->nGrowVect()); + amrex::MultiFab::Copy(*(current_fp[2]), j2, 0, 0, current_fp[2]->nComp(), current_fp[2]->nGrowVect()); +} + void WarpX::SaveParticlesAtImplicitStepStart ( ) { diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H index d864f239e42..a4bbbe99f75 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H +++ b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.H @@ -84,7 +84,8 @@ public: [[nodiscard]] RT dotProduct( const WarpXSolverVec& a_X ) const; void Copy ( warpx::fields::FieldType a_array_type, - warpx::fields::FieldType a_scalar_type = warpx::fields::FieldType::None ); + warpx::fields::FieldType a_scalar_type = warpx::fields::FieldType::None, + bool allow_type_mismatch = false); inline void Copy ( const WarpXSolverVec& a_solver_vec ) diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp index 22c3b1d67c1..f091353a4df 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp +++ b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp @@ -112,26 +112,27 @@ void WarpXSolverVec::Define ( WarpX* a_WarpX, } void WarpXSolverVec::Copy ( FieldType a_array_type, - FieldType a_scalar_type ) + FieldType a_scalar_type, + bool allow_type_mismatch) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( IsDefined(), "WarpXSolverVec::Copy() called on undefined WarpXSolverVec"); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - a_array_type==m_array_type && - a_scalar_type==m_scalar_type, + (a_array_type==m_array_type && + a_scalar_type==m_scalar_type) || allow_type_mismatch, "WarpXSolverVec::Copy() called with vecs of different types"); for (int lev = 0; lev < m_num_amr_levels; ++lev) { if (m_array_type != FieldType::None) { - const ablastr::fields::VectorField this_array = m_WarpX->m_fields.get_alldirs(m_vector_type_name, lev); + const ablastr::fields::VectorField this_array = 
m_WarpX->m_fields.get_alldirs(a_array_type, lev); for (int n = 0; n < 3; ++n) { amrex::MultiFab::Copy( *m_array_vec[lev][n], *this_array[n], 0, 0, m_ncomp, amrex::IntVect::TheZeroVector() ); } } if (m_scalar_type != FieldType::None) { - const amrex::MultiFab* this_mf = m_WarpX->m_fields.get(m_scalar_type_name,lev); + const amrex::MultiFab* this_mf = m_WarpX->m_fields.get(a_scalar_type,lev); amrex::MultiFab::Copy( *m_scalar_vec[lev], *this_mf, 0, 0, m_ncomp, amrex::IntVect::TheZeroVector() ); } diff --git a/Source/Utils/WarpXAlgorithmSelection.H b/Source/Utils/WarpXAlgorithmSelection.H index 088ef295364..98d2430afc3 100644 --- a/Source/Utils/WarpXAlgorithmSelection.H +++ b/Source/Utils/WarpXAlgorithmSelection.H @@ -33,6 +33,7 @@ AMREX_ENUM(EvolveScheme, Explicit, ThetaImplicitEM, SemiImplicitEM, + StrangImplicitSpectralEM, Default = Explicit); /** diff --git a/Source/WarpX.H b/Source/WarpX.H index da1a4b5a269..1c7ed5a6a75 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -146,6 +146,7 @@ public: void UpdateMagneticFieldAndApplyBCs ( ablastr::fields::MultiLevelVectorField const& a_Bn, amrex::Real a_thetadt ); void ApplyMagneticFieldBCs (); + void SpectralSourceFreeFieldAdvance (); void FinishMagneticFieldAndApplyBCs ( ablastr::fields::MultiLevelVectorField const& a_Bn, amrex::Real a_theta ); void FinishImplicitField ( const ablastr::fields::MultiLevelVectorField& Field_fp, diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 5c2f16f317d..772131ea0e7 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -1127,6 +1127,9 @@ WarpX::ReadParameters () else if (evolve_scheme == EvolveScheme::ThetaImplicitEM) { m_implicit_solver = std::make_unique<ThetaImplicitEM>(); } + else if (evolve_scheme == EvolveScheme::StrangImplicitSpectralEM) { + m_implicit_solver = std::make_unique<StrangImplicitSpectralEM>(); + } // implicit evolve schemes not setup to use mirrors if (evolve_scheme == EvolveScheme::SemiImplicitEM || @@ -1172,7 +1175,8 @@ WarpX::ReadParameters () if (current_deposition_algo == CurrentDepositionAlgo::Villasenor) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( evolve_scheme == EvolveScheme::SemiImplicitEM || - evolve_scheme == EvolveScheme::ThetaImplicitEM, + evolve_scheme == EvolveScheme::ThetaImplicitEM || + evolve_scheme == EvolveScheme::StrangImplicitSpectralEM, "Villasenor current deposition can only" "be used with Implicit evolve schemes."); } @@ -1243,7 +1247,8 @@ WarpX::ReadParameters () } if (evolve_scheme == EvolveScheme::SemiImplicitEM || - evolve_scheme == EvolveScheme::ThetaImplicitEM) { + evolve_scheme == EvolveScheme::ThetaImplicitEM || + evolve_scheme == EvolveScheme::StrangImplicitSpectralEM) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( current_deposition_algo == CurrentDepositionAlgo::Esirkepov || @@ -1253,8 +1258,9 @@ WarpX::ReadParameters () WARPX_ALWAYS_ASSERT_WITH_MESSAGE( electromagnetic_solver_id == ElectromagneticSolverAlgo::Yee || - electromagnetic_solver_id == ElectromagneticSolverAlgo::CKC, - "Only the Yee EM solver is supported with the implicit and semi-implicit schemes"); + electromagnetic_solver_id == ElectromagneticSolverAlgo::CKC || + electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD, + "Only the Yee, CKC, and PSATD EM solvers are supported with the implicit and semi-implicit schemes"); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( particle_pusher_algo == ParticlePusherAlgo::Boris || @@ -1265,6 +1271,11 @@ WarpX::ReadParameters () field_gathering_algo != GatheringAlgo::MomentumConserving, "With implicit and semi-implicit schemes, the momentum conserving field gather is not supported as it would not conserve
energy"); } + if (evolve_scheme == EvolveScheme::StrangImplicitSpectralEM) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD, + "With the strang_implicit_spectral_em evolve scheme, the algo.maxwell_solver must be psatd"); + } // Load balancing parameters std::vector load_balance_intervals_string_vec = {"0"}; @@ -2757,6 +2768,10 @@ void WarpX::AllocLevelSpectralSolverRZ (amrex::Vector(WarpX::do_multi_J_n_depositions); } + if (evolve_scheme == EvolveScheme::StrangImplicitSpectralEM) { + // The step is Strang split into two half steps + solver_dt /= 2.; + } auto pss = std::make_unique(lev, realspace_ba, @@ -2810,6 +2825,10 @@ void WarpX::AllocLevelSpectralSolver (amrex::Vector(WarpX::do_multi_J_n_depositions); } + if (evolve_scheme == EvolveScheme::StrangImplicitSpectralEM) { + // The step is Strang split into two half steps + solver_dt /= 2.; + } auto pss = std::make_unique(lev, realspace_ba, From 3323515a5e8082e788084d338135096077020ecd Mon Sep 17 00:00:00 2001 From: David Grote Date: Wed, 13 Nov 2024 09:25:00 -0800 Subject: [PATCH 087/278] Simplify diagnostic functor setup (#5455) This reduces code duplication when setting up the functors for the full diagnostics. Instead of having separate code for each field for each dimension, this uses a loop over the dimensions so there is only a line for each field. This also combines the Cartesian and RZ setup. --- Source/Diagnostics/FullDiagnostics.cpp | 205 ++++++++----------------- 1 file changed, 60 insertions(+), 145 deletions(-) diff --git a/Source/Diagnostics/FullDiagnostics.cpp b/Source/Diagnostics/FullDiagnostics.cpp index eeca8ffdb44..7a8f376cd21 100644 --- a/Source/Diagnostics/FullDiagnostics.cpp +++ b/Source/Diagnostics/FullDiagnostics.cpp @@ -407,85 +407,43 @@ FullDiagnostics::InitializeFieldFunctorsRZopenPMD (int lev) // diagnostic output bool deposit_current = !m_solver_deposits_current; + std::vector field_names = {"r", "t", "z"}; + // Fill vector of functors for all components except individual cylindrical modes. 
    const auto m_varname_fields_size = static_cast<int>(m_varnames_fields.size());
    for (int comp=0; comp<m_varname_fields_size; comp++){
-        if        ( m_varnames_fields[comp] == "Er" ){
-            m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio,
-                false, ncomp);
-            if (update_varnames) {
-                AddRZModesToOutputNames(std::string("Er"), ncomp);
-            }
-        } else if ( m_varnames_fields[comp] == "Et" ){
-            m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio,
-                false, ncomp);
-            if (update_varnames) {
-                AddRZModesToOutputNames(std::string("Et"), ncomp);
-            }
-        } else if ( m_varnames_fields[comp] == "Ez" ){
-            m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio,
-                false, ncomp);
-            if (update_varnames) {
-                AddRZModesToOutputNames(std::string("Ez"), ncomp);
-            }
-        } else if ( m_varnames_fields[comp] == "Br" ){
-            m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio,
-                false, ncomp);
-            if (update_varnames) {
-                AddRZModesToOutputNames(std::string("Br"), ncomp);
-            }
-        } else if ( m_varnames_fields[comp] == "Bt" ){
-            m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio,
-                false, ncomp);
-            if (update_varnames) {
-                AddRZModesToOutputNames(std::string("Bt"), ncomp);
-            }
-        } else if ( m_varnames_fields[comp] == "Bz" ){
-            m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio,
-                false, ncomp);
-            if (update_varnames) {
-                AddRZModesToOutputNames(std::string("Bz"), ncomp);
-            }
-        } else if ( m_varnames_fields[comp] == "jr" ){
-            m_all_field_functors[lev][comp] = std::make_unique<JFunctor>(0, lev, m_crse_ratio,
-                false, deposit_current, ncomp);
-            deposit_current = false;
-            if (update_varnames) {
-                AddRZModesToOutputNames(std::string("jr"), ncomp);
-            }
-        } else if ( m_varnames_fields[comp] == "jt" ){
-            m_all_field_functors[lev][comp] = std::make_unique<JFunctor>(1, lev, m_crse_ratio,
-                false, deposit_current, ncomp);
-            deposit_current = false;
-            if (update_varnames) {
-                AddRZModesToOutputNames(std::string("jt"), ncomp);
-            }
-        } else if ( m_varnames_fields[comp] == "jz" ){
-            m_all_field_functors[lev][comp] = std::make_unique<JFunctor>(2, lev, m_crse_ratio,
-                false, deposit_current, ncomp);
-            deposit_current = false;
-            if (update_varnames) {
-                AddRZModesToOutputNames(std::string("jz"), ncomp);
-            }
-        } else if ( m_varnames_fields[comp] == "jr_displacement" ){
-            m_all_field_functors[lev][comp] = std::make_unique<JdispFunctor>(0, lev, m_crse_ratio,
-                false, ncomp);
-            if (update_varnames) {
-                AddRZModesToOutputNames(std::string("jr_displacement"), ncomp);
-            }
-        } else if ( m_varnames_fields[comp] == "jt_displacement" ){
-            m_all_field_functors[lev][comp] = std::make_unique<JdispFunctor>(1, lev, m_crse_ratio,
-                false, ncomp);
-            if (update_varnames) {
-                AddRZModesToOutputNames(std::string("jt_displacement"), ncomp);
-            }
-        } else if ( m_varnames_fields[comp] == "jz_displacement" ){
-            m_all_field_functors[lev][comp] = std::make_unique<JdispFunctor>(2, lev, m_crse_ratio,
-                false, ncomp);
-            if (update_varnames) {
-                AddRZModesToOutputNames(std::string("jz_displacement"), ncomp);
+        for (int idir=0; idir < 3; idir++) {
+            if ( m_varnames_fields[comp] == "E"+field_names[idir] ){
+                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux,
+                    Direction{idir}, lev), lev, m_crse_ratio, false, ncomp);
+                if (update_varnames) {
+                    AddRZModesToOutputNames(std::string("E"+field_names[idir]), ncomp);
+                }
+            } else if ( m_varnames_fields[comp] == "B"+field_names[idir] ){
+                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux,
+                    Direction{idir}, lev), lev, m_crse_ratio, false, ncomp);
+                if (update_varnames) {
+                    AddRZModesToOutputNames(std::string("B"+field_names[idir]), ncomp);
+                }
+            } else if ( m_varnames_fields[comp] == "j"+field_names[idir] ){
+                m_all_field_functors[lev][comp] = std::make_unique<JFunctor>(idir, lev, m_crse_ratio,
+                    false, deposit_current, ncomp);
+                deposit_current = false;
+                if (update_varnames) {
+                    AddRZModesToOutputNames(std::string("j"+field_names[idir]), ncomp);
+                }
+            } else if ( m_varnames_fields[comp] == "j"+field_names[idir]+"_displacement" ){
+                m_all_field_functors[lev][comp] = std::make_unique<JdispFunctor>(idir, lev, m_crse_ratio,
+                    false, ncomp);
+                if (update_varnames) {
+                    AddRZModesToOutputNames(std::string("j"+field_names[idir]+"_displacement"), ncomp);
+                }
             }
-        } else if ( m_varnames_fields[comp] == "rho" ){
+        }
+        // Check if comp was found above
+        if (m_all_field_functors[lev][comp]) {continue;}
+
+        if ( m_varnames_fields[comp] == "rho" ){
             // Initialize rho functor to dump total rho
             m_all_field_functors[lev][comp] = std::make_unique<RhoFunctor>(lev, m_crse_ratio, true, -1,
                 false, ncomp);
@@ -863,21 +821,33 @@ FullDiagnostics::InitializeFieldFunctors (int lev)

     using ablastr::fields::Direction;

+#if defined(WARPX_DIM_RZ)
+    std::vector<std::string> field_names = {"r", "t", "z"};
+#else
+    std::vector<std::string> field_names = {"x", "y", "z"};
+#endif
+
     m_all_field_functors[lev].resize(ntot);
     // Fill vector of functors for all components except individual cylindrical modes.
     for (int comp=0; comp<ntot; comp++){
-        if        ( m_varnames[comp] == "Ez" ){
-            m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev), lev, m_crse_ratio);
-        } else if ( m_varnames[comp] == "Bz" ){
-            m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev), lev, m_crse_ratio);
-        } else if ( m_varnames[comp] == "jz" ){
-            m_all_field_functors[lev][comp] = std::make_unique<JFunctor>(2, lev, m_crse_ratio, true, deposit_current);
-            deposit_current = false;
-        } else if ( m_varnames[comp] == "jz_displacement" ) {
-            m_all_field_functors[lev][comp] = std::make_unique<JdispFunctor>(2, lev, m_crse_ratio, true);
-        } else if ( m_varnames[comp] == "Az" ){
-            m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{2}, lev), lev, m_crse_ratio);
-        } else if ( m_varnames[comp] == "rho" ){
+        for (int idir=0; idir < 3; idir++) {
+            if ( m_varnames[comp] == "E"+field_names[idir] ){
+                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux, Direction{idir}, lev), lev, m_crse_ratio);
+            } else if ( m_varnames[comp] == "B"+field_names[idir] ){
+                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux, Direction{idir}, lev), lev, m_crse_ratio);
+            } else if ( m_varnames[comp] == "j"+field_names[idir] ){
+                m_all_field_functors[lev][comp] = std::make_unique<JFunctor>(idir, lev, m_crse_ratio, true, deposit_current);
+                deposit_current = false;
+            } else if ( m_varnames[comp] == "j"+field_names[idir]+"_displacement" ) {
+                m_all_field_functors[lev][comp] = std::make_unique<JdispFunctor>(idir, lev, m_crse_ratio, true);
+            } else if ( m_varnames[comp] == "A"+field_names[idir] ){
+                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{idir}, lev), lev, m_crse_ratio);
+            }
+        }
+        // Check if comp was found above
+        if (m_all_field_functors[lev][comp]) {continue;}
+
+        if ( m_varnames[comp] == "rho" ){
             // Initialize rho functor to dump total rho
             m_all_field_functors[lev][comp] = std::make_unique<RhoFunctor>(lev, m_crse_ratio, true);
         } else if ( m_varnames[comp].rfind("rho_", 0) == 0 ){
@@ -902,64 +872,9 @@ FullDiagnostics::InitializeFieldFunctors (int lev)
             m_all_field_functors[lev][comp] = std::make_unique<DivBFunctor>(warpx.m_fields.get_alldirs(FieldType::Bfield_aux, lev), lev, m_crse_ratio);
         } else if ( m_varnames[comp] == "divE" ){
             m_all_field_functors[lev][comp] = std::make_unique<DivEFunctor>(warpx.m_fields.get_alldirs(FieldType::Efield_aux, lev), lev, m_crse_ratio);
-        }
-        else {
-
-#ifdef WARPX_DIM_RZ
-            if ( m_varnames[comp] == "Er" ){
-                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio);
-            } else if ( m_varnames[comp] == "Et" ){
-                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio);
-            } else if ( m_varnames[comp] == "Br" ){
-                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio);
-            } else if ( m_varnames[comp] == "Bt" ){
-                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio);
-            } else if ( m_varnames[comp] == "jr" ){
-                m_all_field_functors[lev][comp] = std::make_unique<JFunctor>(0, lev, m_crse_ratio, true, deposit_current);
-                deposit_current = false;
-            } else if ( m_varnames[comp] == "jt" ){
-                m_all_field_functors[lev][comp] = std::make_unique<JFunctor>(1, lev, m_crse_ratio, true, deposit_current);
-                deposit_current = false;
-            } else if (m_varnames[comp] == "jr_displacement" ){
-                m_all_field_functors[lev][comp] = std::make_unique<JdispFunctor>(0, lev, m_crse_ratio, true);
-            } else if (m_varnames[comp] == "jt_displacement" ){
-                m_all_field_functors[lev][comp] = std::make_unique<JdispFunctor>(1, lev, m_crse_ratio, true);
-            } else if ( m_varnames[comp] == "Ar" ){
-                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{0}, lev), lev, m_crse_ratio);
-            } else if ( m_varnames[comp] == "At" ){
-                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{1}, lev), lev, m_crse_ratio);
-            } else {
-                WARPX_ABORT_WITH_MESSAGE(m_varnames[comp] + " is not a known field output type for RZ geometry");
-            }
-#else
-            // Valid transverse fields in Cartesian coordinates
-            if ( m_varnames[comp] == "Ex" ){
-                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev), lev, m_crse_ratio);
-            } else if ( m_varnames[comp] == "Ey" ){
-                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev), lev, m_crse_ratio);
-            } else if ( m_varnames[comp] == "Bx" ){
-                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), lev, m_crse_ratio);
-            } else if ( m_varnames[comp] == "By" ){
-                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), lev, m_crse_ratio);
-            } else if ( m_varnames[comp] == "jx" ){
-                m_all_field_functors[lev][comp] = std::make_unique<JFunctor>(0, lev, m_crse_ratio, true, deposit_current);
-                deposit_current = false;
-            } else if ( m_varnames[comp] == "jy" ){
-                m_all_field_functors[lev][comp] = std::make_unique<JFunctor>(1, lev, m_crse_ratio, true, deposit_current);
-                deposit_current = false;
-            } else if ( m_varnames[comp] == "jx_displacement" ){
-                m_all_field_functors[lev][comp] = std::make_unique<JdispFunctor>(0, lev, m_crse_ratio);
-            } else if ( m_varnames[comp] == "jy_displacement" ){
-                m_all_field_functors[lev][comp] = std::make_unique<JdispFunctor>(1, lev, m_crse_ratio);
-            } else if ( m_varnames[comp] == "Ax" ){
-                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{0}, lev), lev, m_crse_ratio);
-            } else if ( m_varnames[comp] == "Ay" ){
-                m_all_field_functors[lev][comp] = std::make_unique<CellCenterFunctor>(warpx.m_fields.get(FieldType::vector_potential_fp_nodal, Direction{1}, lev), lev, m_crse_ratio);
-            } else {
-                std::cout << "Error on component " << m_varnames[comp] << std::endl;
-                WARPX_ABORT_WITH_MESSAGE(m_varnames[comp] + " is not a known field output type for this geometry");
-            }
-#endif
+        } else {
+            std::cout << "Error on component " << m_varnames[comp] << std::endl;
+            WARPX_ABORT_WITH_MESSAGE(m_varnames[comp] + " is not a known field output type for this geometry");
         }
     }
     // Add functors for average particle data for each species

From 6014f9b2b81b15096ad53e18ddd79de3482653f0 Mon Sep 17 00:00:00 2001
From: Remi Lehe
Date: Wed, 13 Nov 2024 10:07:30 -0800
Subject: [PATCH 088/278] Revert "Python: Warn old `warpx.multifab` Signature (#5326)" (#5452)

It seems that the changes of #5326 are causing confusion among some users.

With #5326, users typically receive a message saying that the "signature is
deprecated" (which is actually incorrect; it should say "will soon be
deprecated"). As a consequence, users think that their simulation result is
invalid (which is again incorrect; using this signature is still fine for
now), and try to change it according to the printed instructions, i.e.:
```
Please use:
- multifab('prefix', level=...) for scalar fields
- multifab('prefix', dir=..., level=...) for vector field components
```
But because there is no link to a concrete example or test, users typically try:
```
multifab("Efield_fp", dir=0, level=0)
```
and then get
```
TypeError: multifab(): incompatible function arguments. The following argument types are supported:
    1. (self: pywarpx.warpx_pybind_3d.WarpX, internal_name: str) -> amrex.space3d.amrex_3d_pybind.MultiFab
    2. (self: pywarpx.warpx_pybind_3d.WarpX, scalar_name: str, level: int) -> amrex.space3d.amrex_3d_pybind.MultiFab
    3. (self: pywarpx.warpx_pybind_3d.WarpX, vector_name: str, dir: pywarpx.warpx_pybind_3d.Direction, level: int) -> amrex.space3d.amrex_3d_pybind.MultiFab
```
I am guessing that most users will get stuck at this point. The error message
does suggest that the user has to create a `Direction` object, but since
there is no example of how to create one, it is unlikely that most users will
be able to overcome this issue.

I would suggest temporarily reverting #5326, and then re-introducing it with:
- updated instructions on how to create a `Direction` object
- an updated warning that says "will be deprecated" instead of "is deprecated"
---
 Source/Python/WarpX.cpp | 5 -----
 1 file changed, 5 deletions(-)

diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp
index 932304d5009..921adff254f 100644
--- a/Source/Python/WarpX.cpp
+++ b/Source/Python/WarpX.cpp
@@ -115,11 +115,6 @@ void init_WarpX (py::module& m)
     )
     .def("multifab",
         [](WarpX & wx, std::string internal_name) {
-            py::print("WARNING: WarpX' multifab('internal_name') signature is deprecated.\nPlease use:\n"
-                      "- multifab('prefix', level=...) for scalar fields\n"
-                      "- multifab('prefix', dir=..., level=...) 
for vector field components\n" - "where 'prefix' is the part of 'internal_name';' before the []", - py::arg("file") = py::module_::import("sys").attr("stderr")); if (wx.m_fields.internal_has(internal_name)) { return wx.m_fields.internal_get(internal_name); } else { From 09dc6204ce4a24adf7e662652a2978f8b081b881 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Wed, 13 Nov 2024 10:13:34 -0800 Subject: [PATCH 089/278] Check that all E-field values are finite in Ohm solver (#5417) This assert is meant to help identify the origin of a common problem in hybrid-PIC simulations, wherein unresolved Whistler waves cause runaway E-field values. Currently when this happens, WarpX fails in the current deposition routine. --------- Signed-off-by: roelof-groenewald --- Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 5220419f822..8e9e0daa274 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -189,6 +189,18 @@ void WarpX::HybridPICEvolveFields () 0, 0, 1, current_fp_temp[lev][idim]->nGrowVect()); } } + + // Check that the E-field does not have nan or inf values, otherwise print a clear message + ablastr::fields::MultiLevelVectorField Efield_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level); + for (int lev = 0; lev <= finest_level; ++lev) + { + for (int idim = 0; idim < 3; ++idim) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + Efield_fp[lev][idim]->is_finite(), + "Non-finite value detected in E-field; this indicates more substeps should be used in the field solver." + ); + } + } } void WarpX::HybridPICDepositInitialRhoAndJ () From 3d6fb5503a1f5dedaec260731f31e7993281f8eb Mon Sep 17 00:00:00 2001 From: David Grote Date: Thu, 14 Nov 2024 13:18:26 -0800 Subject: [PATCH 090/278] Fix PEC-Insulator boundary condition with staggering (#5451) When calculating the location of the fields to determine whether the field is on a PEC or insulator boundary, add the appropriate shift to account for the staggering of the fields. 
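
To make the geometry concrete: on a staggered (Yee) grid, a nodal field value
sits on the cell node, while a non-nodal (cell-centered) component sits half a
cell away, which is the 0.5 shift this patch introduces. Below is a minimal
Python sketch of that index-to-position rule (illustrative only, not WarpX
source; `field_position` and its arguments are made-up names):

```python
# Sketch of the staggering rule added in this patch (not WarpX code).
# A nodal value lies on the grid node; a cell-centered value is offset by
# half a cell -- this mirrors the (F_nodal ? 0.0 : 0.5) ternaries in the diff.
def field_position(i, i_lo, x_min, dx, nodal):
    shift = 0.0 if nodal else 0.5
    return x_min + (i - i_lo + shift) * dx

# Without the shift, the first cell-centered value would be tested at the
# node position x = 0 instead of x = dx/2, and could be assigned to the
# wrong side of the PEC/insulator boundary.
print(field_position(0, 0, 0.0, 2.0e-3, nodal=True))   # 0.0
print(field_position(0, 0, 0.0, 2.0e-3, nodal=False))  # 0.001
```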
--- .../pec/inputs_test_2d_pec_field_insulator | 2 +- .../test_2d_pec_field_insulator.json | 8 +++--- Source/BoundaryConditions/PEC_Insulator.cpp | 27 ++++++++++++------- 3 files changed, 23 insertions(+), 14 deletions(-) diff --git a/Examples/Tests/pec/inputs_test_2d_pec_field_insulator b/Examples/Tests/pec/inputs_test_2d_pec_field_insulator index 68a8df1b600..912b77efcf6 100644 --- a/Examples/Tests/pec/inputs_test_2d_pec_field_insulator +++ b/Examples/Tests/pec/inputs_test_2d_pec_field_insulator @@ -15,7 +15,7 @@ geometry.prob_hi = 1.e-2 3.e-2 # Boundary condition boundary.field_lo = neumann periodic -boundary.field_hi = PECInsulator periodic +boundary.field_hi = pec_insulator periodic warpx.serialize_initial_conditions = 1 diff --git a/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator.json b/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator.json index ca6f38977ae..622cb5e5d30 100644 --- a/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator.json +++ b/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator.json @@ -1,13 +1,13 @@ { "lev=0": { "Bx": 0.0, - "By": 0.34938851065132936, + "By": 0.33065279639752304, "Bz": 0.0, - "Ex": 31871402.236828588, + "Ex": 31873416.396984838, "Ey": 0.0, - "Ez": 104908439.18998256, + "Ez": 99285542.27022335, "jx": 0.0, "jy": 0.0, "jz": 0.0 } -} \ No newline at end of file +} diff --git a/Source/BoundaryConditions/PEC_Insulator.cpp b/Source/BoundaryConditions/PEC_Insulator.cpp index df411f8e908..cfcd718c21c 100644 --- a/Source/BoundaryConditions/PEC_Insulator.cpp +++ b/Source/BoundaryConditions/PEC_Insulator.cpp @@ -426,10 +426,13 @@ PEC_Insulator::ApplyPEC_InsulatortoField ( amrex::ignore_unused(j, k); amrex::IntVect const iv(AMREX_D_DECL(i, j, k)); - amrex::Real const x = (AMREX_SPACEDIM > 1 ? xyzmin_x.x + (iv[0] - lo_x[0])*dx[0] : 0._rt); - amrex::Real const y = (AMREX_SPACEDIM == 3 ? xyzmin_x.y + (iv[1] - lo_x[1])*dx[1] : 0._rt); + amrex::Real const shiftx = (Fx_nodal[0] ? 0._rt : 0.5_rt); + amrex::Real const x = (AMREX_SPACEDIM > 1 ? xyzmin_x.x + (iv[0] - lo_x[0] + shiftx)*dx[0] : 0._rt); + amrex::Real const shifty = (AMREX_SPACEDIM == 3 ? (Fx_nodal[1] ? 0._rt : 0.5_rt) : 0._rt); + amrex::Real const y = (AMREX_SPACEDIM == 3 ? xyzmin_x.y + (iv[1] - lo_x[1] + shifty)*dx[1] : 0._rt); #if (AMREX_SPACEDIM > 1) - amrex::Real const z = xyzmin_x.z + (iv[WARPX_ZINDEX] - lo_x[WARPX_ZINDEX])*dx[2]; + amrex::Real const shiftz = (Fx_nodal[WARPX_ZINDEX] ? 0._rt : 0.5_rt); + amrex::Real const z = xyzmin_x.z + (iv[WARPX_ZINDEX] - lo_x[WARPX_ZINDEX] + shiftz)*dx[2]; #endif amrex::IntVect is_insulator_lo; @@ -471,10 +474,13 @@ PEC_Insulator::ApplyPEC_InsulatortoField ( amrex::ignore_unused(j, k); amrex::IntVect const iv(AMREX_D_DECL(i, j, k)); - amrex::Real const x = (AMREX_SPACEDIM > 1 ? xyzmin_y.x + (iv[0] - lo_y[0])*dx[0] : 0._rt); - amrex::Real const y = (AMREX_SPACEDIM == 3 ? xyzmin_y.y + (iv[1] - lo_y[1])*dx[1] : 0._rt); + amrex::Real const shiftx = (Fy_nodal[0] ? 0._rt : 0.5_rt); + amrex::Real const x = (AMREX_SPACEDIM > 1 ? xyzmin_y.x + (iv[0] - lo_y[0] + shiftx)*dx[0] : 0._rt); + amrex::Real const shifty = (AMREX_SPACEDIM == 3 ? (Fy_nodal[1] ? 0._rt : 0.5_rt) : 0._rt); + amrex::Real const y = (AMREX_SPACEDIM == 3 ? xyzmin_y.y + (iv[1] - lo_y[1] + shifty)*dx[1] : 0._rt); #if (AMREX_SPACEDIM > 1) - amrex::Real const z = xyzmin_y.z + (iv[WARPX_ZINDEX] - lo_y[WARPX_ZINDEX])*dx[2]; + amrex::Real const shiftz = (Fy_nodal[WARPX_ZINDEX] ? 
0._rt : 0.5_rt); + amrex::Real const z = xyzmin_y.z + (iv[WARPX_ZINDEX] - lo_y[WARPX_ZINDEX] + shiftz)*dx[2]; #endif amrex::IntVect is_insulator_lo; @@ -516,10 +522,13 @@ PEC_Insulator::ApplyPEC_InsulatortoField ( amrex::ignore_unused(j, k); amrex::IntVect const iv(AMREX_D_DECL(i, j, k)); - amrex::Real const x = (AMREX_SPACEDIM > 1 ? xyzmin_z.x + (iv[0] - lo_z[0])*dx[0] : 0._rt); - amrex::Real const y = (AMREX_SPACEDIM == 3 ? xyzmin_z.y + (iv[1] - lo_z[1])*dx[1] : 0._rt); + amrex::Real const shiftx = (Fz_nodal[0] ? 0._rt : 0.5_rt); + amrex::Real const x = (AMREX_SPACEDIM > 1 ? xyzmin_z.x + (iv[0] - lo_z[0] + shiftx)*dx[0] : 0._rt); + amrex::Real const shifty = (AMREX_SPACEDIM == 3 ? (Fz_nodal[1] ? 0._rt : 0.5_rt) : 0._rt); + amrex::Real const y = (AMREX_SPACEDIM == 3 ? xyzmin_z.y + (iv[1] - lo_z[1] + shifty)*dx[1] : 0._rt); #if (AMREX_SPACEDIM > 1) - amrex::Real const z = xyzmin_z.z + (iv[WARPX_ZINDEX] - lo_z[WARPX_ZINDEX])*dx[2]; + amrex::Real const shiftz = (Fz_nodal[WARPX_ZINDEX] ? 0._rt : 0.5_rt); + amrex::Real const z = xyzmin_z.z + (iv[WARPX_ZINDEX] - lo_z[WARPX_ZINDEX] + shiftz)*dx[2]; #endif amrex::IntVect is_insulator_lo; From ad6879d47b5f2eb260a8f1afa9f06c2cca702d42 Mon Sep 17 00:00:00 2001 From: Justin Ray Angus Date: Thu, 14 Nov 2024 16:36:59 -0800 Subject: [PATCH 091/278] Remove theta() function from ImplicitSolver base class. (#5441) --- .../ImplicitSolvers/ImplicitSolver.H | 8 +++--- .../ImplicitSolvers/SemiImplicitEM.H | 1 - .../ImplicitSolvers/SemiImplicitEM.cpp | 14 ++++++----- .../StrangImplicitSpectralEM.H | 4 +-- .../StrangImplicitSpectralEM.cpp | 21 ++++++++-------- .../ImplicitSolvers/ThetaImplicitEM.H | 6 +---- .../ImplicitSolvers/ThetaImplicitEM.cpp | 25 ++++++++++--------- Source/NonlinearSolvers/CurlCurlMLMGPC.H | 5 ++-- Source/NonlinearSolvers/JacobianFunctionMF.H | 2 +- Source/NonlinearSolvers/NewtonSolver.H | 6 ++--- Source/NonlinearSolvers/NonlinearSolver.H | 2 +- Source/NonlinearSolvers/PicardSolver.H | 3 ++- 12 files changed, 48 insertions(+), 49 deletions(-) diff --git a/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.H b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.H index ea9af6e2298..f8f0390e17a 100644 --- a/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.H +++ b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.H @@ -83,12 +83,9 @@ public: virtual void ComputeRHS ( WarpXSolverVec& a_RHS, const WarpXSolverVec& a_E, amrex::Real a_time, - amrex::Real a_dt, int a_nl_iter, bool a_from_jacobian ) = 0; - [[nodiscard]] virtual amrex::Real theta () const { return 1.0; } - [[nodiscard]] int numAMRLevels () const { return m_num_amr_levels; } [[nodiscard]] const amrex::Geometry& GetGeometry (int) const; @@ -111,6 +108,11 @@ protected: */ int m_num_amr_levels = 1; + /** + * \brief Time step + */ + mutable amrex::Real m_dt = 0.0; + /** * \brief Nonlinear solver type and object */ diff --git a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.H b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.H index 6e3e5db2c74..b6c808e0ab9 100644 --- a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.H +++ b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.H @@ -64,7 +64,6 @@ public: void ComputeRHS ( WarpXSolverVec& a_RHS, const WarpXSolverVec& a_E, amrex::Real a_time, - amrex::Real a_dt, int a_nl_iter, bool a_from_jacobian ) override; diff --git a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp index 2236118a30c..117c3baecaa 100644 --- a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp +++ 
b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp @@ -58,6 +58,9 @@ void SemiImplicitEM::OneStep ( amrex::Real a_time, { amrex::ignore_unused(a_step); + // Set the member time step + m_dt = a_dt; + // Fields have Eg^{n}, Bg^{n-1/2} // Particles have up^{n} and xp^{n}. @@ -68,15 +71,15 @@ void SemiImplicitEM::OneStep ( amrex::Real a_time, m_Eold.Copy( FieldType::Efield_fp ); // Advance WarpX owned Bfield_fp to t_{n+1/2} - m_WarpX->EvolveB(a_dt, DtType::Full); + m_WarpX->EvolveB(m_dt, DtType::Full); m_WarpX->ApplyMagneticFieldBCs(); - const amrex::Real half_time = a_time + 0.5_rt*a_dt; + const amrex::Real half_time = a_time + 0.5_rt*m_dt; // Solve nonlinear system for Eg at t_{n+1/2} // Particles will be advanced to t_{n+1/2} m_E.Copy(m_Eold); // initial guess for Eg^{n+1/2} - m_nlsolver->Solve( m_E, m_Eold, half_time, a_dt ); + m_nlsolver->Solve( m_E, m_Eold, half_time, 0.5_rt*m_dt ); // Update WarpX owned Efield_fp to t_{n+1/2} m_WarpX->SetElectricFieldAndApplyBCs( m_E ); @@ -94,7 +97,6 @@ void SemiImplicitEM::OneStep ( amrex::Real a_time, void SemiImplicitEM::ComputeRHS ( WarpXSolverVec& a_RHS, const WarpXSolverVec& a_E, amrex::Real a_time, - amrex::Real a_dt, int a_nl_iter, bool a_from_jacobian ) { @@ -104,8 +106,8 @@ void SemiImplicitEM::ComputeRHS ( WarpXSolverVec& a_RHS, // Update particle positions and velocities using the current state // of Eg and Bg. Deposit current density at time n+1/2 - m_WarpX->ImplicitPreRHSOp( a_time, a_dt, a_nl_iter, a_from_jacobian ); + m_WarpX->ImplicitPreRHSOp( a_time, m_dt, a_nl_iter, a_from_jacobian ); // RHS = cvac^2*0.5*dt*( curl(Bg^{n+1/2}) - mu0*Jg^{n+1/2} ) - m_WarpX->ImplicitComputeRHSE(0.5_rt*a_dt, a_RHS); + m_WarpX->ImplicitComputeRHSE(0.5_rt*m_dt, a_RHS); } diff --git a/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.H b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.H index a674dd6de76..d1587cfb9d1 100644 --- a/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.H +++ b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.H @@ -65,7 +65,6 @@ public: void ComputeRHS ( WarpXSolverVec& a_RHS, const WarpXSolverVec& a_E, amrex::Real a_time, - amrex::Real a_dt, int a_nl_iter, bool a_from_jacobian ) override; @@ -93,8 +92,7 @@ private: * \brief Update the E and B fields owned by WarpX */ void UpdateWarpXFields ( WarpXSolverVec const& a_E, - amrex::Real a_time, - amrex::Real a_dt ); + amrex::Real a_time ); /** * \brief Nonlinear solver is for the time-centered values of E. After diff --git a/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp index 1d463bcb365..501cbed10eb 100644 --- a/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp @@ -63,6 +63,9 @@ void StrangImplicitSpectralEM::OneStep ( amrex::Real a_time, // Fields have E^{n} and B^{n} // Particles have p^{n} and x^{n}. 
+ // Set the member time step + m_dt = a_dt; + // Save the values at the start of the time step, m_WarpX->SaveParticlesAtImplicitStepStart(); @@ -73,20 +76,20 @@ void StrangImplicitSpectralEM::OneStep ( amrex::Real a_time, m_Eold.Copy( FieldType::Efield_fp ); m_E.Copy(m_Eold); // initial guess for E - amrex::Real const half_time = a_time + 0.5_rt*a_dt; + amrex::Real const half_time = a_time + 0.5_rt*m_dt; // Solve nonlinear system for E at t_{n+1/2} // Particles will be advanced to t_{n+1/2} - m_nlsolver->Solve( m_E, m_Eold, half_time, a_dt ); + m_nlsolver->Solve( m_E, m_Eold, half_time, 0.5_rt*m_dt ); // Update WarpX owned Efield_fp and Bfield_fp to t_{n+1/2} - UpdateWarpXFields( m_E, half_time, a_dt ); + UpdateWarpXFields( m_E, half_time ); // Advance particles from time n+1/2 to time n+1 m_WarpX->FinishImplicitParticleUpdate(); // Advance E and B fields from time n+1/2 to time n+1 - amrex::Real const new_time = a_time + a_dt; + amrex::Real const new_time = a_time + m_dt; FinishFieldUpdate( new_time ); // Advance the fields to time n+1 source free @@ -97,29 +100,27 @@ void StrangImplicitSpectralEM::OneStep ( amrex::Real a_time, void StrangImplicitSpectralEM::ComputeRHS ( WarpXSolverVec& a_RHS, WarpXSolverVec const & a_E, amrex::Real a_time, - amrex::Real a_dt, int a_nl_iter, bool a_from_jacobian ) { // Update WarpX-owned Efield_fp and Bfield_fp using current state of // E from the nonlinear solver at time n+1/2 - UpdateWarpXFields( a_E, a_time, a_dt ); + UpdateWarpXFields( a_E, a_time ); // Self consistently update particle positions and velocities using the // current state of the fields E and B. Deposit current density at time n+1/2. - m_WarpX->ImplicitPreRHSOp( a_time, a_dt, a_nl_iter, a_from_jacobian ); + m_WarpX->ImplicitPreRHSOp( a_time, m_dt, a_nl_iter, a_from_jacobian ); // For Strang split implicit PSATD, the RHS = -dt*mu*c**2*J bool const allow_type_mismatch = true; a_RHS.Copy(FieldType::current_fp, warpx::fields::FieldType::None, allow_type_mismatch); amrex::Real constexpr coeff = PhysConst::c * PhysConst::c * PhysConst::mu0; - a_RHS.scale(-coeff * 0.5_rt*a_dt); + a_RHS.scale(-coeff * 0.5_rt*m_dt); } void StrangImplicitSpectralEM::UpdateWarpXFields (WarpXSolverVec const & a_E, - amrex::Real /*a_time*/, - amrex::Real /*a_dt*/) + amrex::Real /*a_time*/ ) { // Update Efield_fp owned by WarpX diff --git a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H index 69d56c6ddc5..7461b77fb51 100644 --- a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H +++ b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H @@ -74,12 +74,9 @@ public: void ComputeRHS ( WarpXSolverVec& a_RHS, const WarpXSolverVec& a_E, amrex::Real a_time, - amrex::Real a_dt, int a_nl_iter, bool a_from_jacobian ) override; - [[nodiscard]] amrex::Real theta () const override { return m_theta; } - private: /** @@ -101,8 +98,7 @@ private: * \brief Update the E and B fields owned by WarpX */ void UpdateWarpXFields ( const WarpXSolverVec& a_E, - amrex::Real a_time, - amrex::Real a_dt ); + amrex::Real a_time ); /** * \brief Nonlinear solver is for the time-centered values of E. 
After diff --git a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp index e5b8431a930..8ca592517ac 100644 --- a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp @@ -83,6 +83,9 @@ void ThetaImplicitEM::OneStep ( const amrex::Real a_time, // Fields have Eg^{n} and Bg^{n} // Particles have up^{n} and xp^{n}. + // Set the member time step + m_dt = a_dt; + // Save up and xp at the start of the time step m_WarpX->SaveParticlesAtImplicitStepStart ( ); @@ -99,21 +102,21 @@ void ThetaImplicitEM::OneStep ( const amrex::Real a_time, } } - const amrex::Real theta_time = a_time + m_theta*a_dt; + const amrex::Real theta_time = a_time + m_theta*m_dt; // Solve nonlinear system for Eg at t_{n+theta} // Particles will be advanced to t_{n+1/2} m_E.Copy(m_Eold); // initial guess for Eg^{n+theta} - m_nlsolver->Solve( m_E, m_Eold, theta_time, a_dt ); + m_nlsolver->Solve( m_E, m_Eold, theta_time, m_theta*m_dt ); // Update WarpX owned Efield_fp and Bfield_fp to t_{n+theta} - UpdateWarpXFields( m_E, theta_time, a_dt ); + UpdateWarpXFields( m_E, theta_time ); // Advance particles from time n+1/2 to time n+1 m_WarpX->FinishImplicitParticleUpdate(); // Advance Eg and Bg from time n+theta to time n+1 - const amrex::Real new_time = a_time + a_dt; + const amrex::Real new_time = a_time + m_dt; FinishFieldUpdate( new_time ); } @@ -121,25 +124,23 @@ void ThetaImplicitEM::OneStep ( const amrex::Real a_time, void ThetaImplicitEM::ComputeRHS ( WarpXSolverVec& a_RHS, const WarpXSolverVec& a_E, amrex::Real a_time, - amrex::Real a_dt, int a_nl_iter, bool a_from_jacobian ) { // Update WarpX-owned Efield_fp and Bfield_fp using current state of // Eg from the nonlinear solver at time n+theta - UpdateWarpXFields( a_E, a_time, a_dt ); + UpdateWarpXFields( a_E, a_time ); // Update particle positions and velocities using the current state // of Eg and Bg. 
Deposit current density at time n+1/2
-    m_WarpX->ImplicitPreRHSOp( a_time, a_dt, a_nl_iter, a_from_jacobian );
+    m_WarpX->ImplicitPreRHSOp( a_time, m_dt, a_nl_iter, a_from_jacobian );

     // RHS = cvac^2*m_theta*dt*( curl(Bg^{n+theta}) - mu0*Jg^{n+1/2} )
-    m_WarpX->ImplicitComputeRHSE(m_theta*a_dt, a_RHS);
+    m_WarpX->ImplicitComputeRHSE( m_theta*m_dt, a_RHS);
 }

 void ThetaImplicitEM::UpdateWarpXFields ( const WarpXSolverVec&  a_E,
-                                          amrex::Real  a_time,
-                                          amrex::Real  a_dt )
+                                          amrex::Real  a_time )
 {
     amrex::ignore_unused(a_time);

@@ -148,7 +149,7 @@ void ThetaImplicitEM::UpdateWarpXFields ( const WarpXSolverVec&  a_E,

     // Update Bfield_fp owned by WarpX
     ablastr::fields::MultiLevelVectorField const& B_old = m_WarpX->m_fields.get_mr_levels_alldirs(FieldType::B_old, 0);
-    m_WarpX->UpdateMagneticFieldAndApplyBCs(B_old, m_theta * a_dt );
+    m_WarpX->UpdateMagneticFieldAndApplyBCs( B_old, m_theta*m_dt );
 }

@@ -164,6 +165,6 @@ void ThetaImplicitEM::FinishFieldUpdate ( amrex::Real  a_new_time )
     m_E.linComb( c0, m_E, c1, m_Eold );
     m_WarpX->SetElectricFieldAndApplyBCs( m_E );
     ablastr::fields::MultiLevelVectorField const & B_old = m_WarpX->m_fields.get_mr_levels_alldirs(FieldType::B_old, 0);
-    m_WarpX->FinishMagneticFieldAndApplyBCs(B_old, m_theta );
+    m_WarpX->FinishMagneticFieldAndApplyBCs( B_old, m_theta );
 }

diff --git a/Source/NonlinearSolvers/CurlCurlMLMGPC.H b/Source/NonlinearSolvers/CurlCurlMLMGPC.H
index 47d7310995c..b3fcc6fe38f 100644
--- a/Source/NonlinearSolvers/CurlCurlMLMGPC.H
+++ b/Source/NonlinearSolvers/CurlCurlMLMGPC.H
@@ -272,7 +272,8 @@ void CurlCurlMLMGPC<T,Ops>::Update (const T& a_U)
     amrex::ignore_unused(a_U);

     // set the coefficients alpha and beta for curl-curl op
-    const RT alpha = (m_ops->theta()*this->m_dt*PhysConst::c) * (m_ops->theta()*this->m_dt*PhysConst::c);
+    // (m_dt here is actually theta<=0.5 times simulation dt)
+    const RT alpha = (this->m_dt*PhysConst::c) * (this->m_dt*PhysConst::c);
     const RT beta = RT(1.0);

     // currently not implemented in 1D
@@ -282,7 +283,7 @@ void CurlCurlMLMGPC<T,Ops>::Update (const T& a_U)
     if (m_verbose) {
         amrex::Print() << "Updating " << amrex::getEnumNameString(PreconditionerType::pc_curl_curl_mlmg)
-                       << ": dt = " << this->m_dt << ", "
+                       << ": theta*dt = " << this->m_dt << ", "
                        << " coefficients: "
                        << "alpha = " << alpha << ", "
                        << "beta = " << beta << "\n";
diff --git a/Source/NonlinearSolvers/JacobianFunctionMF.H b/Source/NonlinearSolvers/JacobianFunctionMF.H
index a3222214381..1a30dde4250 100644
--- a/Source/NonlinearSolvers/JacobianFunctionMF.H
+++ b/Source/NonlinearSolvers/JacobianFunctionMF.H
@@ -253,7 +253,7 @@ void JacobianFunctionMF<T,Ops>::apply (T& a_dF, const T& a_dU)
     const RT eps_inv = 1.0_rt/eps;

     m_Z.linComb( 1.0, m_Y0, eps, a_dU ); // Z = Y0 + eps*dU
-    m_ops->ComputeRHS(m_R, m_Z, m_cur_time, m_dt, -1, true );
+    m_ops->ComputeRHS(m_R, m_Z, m_cur_time, -1, true );

     // F(Y) = Y - b - R(Y) ==> dF = dF/dY*dU = [1 - dR/dY]*dU
     //      = dU - (R(Z)-R(Y0))/eps
diff --git a/Source/NonlinearSolvers/NewtonSolver.H b/Source/NonlinearSolvers/NewtonSolver.H
index 9c73c44e69e..f5147b2e4c0 100644
--- a/Source/NonlinearSolvers/NewtonSolver.H
+++ b/Source/NonlinearSolvers/NewtonSolver.H
@@ -169,7 +169,6 @@ private:
                         const Vec&   a_U,
                         const Vec&   a_b,
                         amrex::Real  a_time,
-                        amrex::Real  a_dt,
                         int          a_iter ) const;

};
@@ -252,7 +251,7 @@ void NewtonSolver<Vec,Ops>::Solve ( Vec&  a_U,
     for (iter = 0; iter < m_maxits;) {

         // Compute residual: F(U) = U - b - R(U)
-        EvalResidual(m_F, a_U, a_b, a_time, a_dt, iter);
+        EvalResidual(m_F, a_U, a_b, a_time, iter);

         // Compute norm of the residual
         norm_abs = m_F.norm2();
@@ -329,11 +328,10 @@ void NewtonSolver<Vec,Ops>::EvalResidual ( Vec&  a_F,
                                            const Vec&   a_U,
                                            const Vec&   a_b,
                                            amrex::Real  a_time,
-                                           amrex::Real  a_dt,
                                            int          a_iter ) const
 {
-    m_ops->ComputeRHS( m_R, a_U, a_time, a_dt, a_iter, false );
+    m_ops->ComputeRHS( m_R, a_U, a_time, a_iter, false );

     // set base U and R(U) for matrix-free Jacobian action calculation
     m_linear_function->setBaseSolution(a_U);
diff --git a/Source/NonlinearSolvers/NonlinearSolver.H b/Source/NonlinearSolvers/NonlinearSolver.H
index 6e64f1eb113..9daa3489f11 100644
--- a/Source/NonlinearSolvers/NonlinearSolver.H
+++ b/Source/NonlinearSolvers/NonlinearSolver.H
@@ -16,7 +16,7 @@
 * This class is templated on a vector class Vec, and an operator class Ops.
 *
 * The Ops class must have the following function:
-*    ComputeRHS( R_vec, U_vec, time, dt, nl_iter, from_jacobian ),
+*    ComputeRHS( R_vec, U_vec, time, nl_iter, from_jacobian ),
 * where U_vec and R_vec are of type Vec.
 *
 * The Vec class must have basic math operators, such as Copy, +=, -=,
diff --git a/Source/NonlinearSolvers/PicardSolver.H b/Source/NonlinearSolvers/PicardSolver.H
index f6c47c4f4bc..6fe941cd48f 100644
--- a/Source/NonlinearSolvers/PicardSolver.H
+++ b/Source/NonlinearSolvers/PicardSolver.H
@@ -138,6 +138,7 @@ void PicardSolver<Vec,Ops>::Solve ( Vec&  a_U,
     WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
         this->m_is_defined,
         "PicardSolver::Solve() called on undefined object");
+    amrex::ignore_unused(a_dt);
     using namespace amrex::literals;

     //
@@ -156,7 +157,7 @@ void PicardSolver<Vec,Ops>::Solve ( Vec&  a_U,
         m_Usave.Copy(a_U);

         // Update the solver state (a_U = a_b + m_R)
-        m_ops->ComputeRHS( m_R, a_U, a_time, a_dt, iter, false );
+        m_ops->ComputeRHS( m_R, a_U, a_time, iter, false );

         a_U.Copy(a_b);
         a_U += m_R;

From abf12de946abfa983140a6f8a4a836494ce49183 Mon Sep 17 00:00:00 2001
From: Arianna Formenti
Date: Fri, 15 Nov 2024 08:23:18 -0800
Subject: [PATCH 092/278] Docs: Thomson Parabola Spectrometer example (#5058)

This PR adds a new example where different ion species travel through a
Thomson Parabola Spectrometer and are collected at a screen.

The example can be found in the
`Physics_applications/thomson_parabola_spectrometer` folder.
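
As a rough cross-check of the setup (an editor's sketch under a small-angle,
non-relativistic assumption -- not part of the PR; the values of E0, B0, and
d2-d4 are taken from the example input file added further below), the expected
deflections at the screen are x ~ q E0 d2 (d2/2 + d3 + d4) / (m v^2) for the
electric plates and y ~ q B0 d3 (d3/2 + d4) / (m v) for the magnet:

```python
import numpy as np
from scipy.constants import e, m_p, eV

# Fields and geometry as in the example input file (assumed values):
E0, B0 = 1e5, 0.872           # V/m, T
d2, d3, d4 = 0.19, 0.12, 0.2  # E region, B region, drift to screen (m)

def deflections(q, m, E_kin):
    """Small-angle electric (x) and magnetic (y) deflections at the screen."""
    v = np.sqrt(2.0 * E_kin / m)                         # non-relativistic
    x = q * E0 * d2 * (d2 / 2 + d3 + d4) / (m * v**2)    # electric deflection
    y = q * B0 * d3 * (d3 / 2 + d4) / (m * v)            # magnetic deflection
    return x, y

# Same kinetic energy, different q/m -> points on different parabolas
for label, q, m in [("H+", e, m_p), ("C4+", 4 * e, 12 * m_p), ("C6+", 6 * e, 12 * m_p)]:
    x, y = deflections(q, m, 20e6 * eV)
    print(f"{label}: x = {x * 1e3:.2f} mm, y = {y * 1e3:.1f} mm")
```

For fixed q/m, eliminating v gives y^2 proportional to x, the parabola that
gives the instrument its name.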
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Luca Fedeli --- Docs/source/refs.bib | 15 ++ Docs/source/usage/examples.rst | 1 + Examples/Physics_applications/CMakeLists.txt | 1 + .../CMakeLists.txt | 12 ++ .../thomson_parabola_spectrometer/README.rst | 56 +++++ .../thomson_parabola_spectrometer/analysis.py | 89 ++++++++ .../analysis_default_openpmd_regression.py | 1 + ...puts_test_3d_thomson_parabola_spectrometer | 192 ++++++++++++++++++ ...test_3d_thomson_parabola_spectrometer.json | 35 ++++ 9 files changed, 402 insertions(+) create mode 100644 Examples/Physics_applications/thomson_parabola_spectrometer/CMakeLists.txt create mode 100644 Examples/Physics_applications/thomson_parabola_spectrometer/README.rst create mode 100644 Examples/Physics_applications/thomson_parabola_spectrometer/analysis.py create mode 120000 Examples/Physics_applications/thomson_parabola_spectrometer/analysis_default_openpmd_regression.py create mode 100644 Examples/Physics_applications/thomson_parabola_spectrometer/inputs_test_3d_thomson_parabola_spectrometer create mode 100644 Regression/Checksum/benchmarks_json/test_3d_thomson_parabola_spectrometer.json diff --git a/Docs/source/refs.bib b/Docs/source/refs.bib index 5bbaf633179..70b88a0abf8 100644 --- a/Docs/source/refs.bib +++ b/Docs/source/refs.bib @@ -479,3 +479,18 @@ @article{VayFELB2009 doi = {10.1063/1.3080930}, url = {https://doi.org/10.1063/1.3080930}, } + +@article{Rhee1987, + author = {Rhee, M. J. and Schneider, R. F. and Weidman, D. J.}, + title = "{Simple time‐resolving Thomson spectrometer}", + journal = {Review of Scientific Instruments}, + volume = {58}, + number = {2}, + pages = {240-244}, + year = {1987}, + month = {02}, + issn = {0034-6748}, + doi = {10.1063/1.1139314}, + url = {https://doi.org/10.1063/1.1139314}, + eprint = {https://pubs.aip.org/aip/rsi/article-pdf/58/2/240/19154912/240\_1\_online.pdf}, +} diff --git a/Docs/source/usage/examples.rst b/Docs/source/usage/examples.rst index 237c10ab5fb..fa3e674edd3 100644 --- a/Docs/source/usage/examples.rst +++ b/Docs/source/usage/examples.rst @@ -45,6 +45,7 @@ Particle Accelerator & Beam Physics examples/gaussian_beam/README.rst examples/beam_beam_collision/README.rst examples/free_electron_laser/README.rst + examples/thomson_parabola_spectrometer/README.rst High Energy Astrophysical Plasma Physics ---------------------------------------- diff --git a/Examples/Physics_applications/CMakeLists.txt b/Examples/Physics_applications/CMakeLists.txt index 7f0f0ecfaf7..ed06a840501 100644 --- a/Examples/Physics_applications/CMakeLists.txt +++ b/Examples/Physics_applications/CMakeLists.txt @@ -10,3 +10,4 @@ add_subdirectory(plasma_acceleration) add_subdirectory(plasma_mirror) add_subdirectory(spacecraft_charging) add_subdirectory(uniform_plasma) +add_subdirectory(thomson_parabola_spectrometer) diff --git a/Examples/Physics_applications/thomson_parabola_spectrometer/CMakeLists.txt b/Examples/Physics_applications/thomson_parabola_spectrometer/CMakeLists.txt new file mode 100644 index 00000000000..93b5d338fec --- /dev/null +++ b/Examples/Physics_applications/thomson_parabola_spectrometer/CMakeLists.txt @@ -0,0 +1,12 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_thomson_parabola_spectrometer # name + 3 # dims + 1 # nprocs + inputs_test_3d_thomson_parabola_spectrometer # inputs + analysis_default_openpmd_regression.py # analysis + diags/diag1 # output + OFF # dependency +) 
diff --git a/Examples/Physics_applications/thomson_parabola_spectrometer/README.rst b/Examples/Physics_applications/thomson_parabola_spectrometer/README.rst
new file mode 100644
index 00000000000..b033ee8c1dd
--- /dev/null
+++ b/Examples/Physics_applications/thomson_parabola_spectrometer/README.rst
@@ -0,0 +1,56 @@
+.. _examples-thomson_parabola_spectrometer:
+
+Thomson Parabola Spectrometer
+=============================
+
+This example simulates a Thomson parabola spectrometer (TPS) :cite:t:`ex-Rhee1987`.
+
+A TPS is a type of detector that separates incoming ions according to their charge-to-mass ratio (:math:`q/m`) and initial velocity (hence energy :math:`E_0 = 1/2 m v_0^2`, if we assume non-relativistic dynamics).
+TPSs are often used in laser-driven ion acceleration experiments, where different ion species are accelerated at once. To mimic this, we initialize a point-like source of 3 different ion species with different :math:`q/m` and :math:`E_0` (i.e. all ions have the same initial position, representative of a pinhole).
+
+The ions propagate along :math:`z` through 4 subsequent regions:
+
+  - a vacuum region, the distance between the pinhole and the TPS (0.1 m)
+  - a region of constant electric field along :math:`x` (0.19 m long, 1e5 V/m)
+  - a region of constant magnetic field along :math:`x` (0.12 m long, 0.872 T)
+  - a vacuum region, the distance between the TPS and the screen of the detector (0.2 m)
+
+The initial particle velocity :math:`v_0` is sampled from a uniform distribution in the range :math:`[v_{min}, v_{max}]`, where :math:`v_{min} = \sqrt{E_{max}/m}`, :math:`v_{max} = \sqrt{2E_{max}/m}`, and :math:`E_{max}` is an input parameter for each species. We assume zero transverse momentum.
+
+The ions are assumed to be test particles embedded in prescribed external fields, meaning that we neglect the self-field due to the ions' motion and that the ions do not interact with each other.
+
+The detector is modeled using a ``BoundaryScrapingDiagnostic`` at the upper :math:`z` boundary of the domain, which stores the attributes of the particles when they exit the simulation box through the corresponding edge. Note that the transverse box size is large enough that all particles exit the domain from the upper :math:`z` side.
+
+Run
+---
+
+The PICMI input file is not available for this example yet.
+
+For `MPI-parallel <https://www.mpi-forum.org>`__ runs, prefix these lines with ``mpiexec -n 4 ...`` or ``srun -n 4 ...``, depending on the system.
+
+.. literalinclude:: inputs_test_3d_thomson_parabola_spectrometer
+   :language: ini
+   :caption: You can copy this file from ``Examples/Physics_applications/thomson_parabola_spectrometer/inputs_test_3d_thomson_parabola_spectrometer``.
+
+Visualize
+---------
+
+The figure below shows the ion trajectories starting from the pinhole (black star), entering the E and B field regions (purple box), up to the detector (gray plane).
+The colors represent the different species: protons in blue, C :sup:`+4` in red, and C :sup:`+6` in green.
+The particles are accelerated and deflected through the TPS.
+
+.. figure:: https://gist.github.com/assets/17280419/3e45e5aa-d1fc-46e3-aa24-d9e0d6a74d1a
+   :alt: Ion trajectories through a synthetic TPS.
+   :width: 100%
+
+In our simulation, the virtual detector stores all the particle data once the particles enter it (i.e. exit the simulation box).
+The figure below shows the ions colored according to their species (same as above) and shaded according to their initial energy.
+The :math:`x` coordinate represents the electric deflection, while :math:`y` the magnetic deflection.
+
+.. figure:: https://gist.github.com/assets/17280419/4dd1adb7-b4ab-481d-bc24-8a7ca51471d9
+   :alt: Synthetic TPS screen.
+   :width: 100%
+
+.. literalinclude:: analysis.py
+   :language: python
+   :caption: You can copy this file from ``Examples/Physics_applications/thomson_parabola_spectrometer/analysis.py``.
diff --git a/Examples/Physics_applications/thomson_parabola_spectrometer/analysis.py b/Examples/Physics_applications/thomson_parabola_spectrometer/analysis.py
new file mode 100644
index 00000000000..3485ffc6712
--- /dev/null
+++ b/Examples/Physics_applications/thomson_parabola_spectrometer/analysis.py
@@ -0,0 +1,80 @@
+import matplotlib as mpl
+import matplotlib.pyplot as plt
+import numpy as np
+from openpmd_viewer import OpenPMDTimeSeries
+from scipy.constants import c, eV
+
+mpl.use("Agg")
+mpl.rcParams.update({"font.size": 18})
+
+MeV = 1e6 * eV
+
+# open the BoundaryScrapingDiagnostic that represents the detector
+series = OpenPMDTimeSeries("./diags/screen/particles_at_zhi/")
+# open the Full diagnostic at time zero
+series0 = OpenPMDTimeSeries("./diags/diag0/")
+# we use the data at time 0 to retrieve the initial energy
+# of all the particles that reach the boundary
+
+# timesteps and real times
+it = series.iterations
+time = series.t  # s
+N_iterations = len(it)
+
+# list of species names
+species = series.avail_species
+N_species = len(species)
+
+fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(10, 8), dpi=300)
+
+# some stuff for plotting
+vmin = 0
+vmax = 50
+cmap = ["Reds", "Greens", "Blues"]
+
+# loop through the species
+for s in range(N_species):
+    print(species[s])
+
+    # arrays of positions and energies
+    X, Y, E = [], [], []
+    for i in range(N_iterations):
+        # get particles at detector location
+        x, y, z, ids = series.get_particle(
+            ["x", "y", "z", "id"], iteration=it[i], species=species[s], plot=False
+        )
+        # get particles at initialization
+        uz0, ids0, m = series0.get_particle(
+            ["uz", "id", "mass"],
+            iteration=series0.iterations[0],
+            species=species[s],
+            plot=False,
+        )
+
+        indeces = np.where(np.in1d(ids0, ids))[0]
+
+        E = np.append(E, 0.5 * m[indeces] * (uz0[indeces] * c) ** 2 / MeV)
+        X = np.append(X, x)
+        Y = np.append(Y, y)
+    print(np.min(E), np.max(E))
+
+    # sort particles according to energy for nicer plot
+    sorted_indeces = np.argsort(E)
+    ax.scatter(
+        X[sorted_indeces],
+        Y[sorted_indeces],
+        c=E[sorted_indeces],
+        vmin=vmin,
+        vmax=vmax,
+        cmap=cmap[s],
+    )
+
+# dummy plot just to have a neutral colorbar
+im = ax.scatter(np.nan, np.nan, c=np.nan, cmap="Greys_r", vmin=vmin, vmax=vmax)
+plt.colorbar(im, label="E [MeV]")
+ax.set_xlabel("x [m]")
+ax.set_ylabel("y [m]")
+
+plt.tight_layout()
+fig.savefig("detect.png", dpi=300)
+plt.close()
diff --git a/Examples/Physics_applications/thomson_parabola_spectrometer/analysis_default_openpmd_regression.py b/Examples/Physics_applications/thomson_parabola_spectrometer/analysis_default_openpmd_regression.py
new file mode 120000
index 00000000000..73e5ec47001
--- /dev/null
+++ b/Examples/Physics_applications/thomson_parabola_spectrometer/analysis_default_openpmd_regression.py
@@ -0,0 +1 @@
+../../analysis_default_openpmd_regression.py
\ No newline at end of file
diff --git a/Examples/Physics_applications/thomson_parabola_spectrometer/inputs_test_3d_thomson_parabola_spectrometer 
b/Examples/Physics_applications/thomson_parabola_spectrometer/inputs_test_3d_thomson_parabola_spectrometer new file mode 100644 index 00000000000..04e238da86d --- /dev/null +++ b/Examples/Physics_applications/thomson_parabola_spectrometer/inputs_test_3d_thomson_parabola_spectrometer @@ -0,0 +1,192 @@ +############## +#### CONSTANTS +############## +my_constants.MeV = 1e6*q_e + +# distance between pinhole and electric field +my_constants.d1 = 0.1 # m +# length of the electric field region +my_constants.d2 = 0.19 # m +# length of the magnetic field region +my_constants.d3 = 0.12 # m +# distance between the magnetic field and the screen +my_constants.d4 = 0.2 # m + +# constant fields in the TPS +my_constants.E0 = 1e5 # V/m +my_constants.B0 = 0.872 # T + +# transverse domain +my_constants.xmin = -0.4 # m +my_constants.xmax = 0.4 # m +my_constants.ymin = -0.4 # m +my_constants.ymax = 0.4 # m + +# longitudinal domain +my_constants.zmin= -1e-3 # m +my_constants.zmax = d1+d2+d3+d4 + +# each macroparticle corresponds to 1 real particle +my_constants.N_real_particles = 1e3 +my_constants.N_macro_particles = 1e3 + +# maximum energy of the different species +# we assume that all the species have a +# uniform energy distribution in [0.5*Emax,Emax] +my_constants.Emax_hydrogen1_1 = 40*MeV +my_constants.Emax_carbon12_6 = 20*MeV +my_constants.Emax_carbon12_4 = 20*MeV + +# velocity of a very slow particle +# used to estimate the simulation time +my_constants.vz = sqrt(2*1*MeV/(12*m_p)) +my_constants.max_steps = 400 +my_constants.max_time = (-zmin+d1+d2+d3+d4) / vz +my_constants.dt = max_time / max_steps + +############# +#### NUMERICS +############# +algo.particle_shape = 1 +algo.maxwell_solver = none +algo.particle_pusher = boris +amr.max_level = 0 +warpx.verbose = 1 + +######## +#### BOX +######## +amr.n_cell = 8 8 8 +geometry.dims = 3 +geometry.prob_hi = xmax ymax zmax +geometry.prob_lo = xmin ymin zmin + +######### +#### TIME +######### +stop_time = max_time +warpx.const_dt = dt + +############# +#### BOUNDARY +############# +boundary.particle_hi = absorbing absorbing absorbing +boundary.particle_lo = absorbing absorbing absorbing + +############## +#### PARTICLES +############## +particles.species_names = hydrogen1_1 carbon12_6 carbon12_4 + +hydrogen1_1.charge = q_e +hydrogen1_1.initialize_self_fields = 0 +hydrogen1_1.injection_style = gaussian_beam +hydrogen1_1.mass = m_p +hydrogen1_1.momentum_distribution_type = uniform +hydrogen1_1.npart = N_macro_particles +hydrogen1_1.q_tot = N_real_particles*q_e +hydrogen1_1.ux_min = 0 +hydrogen1_1.uy_min = 0 +hydrogen1_1.uz_min = sqrt(Emax_hydrogen1_1/m_p)/clight +hydrogen1_1.ux_max = 0 +hydrogen1_1.uy_max = 0 +hydrogen1_1.uz_max = sqrt(2*Emax_hydrogen1_1/m_p)/clight +hydrogen1_1.x_m = 0 +hydrogen1_1.x_rms = 0 +hydrogen1_1.y_m = 0 +hydrogen1_1.y_rms = 0 +hydrogen1_1.z_m = 0 +hydrogen1_1.z_rms = 0 +hydrogen1_1.do_not_gather = 1 +hydrogen1_1.do_not_deposit = 1 + +# carbon12_6 means carbon ions with 12 nucleons, of which 6 protons +carbon12_6.charge = 6*q_e +carbon12_6.initialize_self_fields = 0 +carbon12_6.injection_style = gaussian_beam +carbon12_6.mass = 12*m_p +carbon12_6.momentum_distribution_type = uniform +carbon12_6.npart = N_macro_particles +carbon12_6.q_tot = N_real_particles*6*q_e +carbon12_6.ux_min = 0 +carbon12_6.uy_min = 0 +carbon12_6.uz_min = sqrt(Emax_carbon12_6/(12*m_p))/clight +carbon12_6.ux_max = 0 +carbon12_6.uy_max = 0 +carbon12_6.uz_max = sqrt(2*Emax_carbon12_6/(12*m_p))/clight +carbon12_6.x_m = 0 +carbon12_6.x_rms = 0 +carbon12_6.y_m = 0 
+carbon12_6.y_rms = 0 +carbon12_6.z_m = 0 +carbon12_6.z_rms = 0 +carbon12_6.do_not_gather = 1 +carbon12_6.do_not_deposit = 1 + +carbon12_4.charge = 4*q_e +carbon12_4.initialize_self_fields = 0 +carbon12_4.injection_style = gaussian_beam +carbon12_4.mass = 12*m_p +carbon12_4.momentum_distribution_type = uniform +carbon12_4.npart = N_macro_particles +carbon12_4.q_tot = N_real_particles*4*q_e +carbon12_4.ux_min = 0 +carbon12_4.uy_min = 0 +carbon12_4.uz_min = sqrt(Emax_carbon12_4/(12*m_p))/clight +carbon12_4.ux_max = 0 +carbon12_4.uy_max = 0 +carbon12_4.uz_max = sqrt(2*Emax_carbon12_4/(12*m_p))/clight +carbon12_4.x_m = 0 +carbon12_4.x_rms = 0 +carbon12_4.y_m = 0 +carbon12_4.y_rms = 0 +carbon12_4.z_m = 0 +carbon12_4.z_rms = 0 +carbon12_4.do_not_gather = 1 +carbon12_4.do_not_deposit = 1 + +########### +#### FIELDS +########### +particles.E_ext_particle_init_style = parse_E_ext_particle_function +particles.Ex_external_particle_function(x,y,z,t) = "E0*(z>d1)*(z<(d1+d2))" +particles.Ey_external_particle_function(x,y,z,t) = 0 +particles.Ez_external_particle_function(x,y,z,t) = 0 + +particles.B_ext_particle_init_style = parse_B_ext_particle_function +particles.Bx_external_particle_function(x,y,z,t) = "B0*(z>d1+d2)*(z<(d1+d2+d3))" +particles.By_external_particle_function(x,y,z,t) = 0 +particles.Bz_external_particle_function(x,y,z,t) = 0 + +################ +#### DIAGNOSTICS +################ +diagnostics.diags_names = diag0 screen diag1 + +diag0.diag_type = Full +diag0.fields_to_plot = none +diag0.format = openpmd +diag0.intervals = 0:0 +diag0.write_species = 1 +diag0.species = hydrogen1_1 carbon12_6 carbon12_4 +diag0.dump_last_timestep = 0 + +# diagnostic that collects the particles at the detector's position, +# i.e. when a particle exits the domain from z_max = zhi +# we store it in the screen diagnostic +# we are assuming that most particles will exit the domain at z_max +# which requires a large enough transverse box +screen.diag_type = BoundaryScraping +screen.format = openpmd +screen.intervals = 1 +hydrogen1_1.save_particles_at_zhi = 1 +carbon12_6.save_particles_at_zhi = 1 +carbon12_4.save_particles_at_zhi = 1 + +diag1.diag_type = Full +diag1.fields_to_plot = rho_hydrogen1_1 rho_carbon12_6 rho_carbon12_4 +diag1.format = openpmd +diag1.intervals = 50:50 +diag1.write_species = 1 +diag1.species = hydrogen1_1 carbon12_6 carbon12_4 +diag1.dump_last_timestep = 0 diff --git a/Regression/Checksum/benchmarks_json/test_3d_thomson_parabola_spectrometer.json b/Regression/Checksum/benchmarks_json/test_3d_thomson_parabola_spectrometer.json new file mode 100644 index 00000000000..2346ffd8124 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_thomson_parabola_spectrometer.json @@ -0,0 +1,35 @@ +{ + "lev=0": { + "rho_carbon12_4": 8.391105120785595e-13, + "rho_carbon12_6": 1.2586657681178396e-12, + "rho_hydrogen1_1": 0.0 + }, + "carbon12_4": { + "particle_position_x": 0.24746482639048117, + "particle_position_y": 0.3712831550411343, + "particle_position_z": 291.92951822527056, + "particle_momentum_x": 7.446857998192906e-19, + "particle_momentum_y": 6.58876061665569e-18, + "particle_momentum_z": 3.0678537977188415e-16, + "particle_weight": 1000.0 + }, + "carbon12_6": { + "particle_position_x": 0.3706220153511513, + "particle_position_y": 0.5770046251488395, + "particle_position_z": 291.70616446343365, + "particle_momentum_x": 1.1143091694186902e-18, + "particle_momentum_y": 1.015840779649768e-17, + "particle_momentum_z": 3.063311157322583e-16, + "particle_weight": 1000.0 + }, + "hydrogen1_1": { + 
"particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} + From 018eeece1602671e1efc5cdfd1817b6e49b42616 Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Mon, 18 Nov 2024 09:57:43 -0800 Subject: [PATCH 093/278] Use AMReX FFT for IGF Solver (#5457) This replaces the implementation using HeFFTe. A new runtime parameter ablastr.nprocs_igf_fft is added. This parameter controls the the number of processes used by parallel FFT in the IGF solver. By default, all processes will be used. --------- Co-authored-by: Remi Lehe --- .azure-pipelines.yml | 12 - .github/workflows/cuda.yml | 13 +- .github/workflows/dependencies/hip.sh | 13 - .github/workflows/hip.yml | 6 +- CMakeLists.txt | 37 --- Docs/source/install/cmake.rst | 2 - Docs/source/install/dependencies.rst | 3 +- .../open_bc_poisson_solver/CMakeLists.txt | 12 - ...puts_test_3d_open_bc_poisson_solver_heffte | 1 - GNUmakefile | 1 - .../test_3d_open_bc_poisson_solver.json | 14 +- .../fields/IntegratedGreenFunctionSolver.cpp | 272 +++--------------- .../machines/desktop/spack-macos-openmp.yaml | 1 - Tools/machines/desktop/spack-ubuntu-cuda.yaml | 1 - .../machines/desktop/spack-ubuntu-openmp.yaml | 1 - Tools/machines/desktop/spack-ubuntu-rocm.yaml | 1 - .../install_a100_dependencies.sh | 39 --- .../lonestar6_warpx_a100.profile.example | 2 - .../install_cpu_dependencies.sh | 39 --- .../install_gpu_dependencies.sh | 43 --- .../perlmutter_cpu_warpx.profile.example | 2 - .../perlmutter_gpu_warpx.profile.example | 2 - .../tioga-llnl/install_mi300a_dependencies.sh | 42 --- .../tioga_mi300a_warpx.profile.example | 2 - cmake/WarpXFunctions.cmake | 5 - cmake/dependencies/AMReX.cmake | 15 +- setup.py | 2 - 27 files changed, 58 insertions(+), 525 deletions(-) delete mode 100644 Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver_heffte diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index 62d8a0a424d..d22097a208f 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -38,7 +38,6 @@ jobs: # Cartesian 3D cartesian_3d: WARPX_CMAKE_FLAGS: -DWarpX_DIMS=3 -DWarpX_FFT=ON -DWarpX_PYTHON=ON - WARPX_HEFFTE: 'TRUE' # Cylindrical RZ cylindrical_rz: WARPX_CMAKE_FLAGS: -DWarpX_DIMS=RZ -DWarpX_FFT=ON -DWarpX_PYTHON=ON @@ -121,17 +120,6 @@ jobs: -DCMAKE_CXX_STANDARD=17 \ -Duse_cmake_find_lapack=ON -Dbuild_tests=OFF -DCMAKE_VERBOSE_MAKEFILE=ON fi - if [ "${WARPX_HEFFTE:-FALSE}" == "TRUE" ]; then - cmake-easyinstall --prefix=/usr/local git+https://github.com/icl-utk-edu/heffte.git@v2.4.0 \ - -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ - -DCMAKE_CXX_STANDARD=17 -DHeffte_ENABLE_DOXYGEN=OFF \ - -DHeffte_ENABLE_FFTW=ON -DHeffte_ENABLE_TESTING=OFF \ - -DHeffte_ENABLE_CUDA=OFF -DHeffte_ENABLE_ROCM=OFF \ - -DHeffte_ENABLE_ONEAPI=OFF -DHeffte_ENABLE_MKL=OFF \ - -DHeffte_ENABLE_PYTHON=OFF -DHeffte_ENABLE_FORTRAN=OFF \ - -DHeffte_ENABLE_MAGMA=OFF \ - -DCMAKE_VERBOSE_MAKEFILE=ON - fi # Python modules required for test analysis python3 -m pip install --upgrade -r Regression/requirements.txt python3 -m pip cache purge diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index a10306789cb..8d40aba553c 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -62,16 +62,6 @@ jobs: -DBUILD_CLI_TOOLS=OFF \ -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ -DCMAKE_VERBOSE_MAKEFILE=ON - cmake-easyinstall --prefix=/usr/local \ - 
git+https://github.com/icl-utk-edu/heffte.git@v2.4.0 \ - -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ - -DCMAKE_CXX_STANDARD=17 -DHeffte_ENABLE_DOXYGEN=OFF \ - -DHeffte_ENABLE_FFTW=OFF -DHeffte_ENABLE_TESTING=OFF \ - -DHeffte_ENABLE_CUDA=ON -DHeffte_ENABLE_ROCM=OFF \ - -DHeffte_ENABLE_ONEAPI=OFF -DHeffte_ENABLE_MKL=OFF \ - -DHeffte_ENABLE_PYTHON=OFF -DHeffte_ENABLE_FORTRAN=OFF \ - -DHeffte_ENABLE_MAGMA=OFF \ - -DCMAKE_VERBOSE_MAKEFILE=ON - name: build WarpX run: | export CCACHE_COMPRESS=1 @@ -92,7 +82,6 @@ jobs: -DWarpX_openpmd_internal=OFF \ -DWarpX_PRECISION=SINGLE \ -DWarpX_FFT=ON \ - -DWarpX_HEFFTE=ON \ -DAMReX_CUDA_ERROR_CROSS_EXECUTION_SPACE_CALL=ON \ -DAMReX_CUDA_ERROR_CAPTURE_THIS=ON cmake --build build_sp -j 4 @@ -137,7 +126,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 4b703fec6c2ff983e465c8cef0cc4947231edb07 && cd - + cd ../amrex && git checkout --detach 456c93c7d9512f1cdffac0574973d7df41417898 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/.github/workflows/dependencies/hip.sh b/.github/workflows/dependencies/hip.sh index 2a1b4d090bc..1154bb05e58 100755 --- a/.github/workflows/dependencies/hip.sh +++ b/.github/workflows/dependencies/hip.sh @@ -79,16 +79,3 @@ sudo curl -L -o /usr/local/bin/cmake-easyinstall https://raw.githubusercontent.c sudo chmod a+x /usr/local/bin/cmake-easyinstall export CEI_SUDO="sudo" export CEI_TMP="/tmp/cei" - -# heFFTe -# -cmake-easyinstall --prefix=/usr/local \ - git+https://github.com/icl-utk-edu/heffte.git@v2.4.0 \ - -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ - -DCMAKE_CXX_STANDARD=17 -DHeffte_ENABLE_DOXYGEN=OFF \ - -DHeffte_ENABLE_FFTW=OFF -DHeffte_ENABLE_TESTING=OFF \ - -DHeffte_ENABLE_CUDA=OFF -DHeffte_ENABLE_ROCM=ON \ - -DHeffte_ENABLE_ONEAPI=OFF -DHeffte_ENABLE_MKL=OFF \ - -DHeffte_ENABLE_PYTHON=OFF -DHeffte_ENABLE_FORTRAN=OFF \ - -DHeffte_ENABLE_MAGMA=OFF \ - -DCMAKE_VERBOSE_MAKEFILE=ON diff --git a/.github/workflows/hip.yml b/.github/workflows/hip.yml index 8ba39de7742..6ab4e4a8401 100644 --- a/.github/workflows/hip.yml +++ b/.github/workflows/hip.yml @@ -61,8 +61,7 @@ jobs: -DWarpX_MPI=ON \ -DWarpX_OPENPMD=ON \ -DWarpX_PRECISION=SINGLE \ - -DWarpX_FFT=ON \ - -DWarpX_HEFFTE=ON + -DWarpX_FFT=ON cmake --build build_sp -j 4 export WARPX_MPI=OFF @@ -122,8 +121,7 @@ jobs: -DWarpX_MPI=ON \ -DWarpX_OPENPMD=ON \ -DWarpX_PRECISION=DOUBLE \ - -DWarpX_FFT=ON \ - -DWarpX_HEFFTE=ON + -DWarpX_FFT=ON cmake --build build_2d -j 4 export WARPX_MPI=OFF diff --git a/CMakeLists.txt b/CMakeLists.txt index 8ff14bacfa6..da62c943e19 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -73,7 +73,6 @@ option(WarpX_LIB "Build WarpX as a library" OFF) option(WarpX_MPI "Multi-node support (message-passing)" ON) option(WarpX_OPENPMD "openPMD I/O (HDF5, ADIOS)" ON) option(WarpX_FFT "FFT-based solvers" OFF) -option(WarpX_HEFFTE "Multi-node FFT-based solvers" OFF) option(WarpX_PYTHON "Python bindings" OFF) option(WarpX_SENSEI "SENSEI in situ diagnostics" OFF) option(WarpX_QED "QED support (requires PICSAR)" ON) @@ -146,10 +145,6 @@ mark_as_advanced(WarpX_MPI_THREAD_MULTIPLE) option(WarpX_amrex_internal "Download & build AMReX" ON) -if(WarpX_HEFFTE AND NOT WarpX_MPI) - message(FATAL_ERROR "WarpX_HEFFTE (${WarpX_HEFFTE}) can only be used if WarpX_MPI is ON.") -endif() - # change the default build type to Release (or RelWithDebInfo) instead of Debug set_default_build_type("Release") @@ 
-197,10 +192,6 @@ option(ABLASTR_FFT "compile AnyFFT wrappers" ${WarpX_FFT}) if(WarpX_FFT) set(ABLASTR_FFT ON CACHE STRING "FFT-based solvers" FORCE) endif() -option(ABLASTR_HEFFTE "compile AnyFFT wrappers" ${WarpX_HEFFTE}) -if(WarpX_HEFFTE) - set(ABLASTR_HEFFTE ON CACHE STRING "Multi-Node FFT-based solvers" FORCE) -endif() # this defined the variable BUILD_TESTING which is ON by default include(CTest) @@ -242,23 +233,6 @@ if(WarpX_FFT) endif() endif() -# multi-node FFT -if(WarpX_HEFFTE) - if(WarpX_COMPUTE STREQUAL CUDA) - set(_heFFTe_COMPS CUDA) - elseif(WarpX_COMPUTE STREQUAL HIP) - set(_heFFTe_COMPS ROCM) - elseif(WarpX_COMPUTE STREQUAL SYCL) - set(_heFFTe_COMPS ONEAPI) - else() # NOACC, OMP - set(_heFFTe_COMPS FFTW) # or MKL - endif() - # note: we could also enforce GPUAWARE for CUDA and HIP, which can still be - # disabled at runtime - - find_package(Heffte REQUIRED COMPONENTS ${_heFFTe_COMPS}) -endif() - # Python if(WarpX_PYTHON) find_package(Python 3.8 COMPONENTS Interpreter Development.Module REQUIRED) @@ -499,10 +473,6 @@ foreach(D IN LISTS WarpX_DIMS) endif() endif() - if(ABLASTR_HEFFTE) - target_link_libraries(ablastr_${SD} PUBLIC Heffte::Heffte) - endif() - if(WarpX_PYTHON) target_link_libraries(pyWarpX_${SD} PRIVATE pybind11::module pybind11::windows_extras) if(WarpX_PYTHON_IPO) @@ -593,13 +563,6 @@ foreach(D IN LISTS WarpX_DIMS) target_compile_definitions(ablastr_${SD} PUBLIC ABLASTR_USE_FFT) endif() - if(WarpX_HEFFTE) - target_compile_definitions(ablastr_${SD} PUBLIC WARPX_USE_HEFFTE) - endif() - if(ABLASTR_HEFFTE) - target_compile_definitions(ablastr_${SD} PUBLIC ABLASTR_USE_HEFFTE) - endif() - if(WarpX_PYTHON AND pyWarpX_VERSION_INFO) # for module __version__ target_compile_definitions(pyWarpX_${SD} PRIVATE diff --git a/Docs/source/install/cmake.rst b/Docs/source/install/cmake.rst index 41e4c40bc85..f3f881d4504 100644 --- a/Docs/source/install/cmake.rst +++ b/Docs/source/install/cmake.rst @@ -97,7 +97,6 @@ CMake Option Default & Values Descr ``WarpX_PRECISION`` SINGLE/**DOUBLE** Floating point precision (single/double) ``WarpX_PARTICLE_PRECISION`` SINGLE/**DOUBLE** Particle floating point precision (single/double), defaults to WarpX_PRECISION value if not set ``WarpX_FFT`` ON/**OFF** FFT-based solvers -``WarpX_HEFFTE`` ON/**OFF** Multi-Node FFT-based solvers ``WarpX_PYTHON`` ON/**OFF** Python bindings ``WarpX_QED`` **ON**/OFF QED support (requires PICSAR) ``WarpX_QED_TABLE_GEN`` ON/**OFF** QED table generation support (requires PICSAR and Boost) @@ -275,7 +274,6 @@ Environment Variable Default & Values Descr ``WARPX_PRECISION`` SINGLE/**DOUBLE** Floating point precision (single/double) ``WARPX_PARTICLE_PRECISION`` SINGLE/**DOUBLE** Particle floating point precision (single/double), defaults to WarpX_PRECISION value if not set ``WARPX_FFT`` ON/**OFF** FFT-based solvers -``WARPX_HEFFTE`` ON/**OFF** Multi-Node FFT-based solvers ``WARPX_QED`` **ON**/OFF PICSAR QED (requires PICSAR) ``WARPX_QED_TABLE_GEN`` ON/**OFF** QED table generation (requires PICSAR and Boost) ``BUILD_PARALLEL`` ``2`` Number of threads to use for parallel builds diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index 71a607eae6a..13e2377d568 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -28,7 +28,6 @@ Optional dependencies include: - `FFTW3 `__: for spectral solver (PSATD or IGF) support when running on CPU or SYCL - also needs the ``pkg-config`` tool on Unix -- `heFFTe 2.4.0+ `__: for multi-node spectral solver (IGF) 
support - `BLAS++ `__ and `LAPACK++ `__: for spectral solver (PSATD) support in RZ geometry - `Boost 1.66.0+ `__: for QED lookup tables generation support - `openPMD-api 0.15.1+ `__: we automatically download and compile a copy of openPMD-api for openPMD I/O support @@ -81,7 +80,7 @@ Conda (Linux/macOS/Windows) .. code-block:: bash - conda create -n warpx-cpu-mpich-dev -c conda-forge blaspp boost ccache cmake compilers git "heffte=*=mpi_mpich*" lapackpp "openpmd-api=*=mpi_mpich*" openpmd-viewer python make numpy pandas scipy yt "fftw=*=mpi_mpich*" pkg-config matplotlib mamba mpich mpi4py ninja pip virtualenv + conda create -n warpx-cpu-mpich-dev -c conda-forge blaspp boost ccache cmake compilers git lapackpp "openpmd-api=*=mpi_mpich*" openpmd-viewer python make numpy pandas scipy yt "fftw=*=mpi_mpich*" pkg-config matplotlib mamba mpich mpi4py ninja pip virtualenv conda activate warpx-cpu-mpich-dev # compile WarpX with -DWarpX_MPI=ON diff --git a/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt b/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt index d6141f0b4ab..c5ec4583da1 100644 --- a/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt +++ b/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt @@ -12,15 +12,3 @@ if(WarpX_FFT) OFF # dependency ) endif() - -if(WarpX_HEFFTE) - add_warpx_test( - test_3d_open_bc_poisson_solver_heffte # name - 3 # dims - 2 # nprocs - inputs_test_3d_open_bc_poisson_solver_heffte # inputs - analysis.py # analysis - diags/diag1000001 # output - OFF # dependency - ) -endif() diff --git a/Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver_heffte b/Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver_heffte deleted file mode 100644 index 4f0a50df037..00000000000 --- a/Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver_heffte +++ /dev/null @@ -1 +0,0 @@ -FILE = inputs_test_3d_open_bc_poisson_solver diff --git a/GNUmakefile b/GNUmakefile index 1cc78403c7b..6298dd83369 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -38,7 +38,6 @@ USE_OPENPMD = FALSE WarpxBinDir = Bin USE_FFT = FALSE -USE_HEFFTE = FALSE USE_RZ = FALSE USE_EB = FALSE diff --git a/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json b/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json index af9ab3a0bdd..80561aaa4e1 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json +++ b/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver.json @@ -1,19 +1,19 @@ { "lev=0": { - "Bx": 100915933.446046, + "Bx": 100915933.44604117, "By": 157610622.18548763, - "Bz": 2.76973993530483e-13, - "Ex": 4.725065270619211e+16, - "Ey": 3.0253948989388292e+16, + "Bz": 9.614441087794229e-14, + "Ex": 4.725065270619209e+16, + "Ey": 3.025394898938681e+16, "Ez": 3276573.9514776673, "rho": 10994013582437.193 }, "electron": { - "particle_momentum_x": 5.701277606055763e-19, - "particle_momentum_y": 3.6504516636842883e-19, + "particle_momentum_x": 5.7012776060557455e-19, + "particle_momentum_y": 3.650451663685222e-19, "particle_momentum_z": 1.145432768297242e-10, "particle_position_x": 17.314086912497864, - "particle_position_y": 0.25836912671877965, + "particle_position_y": 0.25836912671877954, "particle_position_z": 10066.329600000008, "particle_weight": 19969036501.910976 } diff --git a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp index 546326d7fe0..b142978c8be 100644 --- 
a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp +++ b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp @@ -8,7 +8,6 @@ #include #include -#include #include #include @@ -18,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -25,13 +25,9 @@ #include #include #include +#include #include -#if defined(ABLASTR_USE_FFT) && defined(ABLASTR_USE_HEFFTE) -#include -#endif - - namespace ablastr::fields { void @@ -42,10 +38,6 @@ computePhiIGF ( amrex::MultiFab const & rho, { using namespace amrex::literals; - BL_PROFILE_VAR_NS("ablastr::fields::computePhiIGF: FFTs", timer_ffts); - BL_PROFILE_VAR_NS("ablastr::fields::computePhiIGF: FFT plans", timer_plans); - BL_PROFILE_VAR_NS("ablastr::fields::computePhiIGF: parallel copies", timer_pcopies); - BL_PROFILE("ablastr::fields::computePhiIGF"); // Define box that encompasses the full domain @@ -53,240 +45,44 @@ computePhiIGF ( amrex::MultiFab const & rho, domain.surroundingNodes(); // get nodal points, since `phi` and `rho` are nodal domain.grow( phi.nGrowVect() ); // include guard cells - int const nx = domain.length(0); - int const ny = domain.length(1); - int const nz = domain.length(2); - - // Allocate 2x wider arrays for the convolution of rho with the Green function - amrex::Box const realspace_box = amrex::Box( - {domain.smallEnd(0), domain.smallEnd(1), domain.smallEnd(2)}, - {2*nx-1+domain.smallEnd(0), 2*ny-1+domain.smallEnd(1), 2*nz-1+domain.smallEnd(2)}, - amrex::IntVect::TheNodeVector() ); + // Do we grow the domain in the z-direction in the 2D mode? + bool const do_2d_fft = false; -#if !defined(ABLASTR_USE_HEFFTE) - // Without distributed FFTs (i.e. without heFFTe): - // allocate the 2x wider array on a single box - amrex::BoxArray const realspace_ba = amrex::BoxArray( realspace_box ); - // Define a distribution mapping for the global FFT, with only one box - amrex::DistributionMapping dm_global_fft; - dm_global_fft.define( realspace_ba ); -#elif defined(ABLASTR_USE_HEFFTE) - // With distributed FFTs (i.e. with heFFTe): - // Define a new distribution mapping which is decomposed purely along z - // and has one box per MPI rank - int const nprocs = amrex::ParallelDescriptor::NProcs(); - amrex::BoxArray realspace_ba; - amrex::DistributionMapping dm_global_fft; + int nprocs = amrex::ParallelDescriptor::NProcs(); { - int realspace_nx = realspace_box.length(0); - int realspace_ny = realspace_box.length(1); - int realspace_nz = realspace_box.length(2); - int minsize_z = realspace_nz / nprocs; - int nleft_z = realspace_nz - minsize_z*nprocs; - - AMREX_ALWAYS_ASSERT(realspace_nz >= nprocs); - // We are going to split realspace_box in such a way that the first - // nleft boxes has minsize_z+1 nodes and the others minsize - // nodes. We do it this way instead of BoxArray::maxSize to make - // sure there are exactly nprocs boxes and there are no overlaps. 
- amrex::BoxList bl(amrex::IndexType::TheNodeType()); - for (int iproc = 0; iproc < nprocs; ++iproc) { - int zlo, zhi; - if (iproc < nleft_z) { - zlo = iproc*(minsize_z+1); - zhi = zlo + minsize_z; - - } else { - zlo = iproc*minsize_z + nleft_z; - zhi = zlo + minsize_z - 1; - - } - amrex::Box tbx(amrex::IntVect(0,0,zlo),amrex::IntVect(realspace_nx-1,realspace_ny-1,zhi),amrex::IntVect(1)); - - tbx.shift(realspace_box.smallEnd()); - bl.push_back(tbx); - } - realspace_ba.define(std::move(bl)); - amrex::Vector pmap(nprocs); - std::iota(pmap.begin(), pmap.end(), 0); - dm_global_fft.define(std::move(pmap)); + amrex::ParmParse pp("ablastr"); + pp.queryAdd("nprocs_igf_fft", nprocs); + nprocs = std::max(1,std::min(nprocs, amrex::ParallelDescriptor::NProcs())); } -#endif - - // Allocate required arrays - amrex::MultiFab tmp_rho = amrex::MultiFab(realspace_ba, dm_global_fft, 1, 0); - tmp_rho.setVal(0); - amrex::MultiFab tmp_G = amrex::MultiFab(realspace_ba, dm_global_fft, 1, 0); - tmp_G.setVal(0); - - BL_PROFILE_VAR_START(timer_pcopies); - // Copy from rho to tmp_rho - tmp_rho.ParallelCopy( rho, 0, 0, 1, amrex::IntVect::TheZeroVector(), amrex::IntVect::TheZeroVector() ); - BL_PROFILE_VAR_STOP(timer_pcopies); - -#if !defined(ABLASTR_USE_HEFFTE) - // Without distributed FFTs (i.e. without heFFTe): - // We loop over the original box (not the 2x wider one), and the other quadrants by periodicity - amrex::BoxArray const& igf_compute_box = amrex::BoxArray( domain ); -#else - // With distributed FFTs (i.e. with heFFTe): - // We loop over the full 2x wider box, since 1 MPI rank does not necessarily own the data for the other quadrants - amrex::BoxArray const& igf_compute_box = tmp_G.boxArray(); -#endif - - // Compute the integrated Green function -#ifdef AMREX_USE_OMP -#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) -#endif - for (amrex::MFIter mfi(igf_compute_box, dm_global_fft, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { - - amrex::Box const bx = mfi.tilebox(); - amrex::IntVect const lo = realspace_box.smallEnd(); - amrex::IntVect const hi = realspace_box.bigEnd(); - - // Fill values of the Green function - amrex::Real const dx = cell_size[0]; - amrex::Real const dy = cell_size[1]; - amrex::Real const dz = cell_size[2]; - - amrex::Array4 const tmp_G_arr = tmp_G.array(mfi); - amrex::ParallelFor( bx, - [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept - { - int const i0 = i - lo[0]; - int const j0 = j - lo[1]; - int const k0 = k - lo[2]; - amrex::Real const x = i0*dx; - amrex::Real const y = j0*dy; - amrex::Real const z = k0*dz; - -#if !defined(ABLASTR_USE_HEFFTE) - // Without distributed FFTs (i.e. without heFFTe): - amrex::Real const G_value = SumOfIntegratedPotential(x , y , z , dx, dy, dz); - tmp_G_arr(i,j,k) = G_value; - // Fill the rest of the array by periodicity - if (i0>0) {tmp_G_arr(hi[0]+1-i0, j , k ) = G_value;} - if (j0>0) {tmp_G_arr(i , hi[1]+1-j0, k ) = G_value;} - if (k0>0) {tmp_G_arr(i , j , hi[2]+1-k0) = G_value;} - if ((i0>0)&&(j0>0)) {tmp_G_arr(hi[0]+1-i0, hi[1]+1-j0, k ) = G_value;} - if ((j0>0)&&(k0>0)) {tmp_G_arr(i , hi[1]+1-j0, hi[2]+1-k0) = G_value;} - if ((i0>0)&&(k0>0)) {tmp_G_arr(hi[0]+1-i0, j , hi[2]+1-k0) = G_value;} - if ((i0>0)&&(j0>0)&&(k0>0)) {tmp_G_arr(hi[0]+1-i0, hi[1]+1-j0, hi[2]+1-k0) = G_value;} -#else - // With distributed FFTs (i.e. 
with heFFTe): - amrex::Real x_hi = dx*(hi[0]+2); - amrex::Real y_hi = dy*(hi[1]+2); - amrex::Real z_hi = dz*(hi[2]+2); - if ((i0< nx)&&(j0< ny)&&(k0< nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x , y , z , dx, dy, dz); } - if ((i0< nx)&&(j0> ny)&&(k0< nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x , y_hi-y, z , dx, dy, dz); } - if ((i0< nx)&&(j0< ny)&&(k0> nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x , y , z_hi-z, dx, dy, dz); } - if ((i0> nx)&&(j0> ny)&&(k0< nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x_hi-x, y_hi-y, z , dx, dy, dz); } - if ((i0< nx)&&(j0> ny)&&(k0> nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x , y_hi-y, z_hi-z, dx, dy, dz); } - if ((i0> nx)&&(j0< ny)&&(k0> nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x_hi-x, y , z_hi-z, dx, dy, dz); } - if ((i0> nx)&&(j0> ny)&&(k0> nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x_hi-x, y_hi-y, z_hi-z, dx, dy, dz); } - if ((i0> nx)&&(j0< ny)&&(k0< nz)) { tmp_G_arr(i,j,k) = SumOfIntegratedPotential(x_hi-x, y , z , dx, dy, dz); } -#endif - } - ); + static std::unique_ptr> obc_solver; + if (!obc_solver) { + amrex::ExecOnFinalize([&] () { obc_solver.reset(); }); } - - // Prepare to perform global FFT - // Since there is 1 MPI rank per box, here each MPI rank obtains its local box and the associated boxid - const int local_boxid = amrex::ParallelDescriptor::MyProc(); // because of how we made the DistributionMapping - if (local_boxid < realspace_ba.size()) { - // When not using heFFTe, there is only one box (the global box) - // It is taken care of my MPI rank 0 ; other ranks have no work (hence the if condition) - - const amrex::Box local_nodal_box = realspace_ba[local_boxid]; - amrex::Box local_box(local_nodal_box.smallEnd(), local_nodal_box.bigEnd()); - local_box.shift(-realspace_box.smallEnd()); // This simplifies the setup because the global lo is zero now - // Since we the domain decompostion is in the z-direction, setting up c_local_box is simple. 
- amrex::Box c_local_box = local_box; - c_local_box.setBig(0, local_box.length(0)/2+1); - - // Allocate array in spectral space - using SpectralField = amrex::BaseFab< amrex::GpuComplex< amrex::Real > > ; - SpectralField tmp_rho_fft(c_local_box, 1, amrex::The_Device_Arena()); - SpectralField tmp_G_fft(c_local_box, 1, amrex::The_Device_Arena()); - tmp_rho_fft.shift(realspace_box.smallEnd()); - tmp_G_fft.shift(realspace_box.smallEnd()); - - // Create FFT plans - BL_PROFILE_VAR_START(timer_plans); -#if !defined(ABLASTR_USE_HEFFTE) - const amrex::IntVect fft_size = realspace_ba[local_boxid].length(); - ablastr::math::anyfft::FFTplan forward_plan_rho = ablastr::math::anyfft::CreatePlan( - fft_size, tmp_rho[local_boxid].dataPtr(), - reinterpret_cast(tmp_rho_fft.dataPtr()), - ablastr::math::anyfft::direction::R2C, AMREX_SPACEDIM); - ablastr::math::anyfft::FFTplan forward_plan_G = ablastr::math::anyfft::CreatePlan( - fft_size, tmp_G[local_boxid].dataPtr(), - reinterpret_cast(tmp_G_fft.dataPtr()), - ablastr::math::anyfft::direction::R2C, AMREX_SPACEDIM); - ablastr::math::anyfft::FFTplan backward_plan = ablastr::math::anyfft::CreatePlan( - fft_size, tmp_G[local_boxid].dataPtr(), - reinterpret_cast( tmp_G_fft.dataPtr()), - ablastr::math::anyfft::direction::C2R, AMREX_SPACEDIM); -#elif defined(ABLASTR_USE_HEFFTE) -#if defined(AMREX_USE_CUDA) - heffte::fft3d_r2c fft -#elif defined(AMREX_USE_HIP) - heffte::fft3d_r2c fft -#else - heffte::fft3d_r2c fft -#endif - ({{local_box.smallEnd(0), local_box.smallEnd(1), local_box.smallEnd(2)}, - {local_box.bigEnd(0), local_box.bigEnd(1), local_box.bigEnd(2)}}, - {{c_local_box.smallEnd(0), c_local_box.smallEnd(1), c_local_box.smallEnd(2)}, - {c_local_box.bigEnd(0), c_local_box.bigEnd(1), c_local_box.bigEnd(2)}}, - 0, amrex::ParallelDescriptor::Communicator()); - using heffte_complex = typename heffte::fft_output::type; - heffte_complex* rho_fft_data = (heffte_complex*) tmp_rho_fft.dataPtr(); - heffte_complex* G_fft_data = (heffte_complex*) tmp_G_fft.dataPtr(); -#endif - BL_PROFILE_VAR_STOP(timer_plans); - - // Perform forward FFTs - BL_PROFILE_VAR_START(timer_ffts); -#if !defined(ABLASTR_USE_HEFFTE) - ablastr::math::anyfft::Execute(forward_plan_rho); - ablastr::math::anyfft::Execute(forward_plan_G); -#elif defined(ABLASTR_USE_HEFFTE) - fft.forward(tmp_rho[local_boxid].dataPtr(), rho_fft_data); - fft.forward(tmp_G[local_boxid].dataPtr(), G_fft_data); -#endif - BL_PROFILE_VAR_STOP(timer_ffts); - - // Multiply tmp_G_fft and tmp_rho_fft in spectral space - // Store the result in-place in Gtmp_G_fft, to save memory - tmp_G_fft.template mult(tmp_rho_fft, 0, 0, 1); - amrex::Gpu::streamSynchronize(); - - // Perform backward FFT - BL_PROFILE_VAR_START(timer_ffts); -#if !defined(ABLASTR_USE_HEFFTE) - ablastr::math::anyfft::Execute(backward_plan); -#elif defined(ABLASTR_USE_HEFFTE) - fft.backward(G_fft_data, tmp_G[local_boxid].dataPtr()); -#endif - BL_PROFILE_VAR_STOP(timer_ffts); - -#if !defined(ABLASTR_USE_HEFFTE) - // Loop to destroy FFT plans - ablastr::math::anyfft::DestroyPlan(forward_plan_G); - ablastr::math::anyfft::DestroyPlan(forward_plan_rho); - ablastr::math::anyfft::DestroyPlan(backward_plan); -#endif + if (!obc_solver || obc_solver->Domain() != domain) { + amrex::FFT::Info info{}; + if (do_2d_fft) { info.setBatchMode(true); } + info.setNumProcs(nprocs); + obc_solver = std::make_unique>(domain, info); } - // Normalize, since (FFT + inverse FFT) results in a factor N - const amrex::Real normalization = 1._rt / realspace_box.numPts(); - tmp_G.mult( normalization ); 
- - BL_PROFILE_VAR_START(timer_pcopies); - // Copy from tmp_G to phi - phi.ParallelCopy( tmp_G, 0, 0, 1, amrex::IntVect::TheZeroVector(), phi.nGrowVect()); - BL_PROFILE_VAR_STOP(timer_pcopies); + auto const& lo = domain.smallEnd(); + amrex::Real const dx = cell_size[0]; + amrex::Real const dy = cell_size[1]; + amrex::Real const dz = cell_size[2]; + + obc_solver->setGreensFunction( + [=] AMREX_GPU_DEVICE (int i, int j, int k) -> amrex::Real + { + int const i0 = i - lo[0]; + int const j0 = j - lo[1]; + int const k0 = k - lo[2]; + amrex::Real const x = i0*dx; + amrex::Real const y = j0*dy; + amrex::Real const z = k0*dz; + return SumOfIntegratedPotential(x, y, z, dx, dy, dz); + }); + + obc_solver->solve(phi, rho); } } // namespace ablastr::fields diff --git a/Tools/machines/desktop/spack-macos-openmp.yaml b/Tools/machines/desktop/spack-macos-openmp.yaml index 3ea78625b78..820cf7069fd 100644 --- a/Tools/machines/desktop/spack-macos-openmp.yaml +++ b/Tools/machines/desktop/spack-macos-openmp.yaml @@ -23,7 +23,6 @@ spack: - conduit ~fortran - fftw - hdf5 ~fortran - - heffte ~cuda +fftw - lapackpp ~cuda ~rocm ^blaspp ~cuda +openmp ~rocm - mpi - llvm-openmp diff --git a/Tools/machines/desktop/spack-ubuntu-cuda.yaml b/Tools/machines/desktop/spack-ubuntu-cuda.yaml index 19b9ae12e24..08d0c95ee4b 100644 --- a/Tools/machines/desktop/spack-ubuntu-cuda.yaml +++ b/Tools/machines/desktop/spack-ubuntu-cuda.yaml @@ -25,7 +25,6 @@ spack: - cuda - fftw - hdf5 - - heffte - lapackpp - mpi - pkgconfig diff --git a/Tools/machines/desktop/spack-ubuntu-openmp.yaml b/Tools/machines/desktop/spack-ubuntu-openmp.yaml index 1eb7d4074a7..b658f1e009d 100644 --- a/Tools/machines/desktop/spack-ubuntu-openmp.yaml +++ b/Tools/machines/desktop/spack-ubuntu-openmp.yaml @@ -22,7 +22,6 @@ spack: - ecp-data-vis-sdk +adios2 +ascent +hdf5 +sensei - fftw - hdf5 - - heffte ~cuda +fftw - lapackpp ~cuda ~rocm ^blaspp ~cuda +openmp ~rocm - mpi - pkgconfig diff --git a/Tools/machines/desktop/spack-ubuntu-rocm.yaml b/Tools/machines/desktop/spack-ubuntu-rocm.yaml index 7eee1baa13c..45c9b0f776e 100644 --- a/Tools/machines/desktop/spack-ubuntu-rocm.yaml +++ b/Tools/machines/desktop/spack-ubuntu-rocm.yaml @@ -21,7 +21,6 @@ spack: - cmake - ecp-data-vis-sdk +adios2 +ascent +hdf5 +sensei - hdf5 - - heffte - hip - lapackpp - llvm-amdgpu diff --git a/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh b/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh index cd29664a978..fd3a2d3f756 100755 --- a/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh +++ b/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh @@ -96,45 +96,6 @@ CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S $HOME/src/lapackpp -B ${build_dir}/lap cmake --build ${build_dir}/lapackpp-a100-build --target install --parallel 16 rm -rf ${build_dir}/lapackpp-a100-build -# heFFTe -if [ -d $HOME/src/heffte ] -then - cd $HOME/src/heffte - git fetch --prune - git checkout v2.4.0 - cd - -else - git clone -b v2.4.0 https://github.com/icl-utk-edu/heffte.git ${HOME}/src/heffte -fi -rm -rf ${HOME}/src/heffte-a100-build -cmake \ - -S ${HOME}/src/heffte \ - -B ${build_dir}/heffte-a100-build \ - -DBUILD_SHARED_LIBS=ON \ - -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_CXX_STANDARD=17 \ - -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON \ - -DCMAKE_INSTALL_PREFIX=${SW_DIR}/heffte-2.4.0 \ - -DHeffte_DISABLE_GPU_AWARE_MPI=OFF \ - -DHeffte_ENABLE_AVX=OFF \ - -DHeffte_ENABLE_AVX512=OFF \ - -DHeffte_ENABLE_FFTW=OFF \ - -DHeffte_ENABLE_CUDA=ON \ - -DHeffte_ENABLE_ROCM=OFF \ - -DHeffte_ENABLE_ONEAPI=OFF \ - 
-DHeffte_ENABLE_MKL=OFF \ - -DHeffte_ENABLE_DOXYGEN=OFF \ - -DHeffte_SEQUENTIAL_TESTING=OFF \ - -DHeffte_ENABLE_TESTING=OFF \ - -DHeffte_ENABLE_TRACING=OFF \ - -DHeffte_ENABLE_PYTHON=OFF \ - -DHeffte_ENABLE_FORTRAN=OFF \ - -DHeffte_ENABLE_SWIG=OFF \ - -DHeffte_ENABLE_MAGMA=OFF -cmake --build ${build_dir}/heffte-a100-build --target install --parallel 16 -rm -rf ${build_dir}/heffte-a100-build - - # Python ###################################################################### # python3 -m pip install --upgrade pip diff --git a/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example b/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example index 148299f281c..57c98da9b4a 100644 --- a/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example +++ b/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example @@ -20,13 +20,11 @@ export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.1:${CMAKE_PREFIX_PATH} export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.8.3:${CMAKE_PREFIX_PATH} export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.05.31:${CMAKE_PREFIX_PATH} export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.05.31:${CMAKE_PREFIX_PATH} -export CMAKE_PREFIX_PATH=${SW_DIR}/heffte-2.4.0:${CMAKE_PREFIX_PATH} export LD_LIBRARY_PATH=${SW_DIR}/c-blosc-1.21.1/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.8.3/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${SW_DIR}/heffte-2.4.0/lib64:$LD_LIBRARY_PATH export PATH=${SW_DIR}/adios2-2.8.3/bin:${PATH} diff --git a/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh b/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh index 437300b8303..7608cb3f666 100755 --- a/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh +++ b/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh @@ -107,45 +107,6 @@ CXX=$(which CC) CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S $HOME/src/lapackpp -B cmake --build ${build_dir}/lapackpp-pm-cpu-build --target install --parallel 16 rm -rf ${build_dir}/lapackpp-pm-cpu-build -# heFFTe -if [ -d $HOME/src/heffte ] -then - cd $HOME/src/heffte - git fetch --prune - git checkout v2.4.0 - cd - -else - git clone -b v2.4.0 https://github.com/icl-utk-edu/heffte.git ${HOME}/src/heffte -fi -rm -rf ${HOME}/src/heffte-pm-cpu-build -cmake \ - -S ${HOME}/src/heffte \ - -B ${build_dir}/heffte-pm-cpu-build \ - -DBUILD_SHARED_LIBS=ON \ - -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_CXX_STANDARD=17 \ - -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON \ - -DCMAKE_INSTALL_PREFIX=${SW_DIR}/heffte-2.4.0 \ - -DHeffte_DISABLE_GPU_AWARE_MPI=ON \ - -DHeffte_ENABLE_AVX=ON \ - -DHeffte_ENABLE_AVX512=OFF \ - -DHeffte_ENABLE_FFTW=ON \ - -DHeffte_ENABLE_CUDA=OFF \ - -DHeffte_ENABLE_ROCM=OFF \ - -DHeffte_ENABLE_ONEAPI=OFF \ - -DHeffte_ENABLE_MKL=OFF \ - -DHeffte_ENABLE_DOXYGEN=OFF \ - -DHeffte_SEQUENTIAL_TESTING=OFF \ - -DHeffte_ENABLE_TESTING=OFF \ - -DHeffte_ENABLE_TRACING=OFF \ - -DHeffte_ENABLE_PYTHON=OFF \ - -DHeffte_ENABLE_FORTRAN=OFF \ - -DHeffte_ENABLE_SWIG=OFF \ - -DHeffte_ENABLE_MAGMA=OFF -cmake --build ${build_dir}/heffte-pm-cpu-build --target install --parallel 16 -rm -rf ${build_dir}/heffte-pm-cpu-build - - # Python ###################################################################### # python3 -m pip install --upgrade pip diff --git a/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh b/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh index 
c77f075a3a8..d08ca7457d4 100755 --- a/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh +++ b/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh @@ -107,49 +107,6 @@ CXX=$(which CC) CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S $HOME/src/lapackpp -B cmake --build ${build_dir}/lapackpp-pm-gpu-build --target install --parallel 16 rm -rf ${build_dir}/lapackpp-pm-gpu-build -# heFFTe -if [ -d $HOME/src/heffte ] -then - cd $HOME/src/heffte - git fetch --prune - git checkout v2.4.0 - cd - -else - git clone -b v2.4.0 https://github.com/icl-utk-edu/heffte.git ${HOME}/src/heffte -fi -rm -rf ${HOME}/src/heffte-pm-gpu-build -cmake \ - -S ${HOME}/src/heffte \ - -B ${build_dir}/heffte-pm-gpu-build \ - -DBUILD_SHARED_LIBS=ON \ - -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_CXX_STANDARD=17 \ - -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON \ - -DCMAKE_INSTALL_PREFIX=${SW_DIR}/heffte-2.4.0 \ - -DHeffte_DISABLE_GPU_AWARE_MPI=OFF \ - -DHeffte_ENABLE_AVX=OFF \ - -DHeffte_ENABLE_AVX512=OFF \ - -DHeffte_ENABLE_FFTW=OFF \ - -DHeffte_ENABLE_CUDA=ON \ - -DHeffte_ENABLE_ROCM=OFF \ - -DHeffte_ENABLE_ONEAPI=OFF \ - -DHeffte_ENABLE_MKL=OFF \ - -DHeffte_ENABLE_DOXYGEN=OFF \ - -DHeffte_SEQUENTIAL_TESTING=OFF \ - -DHeffte_ENABLE_TESTING=OFF \ - -DHeffte_ENABLE_TRACING=OFF \ - -DHeffte_ENABLE_PYTHON=OFF \ - -DHeffte_ENABLE_FORTRAN=OFF \ - -DHeffte_ENABLE_SWIG=OFF \ - -DHeffte_ENABLE_MAGMA=OFF -cmake --build ${build_dir}/heffte-pm-gpu-build --target install --parallel 16 -rm -rf ${build_dir}/heffte-pm-gpu-build - -# work-around for heFFTe 2.4.0 bug with NVCC -# https://github.com/icl-utk-edu/heffte/pull/54 -sed -i 's/__AVX__/NOTDEFINED_DONOTUSE/g' ${SW_DIR}/heffte-2.4.0/include/stock_fft/heffte_stock_vec_types.h - - # Python ###################################################################### # python3 -m pip install --upgrade pip diff --git a/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example b/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example index 94d598abf5b..99817924ad6 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example +++ b/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example @@ -19,13 +19,11 @@ export CMAKE_PREFIX_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/c-blosc-1.21.1 export CMAKE_PREFIX_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/adios2-2.8.3:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/blaspp-2024.05.31:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/lapackpp-2024.05.31:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/heffte-2.4.0:$CMAKE_PREFIX_PATH export LD_LIBRARY_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/c-blosc-1.21.1/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/adios2-2.8.3/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/heffte-2.4.0/lib64:$LD_LIBRARY_PATH export PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/adios2-2.8.3/bin:${PATH} diff --git a/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example b/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example index da1d55964d1..1e5325e29b9 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example +++ 
b/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example @@ -23,13 +23,11 @@ export CMAKE_PREFIX_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/c-blosc-1.2 export CMAKE_PREFIX_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/adios2-2.8.3:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/blaspp-2024.05.31:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/lapackpp-2024.05.31:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/heffte-2.4.0:$CMAKE_PREFIX_PATH export LD_LIBRARY_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/c-blosc-1.21.1/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/adios2-2.8.3/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/heffte-2.4.0/lib64:$LD_LIBRARY_PATH export PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/adios2-2.8.3/bin:${PATH} diff --git a/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh b/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh index 7e002838e4a..95633549698 100644 --- a/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh +++ b/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh @@ -143,48 +143,6 @@ cmake \ --parallel ${build_procs} rm -rf ${build_dir}/lapackpp-tioga-mi300a-build -# heFFTe -if [ -d ${SRC_DIR}/heffte ] -then - cd ${SRC_DIR}/heffte - git fetch --prune - git checkout v2.4.0 - cd - -else - git clone -b v2.4.0 https://github.com/icl-utk-edu/heffte.git ${SRC_DIR}/heffte -fi -cmake \ - --fresh \ - -S ${SRC_DIR}/heffte \ - -B ${build_dir}/heffte-build \ - -DBUILD_SHARED_LIBS=ON \ - -DCMAKE_BUILD_TYPE=Release \ - -DCMAKE_CXX_STANDARD=17 \ - -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON \ - -DCMAKE_INSTALL_PREFIX=${SW_DIR}/heffte-2.4.0 \ - -DHeffte_DISABLE_GPU_AWARE_MPI=OFF \ - -DHeffte_ENABLE_AVX=OFF \ - -DHeffte_ENABLE_AVX512=OFF \ - -DHeffte_ENABLE_FFTW=OFF \ - -DHeffte_ENABLE_CUDA=OFF \ - -DHeffte_ENABLE_ROCM=ON \ - -DHeffte_ENABLE_ONEAPI=OFF \ - -DHeffte_ENABLE_MKL=OFF \ - -DHeffte_ENABLE_DOXYGEN=OFF \ - -DHeffte_SEQUENTIAL_TESTING=OFF \ - -DHeffte_ENABLE_TESTING=OFF \ - -DHeffte_ENABLE_TRACING=OFF \ - -DHeffte_ENABLE_PYTHON=OFF \ - -DHeffte_ENABLE_FORTRAN=OFF \ - -DHeffte_ENABLE_SWIG=OFF \ - -DHeffte_ENABLE_MAGMA=OFF -cmake \ - --build ${build_dir}/heffte-build \ - --target install \ - --parallel ${build_procs} -rm -rf ${build_dir}/heffte-build - - # Python ###################################################################### # # sometimes, the Lassen PIP Index is down diff --git a/Tools/machines/tioga-llnl/tioga_mi300a_warpx.profile.example b/Tools/machines/tioga-llnl/tioga_mi300a_warpx.profile.example index e3da37c5522..53fe21844c1 100644 --- a/Tools/machines/tioga-llnl/tioga_mi300a_warpx.profile.example +++ b/Tools/machines/tioga-llnl/tioga_mi300a_warpx.profile.example @@ -31,13 +31,11 @@ export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-2.15.1:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.10.1:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.05.31:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.05.31:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${SW_DIR}/heffte-2.4.0:$CMAKE_PREFIX_PATH export 
LD_LIBRARY_PATH=${SW_DIR}/c-blosc-2.15.1/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.10.1/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${SW_DIR}/heffte-2.4.0/lib64:$LD_LIBRARY_PATH export PATH=${SW_DIR}/adios2-2.10.1/bin:${PATH} diff --git a/cmake/WarpXFunctions.cmake b/cmake/WarpXFunctions.cmake index 43efd89efc5..543d0cd0ce4 100644 --- a/cmake/WarpXFunctions.cmake +++ b/cmake/WarpXFunctions.cmake @@ -313,10 +313,6 @@ function(set_warpx_binary_name D) set_property(TARGET ${tgt} APPEND_STRING PROPERTY OUTPUT_NAME ".FFT") endif() - if(WarpX_HEFFTE) - set_property(TARGET ${tgt} APPEND_STRING PROPERTY OUTPUT_NAME ".HEFFTE") - endif() - if(WarpX_EB) set_property(TARGET ${tgt} APPEND_STRING PROPERTY OUTPUT_NAME ".EB") endif() @@ -462,7 +458,6 @@ function(warpx_print_summary) message(" PARTICLE PRECISION: ${WarpX_PARTICLE_PRECISION}") message(" PRECISION: ${WarpX_PRECISION}") message(" FFT Solvers: ${WarpX_FFT}") - message(" heFFTe: ${WarpX_HEFFTE}") message(" PYTHON: ${WarpX_PYTHON}") if(WarpX_PYTHON) message(" PYTHON IPO: ${WarpX_PYTHON_IPO}") diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index e1072d03014..491e333d712 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -51,6 +51,12 @@ macro(find_amrex) set(AMReX_OMP OFF CACHE INTERNAL "") endif() + if(WarpX_FFT) + set(AMReX_FFT ON CACHE INTERNAL "") + else() + set(AMReX_FFT OFF CACHE INTERNAL "") + endif() + if(WarpX_EB) set(AMReX_EB ON CACHE INTERNAL "") else() @@ -243,6 +249,11 @@ macro(find_amrex) foreach(D IN LISTS WarpX_amrex_dim) set(COMPONENT_DIMS ${COMPONENT_DIMS} ${D}D) endforeach() + if(WarpX_FFT) + set(COMPONENT_FFT FFT) + else() + set(COMPONENT_FFT) + endif() if(WarpX_EB) set(COMPONENT_EB EB) else() @@ -260,7 +271,7 @@ macro(find_amrex) endif() set(COMPONENT_PRECISION ${WarpX_PRECISION} P${WarpX_PARTICLE_PRECISION}) - find_package(AMReX 24.11 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) + find_package(AMReX 456c93c7d9512f1cdffac0574973d7df41417898 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} ${COMPONENT_FFT} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) # note: TINYP skipped because user-configured and optional # AMReX CMake helper scripts @@ -283,7 +294,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "4b703fec6c2ff983e465c8cef0cc4947231edb07" +set(WarpX_amrex_branch "456c93c7d9512f1cdffac0574973d7df41417898" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/setup.py b/setup.py index fc99b75f2f0..cdb8a6d844e 100644 --- a/setup.py +++ b/setup.py @@ -105,7 +105,6 @@ def build_extension(self, ext): "-DWarpX_PRECISION=" + WARPX_PRECISION, "-DWarpX_PARTICLE_PRECISION=" + WARPX_PARTICLE_PRECISION, "-DWarpX_FFT:BOOL=" + WARPX_FFT, - "-DWarpX_HEFFTE:BOOL=" + WARPX_HEFFTE, "-DWarpX_PYTHON:BOOL=ON", "-DWarpX_PYTHON_IPO:BOOL=" + WARPX_PYTHON_IPO, "-DWarpX_QED:BOOL=" + WARPX_QED, @@ -208,7 +207,6 @@ def build_extension(self, ext): WARPX_PRECISION = env.pop("WARPX_PRECISION", "DOUBLE") 
WARPX_PARTICLE_PRECISION = env.pop("WARPX_PARTICLE_PRECISION", WARPX_PRECISION) WARPX_FFT = env.pop("WARPX_FFT", "OFF") -WARPX_HEFFTE = env.pop("WARPX_HEFFTE", "OFF") WARPX_QED = env.pop("WARPX_QED", "ON") WARPX_QED_TABLE_GEN = env.pop("WARPX_QED_TABLE_GEN", "OFF") WARPX_DIMS = env.pop("WARPX_DIMS", "1;2;RZ;3") From 55653b33f29508b7e402fb15be635cf5c760bc29 Mon Sep 17 00:00:00 2001 From: David Grote Date: Mon, 18 Nov 2024 11:05:59 -0800 Subject: [PATCH 094/278] Update license to explicitly list LLNL (#5461) --- LICENSE.txt | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/LICENSE.txt b/LICENSE.txt index 2965985ebb1..ba0df767288 100644 --- a/LICENSE.txt +++ b/LICENSE.txt @@ -14,10 +14,11 @@ this list of conditions and the following disclaimer. notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. -(3) Neither the name of the University of California, Lawrence Berkeley -National Laboratory, U.S. Dept. of Energy nor the names of its contributors -may be used to endorse or promote products derived from this software -without specific prior written permission. +(3) Neither the name of the University of California, +Lawrence Berkeley National Laboratory, Lawrence Livermore National Security, +Lawrence Livermore National Laboratory, U.S. Dept. of Energy nor the names of +its contributors may be used to endorse or promote products derived from this +software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" From c9c8f2c0dac08a756346e1d74ddf86a58536620a Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 18 Nov 2024 11:38:20 -0800 Subject: [PATCH 095/278] AMReX/pyAMReX/PICSAR: weekly update (#5468) - Weekly update to latest AMReX: ```console ./Tools/Release/updateAMReX.py ``` - Weekly update to latest pyAMReX: ```console ./Tools/Release/updatepyAMReX.py ``` - Weekly update to latest PICSAR (no changes): ```console ./Tools/Release/updatePICSAR.py ``` --- cmake/dependencies/pyAMReX.cmake | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index 1dbd5e9fde6..8e0e26e55db 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "24.11" +set(WarpX_pyamrex_branch "66fc71fecf77eee903e9c60100f1243f9e157744" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") From 6606083088bc0fea6c7c9de9822996b05bcb35dc Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 18 Nov 2024 15:54:22 -0800 Subject: [PATCH 096/278] Docs: update list of TC members (#5465) I think we forgot to update the list of TC members available in the GitHub repository and in the online documentation. 
--- GOVERNANCE.rst | 2 ++ 1 file changed, 2 insertions(+) diff --git a/GOVERNANCE.rst b/GOVERNANCE.rst index 588e8b2df6e..f0efb350213 100644 --- a/GOVERNANCE.rst +++ b/GOVERNANCE.rst @@ -54,7 +54,9 @@ Technical Committee Current Roster ^^^^^^^^^^^^^^ +- Justin Ray Angus - Luca Fedeli +- Arianna Formenti - Roelof Groenewald - David Grote - Axel Huebl From ccef6f8e7793247a213d0de48571c7bfec6be1e8 Mon Sep 17 00:00:00 2001 From: Marco Garten Date: Mon, 18 Nov 2024 16:04:50 -0800 Subject: [PATCH 097/278] Documentation: Clarify reduced diagnostics (#5462) This commit aims to fix a potential misunderstanding that reduced diagnostics are not for production purposes by emphasizing that "reduced" indicates in-situ reduction operations. Additionally, not all reduced diagnostics are text files anymore. Hence, the docs are being adjusted by this PR. --- Docs/source/usage/parameters.rst | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 7e513f4484d..31c3ca947fb 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2680,7 +2680,7 @@ WarpX has five types of diagnostics: ``TimeAveraged`` diagnostics only allow field data, which they output after averaging over a period of time, ``BackTransformed`` diagnostics are used when running a simulation in a boosted frame, to reconstruct output data to the lab frame, ``BoundaryScraping`` diagnostics are used to collect the particles that are absorbed at the boundary, throughout the simulation, and -``ReducedDiags`` allow the user to compute some reduced quantity (particle temperature, max of a field) and write a small amount of data to text files. +``ReducedDiags`` enable users to compute specific reduced quantities, such as particle temperature, energy histograms, or maximum field values, and efficiently save this in-situ analyzed data to files. Similar to what is done for physical species, WarpX has a class Diagnostics that allows users to initialize different diagnostics, each of them with different fields, resolution and period. This currently applies to standard diagnostics, but should be extended to back-transformed diagnostics and reduced diagnostics (and others) in a near future. @@ -3064,15 +3064,14 @@ In addition to their usual attributes, the saved particles have Reduced Diagnostics ^^^^^^^^^^^^^^^^^^^ -``ReducedDiags`` allow the user to compute some reduced quantity (particle temperature, max of a field) and write a small amount of data to text files. +``ReducedDiags`` enable users to compute specific reduced quantities, such as particle temperature, energy histograms, or maximum field values, and efficiently save this in-situ analyzed data to files. +This shifts analysis from post-processing to runtime calculation of reduction operations (average, maximum, ...) and can greatly save disk space when "raw" particle and field outputs from `FullDiagnostics` can be avoided in favor of single values, 1D or 2D data at possibly even higher time resolution. * ``warpx.reduced_diags_names`` (`strings`, separated by spaces) - The names given by the user of simple reduced diagnostics. - Also the names of the output `.txt` files. - This reduced diagnostics aims to produce simple outputs - of the time history of some physical quantities. + A list of user-given names for reduced diagnostics. + By default, these names are also prefixing the names of output files. 
If ``warpx.reduced_diags_names`` is not provided in the input file, - no reduced diagnostics will be done. + no reduced diagnostics will be activated during the run. This is then used in the rest of the input deck; in this documentation we use ```` as a placeholder. @@ -3080,7 +3079,7 @@ Reduced Diagnostics The type of reduced diagnostics associated with this ````. For example, ``ParticleEnergy``, ``FieldEnergy``, etc. All available types are described below in detail. - For all reduced diagnostics, + For all reduced diagnostics that are writing tabular data into text files, the first and the second columns in the output file are the time step and the corresponding physical time in seconds, respectively. From 4739e10a3c22f3b870755bebcf40dfbf51d0e984 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 19 Nov 2024 00:57:23 +0000 Subject: [PATCH 098/278] [pre-commit.ci] pre-commit autoupdate (#5470) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.3 → v0.7.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.3...v0.7.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d9a0a8bfdea..16a23ada3b5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.3 + rev: v0.7.4 hooks: # Run the linter - id: ruff From a1a677e3a4ff9efcedbbd648e455d958e555132f Mon Sep 17 00:00:00 2001 From: Justin Ray Angus Date: Mon, 25 Nov 2024 10:50:02 -0800 Subject: [PATCH 099/278] Split Bfield advance in two for SemiImplicit_EM evolve (#5483) This PR splits the BField advance for the SemiImplicit_EM evolve scheme into two half dt advances, consistent with the explicit evolve scheme. This makes it such that the B field at write time is at the same instance in time as the electric field, as stated in the documentation. --- .../test_1d_semi_implicit_picard.json | 4 ++-- .../FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp | 12 ++++++++---- .../FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp | 10 ++-------- Source/WarpX.H | 1 - 4 files changed, 12 insertions(+), 15 deletions(-) diff --git a/Regression/Checksum/benchmarks_json/test_1d_semi_implicit_picard.json b/Regression/Checksum/benchmarks_json/test_1d_semi_implicit_picard.json index 2c9859b037d..758db4355ec 100644 --- a/Regression/Checksum/benchmarks_json/test_1d_semi_implicit_picard.json +++ b/Regression/Checksum/benchmarks_json/test_1d_semi_implicit_picard.json @@ -1,7 +1,7 @@ { "lev=0": { - "Bx": 3559.0541122456157, - "By": 1685.942868827529, + "Bx": 3625.566538877196, + "By": 1684.1769211109035, "Bz": 0.0, "Ex": 796541204346.5195, "Ey": 961740397927.6577, diff --git a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp index 117c3baecaa..f558b3d9756 100644 --- a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp @@ -61,7 +61,7 @@ void SemiImplicitEM::OneStep ( amrex::Real a_time, // Set the member time step m_dt = a_dt; - // Fields have Eg^{n}, Bg^{n-1/2} + // Fields have Eg^{n}, Bg^{n} // Particles have up^{n} and xp^{n}. 
// Save up and xp at the start of the time step
@@ -70,9 +70,9 @@ void SemiImplicitEM::OneStep ( amrex::Real a_time,
 // Save Eg at the start of the time step
 m_Eold.Copy( FieldType::Efield_fp );
- // Advance WarpX owned Bfield_fp to t_{n+1/2}
- m_WarpX->EvolveB(m_dt, DtType::Full);
- m_WarpX->ApplyMagneticFieldBCs();
+ // Advance WarpX owned Bfield_fp from t_{n} to t_{n+1/2}
+ m_WarpX->EvolveB(0.5_rt*m_dt, DtType::FirstHalf);
+ m_WarpX->FillBoundaryB(m_WarpX->getngEB(), true);
 const amrex::Real half_time = a_time + 0.5_rt*m_dt;
@@ -92,6 +92,10 @@ void SemiImplicitEM::OneStep ( amrex::Real a_time,
 m_E.linComb( 2._rt, m_E, -1._rt, m_Eold );
 m_WarpX->SetElectricFieldAndApplyBCs( m_E );
+ // Advance WarpX owned Bfield_fp from t_{n+1/2} to t_{n+1}
+ m_WarpX->EvolveB(0.5_rt*m_dt, DtType::SecondHalf);
+ m_WarpX->FillBoundaryB(m_WarpX->getngEB(), true);
+
 }
 void SemiImplicitEM::ComputeRHS ( WarpXSolverVec& a_RHS,
diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp
index fe854881ea3..b1872ab7dba 100644
--- a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp
+++ b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp
@@ -101,7 +101,7 @@ WarpX::UpdateMagneticFieldAndApplyBCs( ablastr::fields::MultiLevelVectorField co
 amrex::MultiFab::Copy(*Bfp[2], *a_Bn[lev][2], 0, 0, ncomps, a_Bn[lev][2]->nGrowVect());
 }
 EvolveB(a_thetadt, DtType::Full);
- ApplyMagneticFieldBCs();
+ FillBoundaryB(guard_cells.ng_alloc_EB, WarpX::sync_nodal_points);
 }
 void
@@ -111,14 +111,8 @@ WarpX::FinishMagneticFieldAndApplyBCs( ablastr::fields::MultiLevelVectorField co
 using warpx::fields::FieldType;
 FinishImplicitField(m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, 0), a_Bn, a_theta);
- ApplyMagneticFieldBCs();
-}
-
-void
-WarpX::ApplyMagneticFieldBCs()
-{
- FillBoundaryB(guard_cells.ng_alloc_EB, WarpX::sync_nodal_points);
 ApplyBfieldBoundary(0, PatchType::fine, DtType::Full);
+ FillBoundaryB(guard_cells.ng_alloc_EB, WarpX::sync_nodal_points);
 }
 void
diff --git a/Source/WarpX.H b/Source/WarpX.H
index 1c7ed5a6a75..574478f4774 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -145,7 +145,6 @@ public:
 void SetElectricFieldAndApplyBCs ( const WarpXSolverVec& a_E );
 void UpdateMagneticFieldAndApplyBCs ( ablastr::fields::MultiLevelVectorField const& a_Bn, amrex::Real a_thetadt );
- void ApplyMagneticFieldBCs ();
 void SpectralSourceFreeFieldAdvance ();
 void FinishMagneticFieldAndApplyBCs ( ablastr::fields::MultiLevelVectorField const& a_Bn, amrex::Real a_theta );

From be77c7680409cf7654dc492129ad1413e781c906 Mon Sep 17 00:00:00 2001
From: Justin Ray Angus
Date: Mon, 25 Nov 2024 15:27:03 -0800
Subject: [PATCH 100/278] Setting laser particle positions to be time-centered
 for implicit solvers (#5485)

The current deposition schemes expect the particle positions to be
time-centered when using the implicit solvers. This is not currently the
case for the laser particles. This PR fixes this issue.

It should be noted that there is really no need for the current deposition
schemes to expect the particle positions to be time-centered for the
implicit solvers. This is somewhat of a legacy behavior carried over from
PICNIC. This can and should be changed in a future PR.
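For readers skimming the patch: the "time-centered" position expected by the
implicit current deposition is simply the position advanced by half a step
with the particle velocity. A minimal illustrative sketch of the idea (not
the actual WarpX implementation; the function and variable names below are
made up for illustration):

```cpp
#include <cstddef>

// Illustrative only: advance particle positions to the half time level,
//   x^{n+1/2} = x^n + (dt/2) * v,
// which is the position the implicit current deposition assumes it receives.
void push_positions_time_centered (double* xp, double const* vp,
                                   std::size_t np, double dt)
{
    for (std::size_t ip = 0; ip < np; ++ip) {
        xp[ip] += 0.5 * dt * vp[ip]; // position at t_{n+1/2}
    }
}
```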
Here are results of Ey generated from the analysis script for the 1D CI test using the semi-implicit method: ![plt_Ey](https://github.com/user-attachments/assets/6c925872-66e0-4d7c-85ac-94800d49c71d) Here are results of Ey generated from the analysis script for the 2D CI test using the semi-implicit method: ![plt_Ey](https://github.com/user-attachments/assets/32813bba-1074-4beb-85f8-da9289436c32) Here are results generated from the analysis script for the 3D CI test using the semi-implicit method: ![laser_analysis](https://github.com/user-attachments/assets/0e3a8505-a039-4e79-99d1-496ff23cd498) --- Examples/Tests/laser_injection/CMakeLists.txt | 20 +++++ .../inputs_test_1d_laser_injection_implicit | 79 +++++++++++++++++++ .../inputs_test_2d_laser_injection_implicit | 75 ++++++++++++++++++ .../test_1d_laser_injection_implicit.json | 13 +++ .../test_2d_laser_injection_implicit.json | 13 +++ Source/Particles/LaserParticleContainer.cpp | 24 ++++-- 6 files changed, 216 insertions(+), 8 deletions(-) create mode 100644 Examples/Tests/laser_injection/inputs_test_1d_laser_injection_implicit create mode 100644 Examples/Tests/laser_injection/inputs_test_2d_laser_injection_implicit create mode 100644 Regression/Checksum/benchmarks_json/test_1d_laser_injection_implicit.json create mode 100644 Regression/Checksum/benchmarks_json/test_2d_laser_injection_implicit.json diff --git a/Examples/Tests/laser_injection/CMakeLists.txt b/Examples/Tests/laser_injection/CMakeLists.txt index cec027deb70..a15075bb43e 100644 --- a/Examples/Tests/laser_injection/CMakeLists.txt +++ b/Examples/Tests/laser_injection/CMakeLists.txt @@ -30,3 +30,23 @@ add_warpx_test( diags/diag1000020 # output OFF # dependency ) + +add_warpx_test( + test_1d_laser_injection_implicit # name + 1 # dims + 2 # nprocs + inputs_test_1d_laser_injection_implicit # inputs + analysis_1d.py # analysis + diags/diag1000240 # output + OFF # dependency +) + +add_warpx_test( + test_2d_laser_injection_implicit # name + 2 # dims + 2 # nprocs + inputs_test_2d_laser_injection_implicit # inputs + analysis_2d.py # analysis + diags/diag1000240 # output + OFF # dependency +) diff --git a/Examples/Tests/laser_injection/inputs_test_1d_laser_injection_implicit b/Examples/Tests/laser_injection/inputs_test_1d_laser_injection_implicit new file mode 100644 index 00000000000..758e2cebaa1 --- /dev/null +++ b/Examples/Tests/laser_injection/inputs_test_1d_laser_injection_implicit @@ -0,0 +1,79 @@ +# Maximum number of time steps +max_step = 240 + +# number of grid points +amr.n_cell = 352 + +# Maximum allowable size of each subdomain in the problem domain; +# this is used to decompose the domain for parallel calculations. 
+amr.max_grid_size = 32 + +# Maximum level in hierarchy (for now must be 0, i.e., one level in total) +amr.max_level = 0 + +# Geometry +geometry.dims = 1 +geometry.prob_lo = -15.e-6 # physical domain +geometry.prob_hi = 15.e-6 + +boundary.field_lo = pec +boundary.field_hi = pec + +warpx.serialize_initial_conditions = 1 + +# Verbosity +warpx.verbose = 1 + +# Algorithms +algo.current_deposition = esirkepov +warpx.use_filter = 0 + +# implicit evolve scheme +algo.evolve_scheme = "semi_implicit_em" +# +implicit_evolve.nonlinear_solver = "newton" +newton.verbose = true +newton.max_iterations = 21 +newton.relative_tolerance = 1.0e-8 +newton.require_convergence = true +# +gmres.verbose_int = 2 +gmres.max_iterations = 1000 +gmres.relative_tolerance = 1.0e-4 + +# CFL +warpx.cfl = 0.9 + +# Order of particle shape factors +algo.particle_shape = 1 + +# Laser +lasers.names = laser1 +laser1.profile = Gaussian +laser1.position = 0.e-6 0.e-6 0.e-6 # This point is on the laser plane +laser1.direction = 0. 0. 1. # The plane normal direction +laser1.polarization = 1. 1. 0. # The main polarization vector +laser1.e_max = 4.e12 # Maximum amplitude of the laser field (in V/m) +laser1.wavelength = 1.0e-6 # The wavelength of the laser (in meters) +laser1.profile_waist = 5.e-6 # The waist of the laser (in meters) +laser1.profile_duration = 10.e-15 # The duration of the laser (in seconds) +laser1.profile_t_peak = 24.e-15 # The time at which the laser reaches its peak (in seconds) +laser1.profile_focal_distance = 13.109e-6 # Focal distance from the antenna (in meters) + # With this focal distance the laser is at focus + # at the end of the simulation. + +# Diagnostics +diagnostics.diags_names = diag1 openpmd +diag1.intervals = 20 +diag1.diag_type = Full + +openpmd.intervals = 20 +openpmd.diag_type = Full +openpmd.format = openpmd + +# Moving window +warpx.do_moving_window = 1 +warpx.moving_window_dir = z +warpx.moving_window_v = 1.0 # in units of the speed of light +warpx.start_moving_window_step = 20 +warpx.end_moving_window_step = 200 diff --git a/Examples/Tests/laser_injection/inputs_test_2d_laser_injection_implicit b/Examples/Tests/laser_injection/inputs_test_2d_laser_injection_implicit new file mode 100644 index 00000000000..be6a704b171 --- /dev/null +++ b/Examples/Tests/laser_injection/inputs_test_2d_laser_injection_implicit @@ -0,0 +1,75 @@ +# Maximum number of time steps +max_step = 240 + +# number of grid points +amr.n_cell = 480 352 + +# Maximum allowable size of each subdomain in the problem domain; +# this is used to decompose the domain for parallel calculations. 
+amr.max_grid_size = 32 + +# Maximum level in hierarchy (for now must be 0, i.e., one level in total) +amr.max_level = 0 + +# Geometry +geometry.dims = 2 +geometry.prob_lo = -20.e-6 -15.e-6 # physical domain +geometry.prob_hi = 20.e-6 15.e-6 + +boundary.field_lo = pec periodic +boundary.field_hi = pec periodic + +warpx.serialize_initial_conditions = 1 + +# Verbosity +warpx.verbose = 1 + +# Algorithms +algo.current_deposition = esirkepov +warpx.use_filter = 0 + +# implicit evolve scheme +algo.evolve_scheme = "semi_implicit_em" +# +implicit_evolve.nonlinear_solver = "newton" +newton.verbose = true +newton.max_iterations = 21 +newton.relative_tolerance = 1.0e-8 +newton.require_convergence = true +# +gmres.verbose_int = 2 +gmres.max_iterations = 1000 +gmres.relative_tolerance = 1.0e-4 + +# CFL +warpx.cfl = 1.0 + +# Order of particle shape factors +algo.particle_shape = 1 + +# Laser +lasers.names = laser1 +laser1.profile = Gaussian +laser1.position = 10.e-6 0.e-6 0.e-6 # This point is on the laser plane +laser1.direction = 2. 0. 1. # The plane normal direction +laser1.polarization = 1. 1. -2. # The main polarization vector +laser1.e_max = 4.e12 # Maximum amplitude of the laser field (in V/m) +laser1.wavelength = 1.0e-6 # The wavelength of the laser (in meters) +laser1.profile_waist = 5.e-6 # The waist of the laser (in meters) +laser1.profile_duration = 10.e-15 # The duration of the laser (in seconds) +laser1.profile_t_peak = 24.e-15 # The time at which the laser reaches its peak (in seconds) +laser1.profile_focal_distance = 13.109e-6 # Focal distance from the antenna (in meters) + # With this focal distance the laser is at focus + # at the end of the simulation. + +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 240 +diag1.diag_type = Full + +# Moving window +warpx.do_moving_window = 1 +warpx.moving_window_dir = x +warpx.moving_window_v = 1.0 # in units of the speed of light +warpx.start_moving_window_step = 20 +warpx.end_moving_window_step = 200 diff --git a/Regression/Checksum/benchmarks_json/test_1d_laser_injection_implicit.json b/Regression/Checksum/benchmarks_json/test_1d_laser_injection_implicit.json new file mode 100644 index 00000000000..a89d6ccc2d4 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_1d_laser_injection_implicit.json @@ -0,0 +1,13 @@ +{ + "lev=0": { + "Bx": 374596.7817425552, + "By": 374596.7817425552, + "Bz": 0.0, + "Ex": 111502789524279.0, + "Ey": 111502789524279.0, + "Ez": 0.0, + "jx": 73098054407.2772, + "jy": 73098054407.2772, + "jz": 0.0 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_2d_laser_injection_implicit.json b/Regression/Checksum/benchmarks_json/test_2d_laser_injection_implicit.json new file mode 100644 index 00000000000..b77b951e92a --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_laser_injection_implicit.json @@ -0,0 +1,13 @@ +{ + "lev=0": { + "Bx": 19699314.38858362, + "By": 101297372.8536657, + "Bz": 39796093.072294116, + "Ex": 1.3881256464656438e+16, + "Ey": 1.322100107139857e+16, + "Ez": 2.6833518029118908e+16, + "jx": 3.669364941403736e+16, + "jy": 3.669364586262695e+16, + "jz": 7.338729883115621e+16 + } +} diff --git a/Source/Particles/LaserParticleContainer.cpp b/Source/Particles/LaserParticleContainer.cpp index c804bb12797..1954b822084 100644 --- a/Source/Particles/LaserParticleContainer.cpp +++ b/Source/Particles/LaserParticleContainer.cpp @@ -917,11 +917,13 @@ LaserParticleContainer::update_laser_particle (WarpXParIter& pti, puyp[i] = gamma * vy; puzp[i] = gamma * vz; - // Push the the 
particle positions + // Push the particle positions // When using the implicit solver, this function is called multiple times per timestep // (within the linear and nonlinear solver). Thus, the position of the particles needs to be reset - // to the initial position (at the beginning of the timestep), before updating the particle position + // to the initial position (at the beginning of the timestep), before updating the particle position. + // Also, the current deposition schemes expect the particle positions to be time centered + // (cur_time + 0.5*dt) for PushType::Implicit. ParticleReal x=0., y=0., z=0.; if (push_type == PushType::Explicit) { @@ -930,20 +932,26 @@ LaserParticleContainer::update_laser_particle (WarpXParIter& pti, #if !defined(WARPX_DIM_1D_Z) if (push_type == PushType::Implicit) { - x = x_n[i]; + x = x_n[i] + vx * dt*0.5_prt; + } + else { + x += vx * dt; } - x += vx * dt; #endif #if defined(WARPX_DIM_3D) || defined(WARPX_DIM_RZ) if (push_type == PushType::Implicit) { - y = y_n[i]; + y = y_n[i] + vy * dt*0.5_prt; + } + else { + y += vy * dt; } - y += vy * dt; #endif if (push_type == PushType::Implicit) { - z = z_n[i]; + z = z_n[i] + vz * dt*0.5_prt; + } + else { + z += vz * dt; } - z += vz * dt; SetPosition(i, x, y, z); } From 0905d0b1f8cd9d6bc76e4f43adf9630926bc09cf Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 2 Dec 2024 16:04:29 -0800 Subject: [PATCH 101/278] Update Perlmutter profiles to fix Boost dependency (#5477) Update the Boost module to use the latest E4S (23.08) GCC/12.3.0 stack on Perlmutter. This should fix #5471. --- .../perlmutter-nersc/perlmutter_cpu_warpx.profile.example | 2 +- .../perlmutter-nersc/perlmutter_gpu_warpx.profile.example | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example b/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example index 99817924ad6..488d53c6af9 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example +++ b/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example @@ -11,7 +11,7 @@ module load cmake/3.24.3 module load cray-fftw/3.3.10.6 # optional: for QED support with detailed tables -export BOOST_ROOT=/global/common/software/spackecp/perlmutter/e4s-23.05/default/spack/opt/spack/linux-sles15-zen3/gcc-11.2.0/boost-1.82.0-ow5r5qrgslcwu33grygouajmuluzuzv3 +export BOOST_ROOT=/global/common/software/spackecp/perlmutter/e4s-23.08/default/spack/opt/spack/linux-sles15-zen3/gcc-12.3.0/boost-1.83.0-nxqk3hnci5g3wqv75wvsmuke3w74mzxi # optional: for openPMD and PSATD+RZ support module load cray-hdf5-parallel/1.12.2.9 diff --git a/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example b/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example index 1e5325e29b9..7e76d1366a3 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example +++ b/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example @@ -15,7 +15,7 @@ module load cudatoolkit module load cmake/3.24.3 # optional: for QED support with detailed tables -export BOOST_ROOT=/global/common/software/spackecp/perlmutter/e4s-23.05/default/spack/opt/spack/linux-sles15-zen3/gcc-11.2.0/boost-1.82.0-ow5r5qrgslcwu33grygouajmuluzuzv3 +export BOOST_ROOT=/global/common/software/spackecp/perlmutter/e4s-23.08/default/spack/opt/spack/linux-sles15-zen3/gcc-12.3.0/boost-1.83.0-nxqk3hnci5g3wqv75wvsmuke3w74mzxi # optional: for openPMD and PSATD+RZ support module 
load cray-hdf5-parallel/1.12.2.9 From 780a584f447f9f7750eddd6e87367e112617bf3e Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Tue, 3 Dec 2024 09:20:16 -0800 Subject: [PATCH 102/278] CI: fix Clang UB sanitizer, disable Clang thread sanitizer (#5474) I started seeing this error from the Clang sanitizer workflow in several PRs: ```console E: The repository 'http://archive.ubuntu.com/ubuntu mantic Release' does not have a Release file. E: The repository 'http://archive.ubuntu.com/ubuntu mantic-updates Release' does not have a Release file. E: The repository 'http://archive.ubuntu.com/ubuntu mantic-backports Release' does not have a Release file. E: The repository 'http://security.ubuntu.com/ubuntu mantic-security Release' does not have a Release file. ``` The Ubuntu 23.10 container was introduced in #5181 when `ubuntu-24.04` was pre-release as GitHub-hosted runner. I think we can try to move to `ubuntu-24.04` now and see if this is enough to fix the issue above. --- .github/workflows/clang_sanitizers.yml | 17 ++++------------- .github/workflows/dependencies/clang17.sh | 5 ----- 2 files changed, 4 insertions(+), 18 deletions(-) diff --git a/.github/workflows/clang_sanitizers.yml b/.github/workflows/clang_sanitizers.yml index e89cb676a03..d63a329bf64 100644 --- a/.github/workflows/clang_sanitizers.yml +++ b/.github/workflows/clang_sanitizers.yml @@ -15,8 +15,7 @@ concurrency: jobs: build_UB_sanitizer: name: Clang UB sanitizer - runs-on: ubuntu-22.04 - container: ubuntu:23.10 + runs-on: ubuntu-24.04 if: github.event.pull_request.draft == false env: CC: clang @@ -65,10 +64,6 @@ jobs: - name: run with UB sanitizer run: | - # We need these two lines because these tests run inside a docker container - export OMPI_ALLOW_RUN_AS_ROOT=1 - export OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1 - export OMP_NUM_THREADS=2 #MPI implementations often leak memory @@ -81,9 +76,9 @@ jobs: build_thread_sanitizer: name: Clang thread sanitizer - runs-on: ubuntu-22.04 - container: ubuntu:23.10 - if: github.event.pull_request.draft == false + runs-on: ubuntu-24.04 + # TODO Fix data race conditions and re-enable job + if: 0 #github.event.pull_request.draft == false env: CC: clang CXX: clang++ @@ -149,10 +144,6 @@ jobs: export TSAN_OPTIONS='ignore_noninstrumented_modules=1' export ARCHER_OPTIONS="verbose=1" - # We need these two lines because these tests run inside a docker container - export OMPI_ALLOW_RUN_AS_ROOT=1 - export OMPI_ALLOW_RUN_AS_ROOT_CONFIRM=1 - export OMP_NUM_THREADS=2 mpirun -n 2 ./build/bin/warpx.rz Examples/Physics_applications/laser_acceleration/inputs_base_rz warpx.serialize_initial_conditions = 0 diff --git a/.github/workflows/dependencies/clang17.sh b/.github/workflows/dependencies/clang17.sh index d208a9f3f3b..fb04e2a5914 100755 --- a/.github/workflows/dependencies/clang17.sh +++ b/.github/workflows/dependencies/clang17.sh @@ -7,11 +7,6 @@ set -eu -o pipefail -# This dependency file is currently used within a docker container, -# which does not come with sudo. -apt-get -qqq update -apt-get -y install sudo - # `man apt.conf`: # Number of retries to perform. If this is non-zero APT will retry # failed files the given number of times. 
From 4be253988c25e8cd5d39f9cba8410cac86086d5a Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 19:53:59 +0000 Subject: [PATCH 103/278] [pre-commit.ci] pre-commit autoupdate (#5486) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.4 → v0.8.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.4...v0.8.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 16a23ada3b5..e196a7b3187 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.7.4 + rev: v0.8.1 hooks: # Run the linter - id: ruff From 99a90a4d8e174ba77ea785de28e511f437a152d0 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 3 Dec 2024 13:47:29 -0800 Subject: [PATCH 104/278] [pre-commit.ci] pre-commit autoupdate (#5486) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.7.4 → v0.8.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.7.4...v0.8.1) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> From 9e5346e350ce88310d4e99b85a64ab9e8961a98e Mon Sep 17 00:00:00 2001 From: David Grote Date: Wed, 4 Dec 2024 13:08:26 -0800 Subject: [PATCH 105/278] Add reduced_diags general input (#5479) This PR adds the `reduced_diags` input parameter group that allows setting of parameters common to all reduced diagnostics. For example 'reduced_diags.intervals` can be set once rather than having to set it for each individual reduced diagnostic. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- Docs/source/usage/parameters.rst | 21 ++++++++----- .../inputs_test_2d_laser_ion_acc_picmi.py | 10 ++----- .../inputs_test_1d_semi_implicit_picard | 3 +- .../inputs_test_1d_theta_implicit_picard | 3 +- .../inputs_test_2d_theta_implicit_jfnk_vandb | 3 +- ...test_2d_theta_implicit_jfnk_vandb_filtered | 3 +- ...inputs_test_2d_theta_implicit_strang_psatd | 3 +- Python/pywarpx/picmi.py | 30 ++++++++++++++++++- .../Diagnostics/ReducedDiags/ReducedDiags.cpp | 8 ++++- 9 files changed, 57 insertions(+), 27 deletions(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 31c3ca947fb..c3756d44d96 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -3589,22 +3589,27 @@ This shifts analysis from post-processing to runtime calculation of reduction op * ``Timestep`` This type outputs the simulation's physical timestep (in seconds) at each mesh refinement level. -* ``.intervals`` (`string`) +* ``reduced_diags.intervals`` (`string`) Using the `Intervals Parser`_ syntax, this string defines the timesteps at which reduced - diagnostics are written to file. + diagnostics are written to the file. + This can also be specified for the specific diagnostic by setting ``.intervals``. -* ``.path`` (`string`) optional (default `./diags/reducedfiles/`) - The path that the output file will be stored. 
+* ``reduced_diags.path`` (`string`) optional (default `./diags/reducedfiles/`) + The path where the output file will be stored. + This can also be specified for the specific diagnostic by setting ``.path``. -* ``.extension`` (`string`) optional (default `txt`) - The extension of the output file. +* ``reduced_diags.extension`` (`string`) optional (default `txt`) + The extension of the output file (the suffix). + This can also be specified for the specific diagnostic by setting ``.extension``. -* ``.separator`` (`string`) optional (default a `whitespace`) +* ``reduced_diags.separator`` (`string`) optional (default a `whitespace`) The separator between row values in the output file. The default separator is a whitespace. + This can also be specified for the specific diagnostic by setting ``.separator``. -* ``.precision`` (`integer`) optional (default `14`) +* ``reduced_diags.precision`` (`integer`) optional (default `14`) The precision used when writing out the data to the text files. + This can also be specified for the specific diagnostic by setting ``.precision``. Lookup tables and other settings for QED modules ------------------------------------------------ diff --git a/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc_picmi.py b/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc_picmi.py index 66ba5f64091..c869c770b99 100755 --- a/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc_picmi.py +++ b/Examples/Physics_applications/laser_ion/inputs_test_2d_laser_ion_acc_picmi.py @@ -196,7 +196,6 @@ histuH_rdiag = picmi.ReducedDiagnostic( diag_type="ParticleHistogram", name="histuH", - period=100, species=hydrogen, bin_number=1000, bin_min=0.0, @@ -208,7 +207,6 @@ histue_rdiag = picmi.ReducedDiagnostic( diag_type="ParticleHistogram", name="histue", - period=100, species=electrons, bin_number=1000, bin_min=0.0, @@ -222,7 +220,6 @@ histuzAll_rdiag = picmi.ReducedDiagnostic( diag_type="ParticleHistogram", name="histuzAll", - period=100, species=hydrogen, bin_number=1000, bin_min=-0.474, @@ -233,7 +230,6 @@ field_probe_z_rdiag = picmi.ReducedDiagnostic( diag_type="FieldProbe", name="FieldProbe_Z", - period=100, integrate=0, probe_geometry="Line", x_probe=0.0, @@ -246,7 +242,6 @@ field_probe_scat_point_rdiag = picmi.ReducedDiagnostic( diag_type="FieldProbe", name="FieldProbe_ScatPoint", - period=1, integrate=0, probe_geometry="Point", x_probe=0.0, @@ -256,7 +251,6 @@ field_probe_scat_line_rdiag = picmi.ReducedDiagnostic( diag_type="FieldProbe", name="FieldProbe_ScatLine", - period=100, integrate=1, probe_geometry="Line", x_probe=-2.5e-6, @@ -267,7 +261,8 @@ ) load_balance_costs_rdiag = picmi.ReducedDiagnostic( - diag_type="LoadBalanceCosts", name="LBC", period=100 + diag_type="LoadBalanceCosts", + name="LBC", ) # Set up simulation @@ -278,6 +273,7 @@ particle_shape="cubic", warpx_numprocs=[1, 2], # deactivate `numprocs` for dynamic load balancing warpx_use_filter=1, + warpx_reduced_diags_intervals=100, warpx_load_balance_intervals=100, warpx_load_balance_costs_update="heuristic", ) diff --git a/Examples/Tests/implicit/inputs_test_1d_semi_implicit_picard b/Examples/Tests/implicit/inputs_test_1d_semi_implicit_picard index 8ef0304bebb..39df05ff72c 100644 --- a/Examples/Tests/implicit/inputs_test_1d_semi_implicit_picard +++ b/Examples/Tests/implicit/inputs_test_1d_semi_implicit_picard @@ -85,7 +85,6 @@ diag1.electrons.variables = z w ux uy uz diag1.protons.variables = z w ux uy uz warpx.reduced_diags_names = particle_energy field_energy 
+reduced_diags.intervals = 1 particle_energy.type = ParticleEnergy -particle_energy.intervals = 1 field_energy.type = FieldEnergy -field_energy.intervals = 1 diff --git a/Examples/Tests/implicit/inputs_test_1d_theta_implicit_picard b/Examples/Tests/implicit/inputs_test_1d_theta_implicit_picard index 2ed4d746708..80e4e7033fc 100644 --- a/Examples/Tests/implicit/inputs_test_1d_theta_implicit_picard +++ b/Examples/Tests/implicit/inputs_test_1d_theta_implicit_picard @@ -85,7 +85,6 @@ diag1.electrons.variables = z w ux uy uz diag1.protons.variables = z w ux uy uz warpx.reduced_diags_names = particle_energy field_energy +reduced_diags.intervals = 1 particle_energy.type = ParticleEnergy -particle_energy.intervals = 1 field_energy.type = FieldEnergy -field_energy.intervals = 1 diff --git a/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb b/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb index 0cdf2ebe40d..bab9a03878c 100644 --- a/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb +++ b/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb @@ -109,7 +109,6 @@ diag1.electrons.variables = x z w ux uy uz diag1.protons.variables = x z w ux uy uz warpx.reduced_diags_names = particle_energy field_energy +reduced_diags.intervals = 1 particle_energy.type = ParticleEnergy -particle_energy.intervals = 1 field_energy.type = FieldEnergy -field_energy.intervals = 1 diff --git a/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb_filtered b/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb_filtered index 4849a5e30a3..c7457e02af8 100644 --- a/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb_filtered +++ b/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb_filtered @@ -109,7 +109,6 @@ diag1.electrons.variables = x z w ux uy uz diag1.protons.variables = x z w ux uy uz warpx.reduced_diags_names = particle_energy field_energy +reduced_diags.intervals = 1 particle_energy.type = ParticleEnergy -particle_energy.intervals = 1 field_energy.type = FieldEnergy -field_energy.intervals = 1 diff --git a/Examples/Tests/implicit/inputs_test_2d_theta_implicit_strang_psatd b/Examples/Tests/implicit/inputs_test_2d_theta_implicit_strang_psatd index f68d1d324ac..46bc6b3d301 100644 --- a/Examples/Tests/implicit/inputs_test_2d_theta_implicit_strang_psatd +++ b/Examples/Tests/implicit/inputs_test_2d_theta_implicit_strang_psatd @@ -92,7 +92,6 @@ diag1.electrons.variables = x z w ux uy uz diag1.protons.variables = x z w ux uy uz warpx.reduced_diags_names = particle_energy field_energy +reduced_diags.intervals = 1 particle_energy.type = ParticleEnergy -particle_energy.intervals = 1 field_energy.type = FieldEnergy -field_energy.intervals = 1 diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index afd28851f70..c5946376d52 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -2770,6 +2770,21 @@ class Simulation(picmistandard.PICMI_Simulation): warpx_used_inputs_file: string, optional The name of the text file that the used input parameters is written to, + + warpx_reduced_diags_path: string, optional + Sets the default path for reduced diagnostic output files + + warpx_reduced_diags_extension: string, optional + Sets the default extension for reduced diagnostic output files + + warpx_reduced_diags_intervals: string, optional + Sets the default intervals for reduced diagnostic output files + + warpx_reduced_diags_separator: string, optional + Sets the default separator for reduced diagnostic output 
files + + warpx_reduced_diags_precision: integer, optional + Sets the default precision for reduced diagnostic output files """ # Set the C++ WarpX interface (see _libwarpx.LibWarpX) as an extension to @@ -2836,6 +2851,12 @@ def init(self, kw): self.checkpoint_signals = kw.pop("warpx_checkpoint_signals", None) self.numprocs = kw.pop("warpx_numprocs", None) + self.reduced_diags_path = kw.pop("warpx_reduced_diags_path", None) + self.reduced_diags_extension = kw.pop("warpx_reduced_diags_extension", None) + self.reduced_diags_intervals = kw.pop("warpx_reduced_diags_intervals", None) + self.reduced_diags_separator = kw.pop("warpx_reduced_diags_separator", None) + self.reduced_diags_precision = kw.pop("warpx_reduced_diags_precision", None) + self.inputs_initialized = False self.warpx_initialized = False @@ -2902,6 +2923,13 @@ def initialize_inputs(self): pywarpx.warpx.numprocs = self.numprocs + reduced_diags = pywarpx.warpx.get_bucket("reduced_diags") + reduced_diags.path = self.reduced_diags_path + reduced_diags.extension = self.reduced_diags_extension + reduced_diags.intervals = self.reduced_diags_intervals + reduced_diags.separator = self.reduced_diags_separator + reduced_diags.precision = self.reduced_diags_precision + particle_shape = self.particle_shape for s in self.species: if s.particle_shape is not None: @@ -3943,7 +3971,7 @@ def __init__( self, diag_type, name=None, - period=1, + period=None, path=None, extension=None, separator=None, diff --git a/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp b/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp index ec31d9de81c..a3529cd305d 100644 --- a/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp +++ b/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp @@ -28,12 +28,15 @@ m_rd_name{rd_name} { BackwardCompatibility(); + const ParmParse pp_rd("reduced_diags"); const ParmParse pp_rd_name(m_rd_name); // read path + pp_rd.query("path", m_path); pp_rd_name.query("path", m_path); // read extension + pp_rd.query("extension", m_extension); pp_rd_name.query("extension", m_extension); // check if it is a restart run @@ -61,13 +64,16 @@ m_rd_name{rd_name} // read reduced diags intervals std::vector intervals_string_vec = {"1"}; - pp_rd_name.getarr("intervals", intervals_string_vec); + pp_rd.queryarr("intervals", intervals_string_vec); + pp_rd_name.queryarr("intervals", intervals_string_vec); m_intervals = utils::parser::IntervalsParser(intervals_string_vec); // read separator + pp_rd.query("separator", m_sep); pp_rd_name.query("separator", m_sep); // precision of data in the output file + utils::parser::queryWithParser(pp_rd, "precision", m_precision); utils::parser::queryWithParser(pp_rd_name, "precision", m_precision); } // end constructor From 4f2b3b6a6110e3260f0acfda0a0afc26dc2fd872 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Wed, 4 Dec 2024 15:59:02 -0800 Subject: [PATCH 106/278] Release 24.12 (#5496) Prepare the December release of WarpX, following the documentation at https://warpx.readthedocs.io/en/latest/maintenance/release.html: 1. Update to latest AMReX release: ```console ./Tools/Release/updateAMReX.py ``` 2. Update to latest pyAMReX release: ```console ./Tools/Release/updatepyAMReX.py ``` 3. Update to latest PICSAR release (no changes, still 24.09): ```console ./Tools/Release/updatePICSAR.py ``` 4. 
Update WarpX version number: ```console ./Tools/Release/newVersion.sh ``` --- .github/workflows/cuda.yml | 2 +- CMakeLists.txt | 2 +- Docs/source/conf.py | 4 ++-- Python/setup.py | 2 +- cmake/dependencies/AMReX.cmake | 4 ++-- cmake/dependencies/pyAMReX.cmake | 4 ++-- setup.py | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 8d40aba553c..0a68d850e7e 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -126,7 +126,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 456c93c7d9512f1cdffac0574973d7df41417898 && cd - + cd ../amrex && git checkout --detach 24.12 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/CMakeLists.txt b/CMakeLists.txt index da62c943e19..c7a889633da 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ # Preamble #################################################################### # cmake_minimum_required(VERSION 3.24.0) -project(WarpX VERSION 24.11) +project(WarpX VERSION 24.12) include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake) diff --git a/Docs/source/conf.py b/Docs/source/conf.py index e081a490ee8..e54a6cc23ba 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -107,9 +107,9 @@ def __init__(self, *args, **kwargs): # built documents. # # The short X.Y version. -version = "24.11" +version = "24.12" # The full version, including alpha/beta/rc tags. -release = "24.11" +release = "24.12" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/Python/setup.py b/Python/setup.py index 5ac5a950d99..8080b62acf4 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -65,7 +65,7 @@ setup( name="pywarpx", - version="24.11", + version="24.12", packages=["pywarpx"], package_dir={"pywarpx": "pywarpx"}, description="""Wrapper of WarpX""", diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 491e333d712..152a2618dee 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -271,7 +271,7 @@ macro(find_amrex) endif() set(COMPONENT_PRECISION ${WarpX_PRECISION} P${WarpX_PARTICLE_PRECISION}) - find_package(AMReX 456c93c7d9512f1cdffac0574973d7df41417898 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} ${COMPONENT_FFT} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) + find_package(AMReX 24.12 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} ${COMPONENT_FFT} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) # note: TINYP skipped because user-configured and optional # AMReX CMake helper scripts @@ -294,7 +294,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "456c93c7d9512f1cdffac0574973d7df41417898" +set(WarpX_amrex_branch "24.12" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index 8e0e26e55db..6f0e07bf79e 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -59,7 +59,7 @@ function(find_pyamrex) endif() 
elseif(NOT WarpX_pyamrex_internal) # TODO: MPI control - find_package(pyAMReX 24.11 CONFIG REQUIRED) + find_package(pyAMReX 24.12 CONFIG REQUIRED) message(STATUS "pyAMReX: Found version '${pyAMReX_VERSION}'") endif() endfunction() @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "66fc71fecf77eee903e9c60100f1243f9e157744" +set(WarpX_pyamrex_branch "24.12" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") diff --git a/setup.py b/setup.py index cdb8a6d844e..0feb0a710d4 100644 --- a/setup.py +++ b/setup.py @@ -280,7 +280,7 @@ def build_extension(self, ext): setup( name="pywarpx", # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version="24.11", + version="24.12", packages=["pywarpx"], package_dir={"pywarpx": "Python/pywarpx"}, author="Jean-Luc Vay, David P. Grote, Maxence Thévenet, Rémi Lehe, Andrew Myers, Weiqun Zhang, Axel Huebl, et al.", From 7f2fee5156047393d462437c975a5276b3895a67 Mon Sep 17 00:00:00 2001 From: David Grote Date: Wed, 4 Dec 2024 18:47:45 -0800 Subject: [PATCH 107/278] Fix read_raw_data.py script (#5490) The numpy routine `product` has been obsoleted, replaced by `prod`. --- Tools/PostProcessing/read_raw_data.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Tools/PostProcessing/read_raw_data.py b/Tools/PostProcessing/read_raw_data.py index a180cad18e0..bc63b43f3cf 100644 --- a/Tools/PostProcessing/read_raw_data.py +++ b/Tools/PostProcessing/read_raw_data.py @@ -202,7 +202,7 @@ def _read_field(raw_file, field_name): f.seek(offset) if header.version == 1: f.readline() # skip the first line - arr = np.fromfile(f, "float64", np.product(shape)) + arr = np.fromfile(f, "float64", np.prod(shape)) arr = arr.reshape(shape, order="F") box_shape = [slice(low, hig + 1) for low, hig in zip(lo, hi)] if header.ncomp > 1: @@ -225,7 +225,7 @@ def _read_buffer(snapshot, header_fn, _component_names): lo = box[0] - dom_lo hi = box[1] - dom_lo shape = hi - lo + 1 - size = np.product(shape) + size = np.prod(shape) with open(snapshot + "/Level_0/" + fn, "rb") as f: f.seek(offset) if header.version == 1: From a31030912a256a0e0da112fa24d738f73276882c Mon Sep 17 00:00:00 2001 From: Revathi Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com> Date: Wed, 4 Dec 2024 19:22:30 -0800 Subject: [PATCH 108/278] Remove unused function `ScrapeParticles` (#5491) --- Source/WarpX.H | 2 -- 1 file changed, 2 deletions(-) diff --git a/Source/WarpX.H b/Source/WarpX.H index 574478f4774..aa8c42715d3 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -1597,8 +1597,6 @@ private: */ void HandleParticlesAtBoundaries (int step, amrex::Real cur_time, int num_moved); - void ScrapeParticles (); - /** Update the E and B fields in the explicit em PIC scheme. * * At the beginning, we have B^{n} and E^{n}. From 2318fd7b1d8d595e572e4dff461af00eabbb7e93 Mon Sep 17 00:00:00 2001 From: David Grote Date: Fri, 6 Dec 2024 14:37:20 -0800 Subject: [PATCH 109/278] Add MultiFab method that takes an `int` for the direction (#5473) This adds a new way to call the `multifab` routine from Python, taking an integer for the direction instead of requiring an instance of the `Direction` class. This also cleans up the documentation some for the other versions of the routine. 
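As a hedged usage sketch of the resulting overloads (the `warpx` handle below is assumed to be the WarpX object exposed by the Python extension of a running simulation; only the call signatures are taken from the bindings in this diff):

```python
# Scalar field by name and level:
rho_mf = warpx.multifab("rho_fp", level=0)

# Vector-field component: with this PR a plain integer direction
# (0, 1, or 2) can be passed instead of a Direction instance:
Ex_mf = warpx.multifab("Efield_fp", idir=0, level=0)
```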
--- Source/Python/WarpX.cpp | 27 +++++++++++++++++++++++++-- 1 file changed, 25 insertions(+), 2 deletions(-) diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index 921adff254f..01ab2d3e48f 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -142,7 +142,7 @@ where 'prefix' is the part of 'internal_name';' before the [])doc" py::arg("scalar_name"), py::arg("level"), py::return_value_policy::reference_internal, - R"doc(Return scalar fields (MultiFabs) by name and level, e.g., ``\"rho_fp\"``, ``\"phi_fp"``, ... + R"doc(Return scalar fields (MultiFabs) by name and level. The name is in the form like``\"rho_fp\"``, ``\"phi_fp"``. The level is an integer with 0 being the lowest level. The physical fields in WarpX have the following naming: @@ -164,7 +164,30 @@ The physical fields in WarpX have the following naming: py::arg("dir"), py::arg("level"), py::return_value_policy::reference_internal, - R"doc(Return the component of a vector field (MultiFab) by name, direction, and level, e.g., ``\"Efield_aux\"``, ``\"Efield_fp"``, ... + R"doc(Return the component of a vector field (MultiFab) by name, direction, and level. The name is in the form like ``\"Efield_aux\"``, ``\"Efield_fp"``, etc. The direction is a Direction instance, Direction(idir) where idir is an integer 0, 1, or 2. The level is an integer with 0 being the lowest level. + +The physical fields in WarpX have the following naming: + +- ``_fp`` are the "fine" patches, the regular resolution of a current mesh-refinement level +- ``_aux`` are temporary (auxiliar) patches at the same resolution as ``_fp``. + They usually include contributions from other levels and can be interpolated for gather routines of particles. +- ``_cp`` are "coarse" patches, at the same resolution (but not necessary values) as the ``_fp`` of ``level - 1`` + (only for level 1 and higher).)doc" + ) + .def("multifab", + [](WarpX & wx, std::string vector_name, int idir, int level) { + Direction const dir{idir}; + if (wx.m_fields.has(vector_name, dir, level)) { + return wx.m_fields.get(vector_name, dir, level); + } else { + throw std::runtime_error("The vector field '" + vector_name + "' is unknown or is not allocated!"); + } + }, + py::arg("vector_name"), + py::arg("idir"), + py::arg("level"), + py::return_value_policy::reference_internal, + R"doc(Return the component of a vector field (MultiFab) by name, direction, and level. The name is in the form like ``\"Efield_aux\"``, ``\"Efield_fp"``, etc. The direction is an integer 0, 1, or 2. The level is an integer with 0 being the lowest level. The physical fields in WarpX have the following naming: From c705575d8af802f8e513a6a16f801b82943bc620 Mon Sep 17 00:00:00 2001 From: David Grote Date: Fri, 6 Dec 2024 17:43:39 -0800 Subject: [PATCH 110/278] Fix the extrapolation for guard cells in the insulator region (#5499) For the nodal guard cells in the insulator region, the extrapolation was being done relative to cells only inside the domain, not using the value on the boundary. This fixes this to do extrapolation from the value on the boundary and the next inward cell. 
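In other words, the value at the g-th guard node outside the boundary is now obtained by linear extrapolation through the boundary value and its interior mirror, f(-g) = 2*f(0) - f(g). A tiny 1D illustration of that fill (low-side boundary only; a sketch, not the WarpX source, which also handles the high side and the staggering logic):

```cpp
#include <vector>

// f[0] is the nodal value on the boundary, f[1..] the interior values;
// guard[g-1] receives the value for the node g cells outside the domain.
void fill_low_guard_nodes (std::vector<double>& guard,
                           std::vector<double> const& f, int nguard)
{
    for (int g = 1; g <= nguard; ++g) {
        // Extrapolate through the boundary value f[0] and the interior
        // mirror f[g], matching the corrected logic below.
        guard[g-1] = 2.0*f[0] - f[g];
    }
}
```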
--- Source/BoundaryConditions/PEC_Insulator.cpp | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/Source/BoundaryConditions/PEC_Insulator.cpp b/Source/BoundaryConditions/PEC_Insulator.cpp index cfcd718c21c..b9926e4bf22 100644 --- a/Source/BoundaryConditions/PEC_Insulator.cpp +++ b/Source/BoundaryConditions/PEC_Insulator.cpp @@ -62,8 +62,9 @@ namespace amrex::GpuArray const fbndry_hi) { using namespace amrex::literals; + amrex::IntVect ijk_next = ijk_vec; + amrex::IntVect ijk_nextp1 = ijk_vec; amrex::IntVect ijk_mirror = ijk_vec; - amrex::IntVect ijk_mirrorp1 = ijk_vec; bool OnBoundary = false; bool GuardCell = false; bool isInsulatorBoundary = false; @@ -114,12 +115,14 @@ namespace } else if (ig > 0) { GuardCell = true; + // Location of the next cells inward + ijk_next[idim] = ijk_vec[idim] - ig*iside; + ijk_nextp1[idim] = ijk_next[idim] - ig*iside; + // Mirror location inside the domain by "ig" number of cells ijk_mirror[idim] = ( (iside == -1) ? (dom_lo[idim] + ig - (1 - is_nodal[idim])) - : (dom_hi[idim] + 1 - ig)); - // Location twice as far in, for extrapolation - ijk_mirrorp1[idim] = 2*ijk_mirror[idim] - ijk_vec[idim]; + : (dom_hi[idim] - ig + 1)); // Check for components with even symmetry. // True for E_like and tangential, and B_like and normal @@ -156,12 +159,12 @@ namespace // The value on the boundary is left unmodified // The values in the guard cells are extrapolated if (GuardCell) { - field(ijk_vec, n) = 2._rt*field(ijk_mirror, n) - field(ijk_mirrorp1, n); + field(ijk_vec, n) = 2._rt*field(ijk_next, n) - field(ijk_nextp1, n); } } else if ((OnBoundary || GuardCell) && set_field) { field(ijk_vec, n) = field_value; } else if (GuardCell) { - field(ijk_vec, n) = 2._rt*field(ijk_mirror, n) - field(ijk_mirrorp1, n); + field(ijk_vec, n) = 2._rt*field(ijk_next, n) - field(ijk_nextp1, n); } } else { if (OnBoundary && (E_like ^ is_normal_to_boundary)) { From 3f3c91d09a7f9d53f09419b56a68dac29e20407f Mon Sep 17 00:00:00 2001 From: David Grote Date: Fri, 6 Dec 2024 19:47:39 -0800 Subject: [PATCH 111/278] Fix the time for the insulator boundary field evaluation (#5488) In the application of the insulator boundary conditions, the field in the insulator is specified using an expression that can be a function of time. This PR updates the calls to that routine to pass in the correct time, for example the time at the half step or the end of the step. This requires passing the time down the call chain to the routines that apply the insulator boundaries which makes this a fairly sizable set of changes. Note that in some places the variable names were changed to be more precise about which time is passed in. For example, using `start_time` whenever the time at the start of the step is expected. 
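As a condensed view of the resulting convention (declarations only, with simplified signatures rather than the actual WarpX headers): each field sub-advance receives the time at which that sub-advance starts, so a time-dependent insulator boundary expression can be evaluated at the correct instant. The call pattern below mirrors the explicit `OneStep_nosub` flow in this diff:

```cpp
enum class DtType { FirstHalf, SecondHalf };

// Illustrative declarations, not the real WarpX signatures:
void EvolveB (double dt, DtType dt_type, double start_of_advance);
void EvolveE (double dt, double start_of_advance);

void one_step_sketch (double cur_time, double dt)
{
    EvolveB(0.5*dt, DtType::FirstHalf,  cur_time);           // B: t_n       -> t_{n+1/2}
    EvolveE(dt,     cur_time);                               // E: t_n       -> t_{n+1}
    EvolveB(0.5*dt, DtType::SecondHalf, cur_time + 0.5*dt);  // B: t_{n+1/2} -> t_{n+1}
}
```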
--- .../test_2d_pec_field_insulator.json | 6 +- .../WarpXFieldBoundaries.cpp | 18 +++--- Source/Evolve/WarpXEvolve.cpp | 46 +++++++-------- .../HybridPICModel/HybridPICModel.cpp | 6 +- .../ImplicitSolvers/SemiImplicitEM.H | 4 +- .../ImplicitSolvers/SemiImplicitEM.cpp | 19 +++--- .../StrangImplicitSpectralEM.H | 6 +- .../StrangImplicitSpectralEM.cpp | 24 ++++---- .../ImplicitSolvers/ThetaImplicitEM.H | 15 +++-- .../ImplicitSolvers/ThetaImplicitEM.cpp | 34 +++++------ .../ImplicitSolvers/WarpXImplicitOps.cpp | 30 +++++----- .../SpectralSolver/SpectralSolver.H | 3 + .../SpectralSolver/SpectralSolver.cpp | 1 + .../SpectralSolver/SpectralSolverRZ.H | 3 + .../SpectralSolver/SpectralSolverRZ.cpp | 2 +- Source/FieldSolver/WarpXPushFieldsEM.cpp | 58 ++++++++++--------- Source/WarpX.H | 50 ++++++++-------- 17 files changed, 172 insertions(+), 153 deletions(-) diff --git a/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator.json b/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator.json index 622cb5e5d30..b389856e66b 100644 --- a/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator.json +++ b/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator.json @@ -1,11 +1,11 @@ { "lev=0": { "Bx": 0.0, - "By": 0.33065279639752304, + "By": 0.3605690508580849, "Bz": 0.0, - "Ex": 31873416.396984838, + "Ex": 37101418.32484252, "Ey": 0.0, - "Ez": 99285542.27022335, + "Ez": 108228802.9434361, "jx": 0.0, "jy": 0.0, "jz": 0.0 diff --git a/Source/BoundaryConditions/WarpXFieldBoundaries.cpp b/Source/BoundaryConditions/WarpXFieldBoundaries.cpp index 7566979557e..692c9938e86 100644 --- a/Source/BoundaryConditions/WarpXFieldBoundaries.cpp +++ b/Source/BoundaryConditions/WarpXFieldBoundaries.cpp @@ -49,7 +49,7 @@ namespace } -void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) +void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type, amrex::Real time) { using ablastr::fields::Direction; @@ -94,7 +94,6 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) } if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { - amrex::Real const tnew = gett_new(lev); if (patch_type == PatchType::fine) { pec_insulator_boundary->ApplyPEC_InsulatortoEfield( {m_fields.get(FieldType::Efield_fp,Direction{0},lev), @@ -102,7 +101,7 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) m_fields.get(FieldType::Efield_fp,Direction{2},lev)}, field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), - lev, patch_type, ref_ratio, tnew); + lev, patch_type, ref_ratio, time); if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { // apply pec on split E-fields in PML region const bool split_pml_field = true; @@ -110,7 +109,7 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) m_fields.get_alldirs(FieldType::pml_E_fp, lev), field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), - lev, patch_type, ref_ratio, tnew, + lev, patch_type, ref_ratio, time, split_pml_field); } } else { @@ -120,7 +119,7 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) m_fields.get(FieldType::Efield_cp,Direction{2},lev)}, field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), - lev, patch_type, ref_ratio, tnew); + lev, patch_type, ref_ratio, time); if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { // apply pec on split E-fields in PML region const bool split_pml_field = true; @@ -128,7 +127,7 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) 
m_fields.get_alldirs(FieldType::pml_E_cp, lev), field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), - lev, patch_type, ref_ratio, tnew, + lev, patch_type, ref_ratio, time, split_pml_field); } } @@ -147,7 +146,7 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type) #endif } -void WarpX::ApplyBfieldBoundary (const int lev, PatchType patch_type, DtType a_dt_type) +void WarpX::ApplyBfieldBoundary (const int lev, PatchType patch_type, DtType a_dt_type, amrex::Real time) { using ablastr::fields::Direction; @@ -172,7 +171,6 @@ void WarpX::ApplyBfieldBoundary (const int lev, PatchType patch_type, DtType a_d } if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { - amrex::Real const tnew = gett_new(lev); if (patch_type == PatchType::fine) { pec_insulator_boundary->ApplyPEC_InsulatortoBfield( {m_fields.get(FieldType::Bfield_fp,Direction{0},lev), @@ -180,7 +178,7 @@ void WarpX::ApplyBfieldBoundary (const int lev, PatchType patch_type, DtType a_d m_fields.get(FieldType::Bfield_fp,Direction{2},lev)}, field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), - lev, patch_type, ref_ratio, tnew); + lev, patch_type, ref_ratio, time); } else { pec_insulator_boundary->ApplyPEC_InsulatortoBfield( {m_fields.get(FieldType::Bfield_cp,Direction{0},lev), @@ -188,7 +186,7 @@ void WarpX::ApplyBfieldBoundary (const int lev, PatchType patch_type, DtType a_d m_fields.get(FieldType::Bfield_cp,Direction{2},lev)}, field_boundary_lo, field_boundary_hi, get_ng_fieldgather(), Geom(lev), - lev, patch_type, ref_ratio, tnew); + lev, patch_type, ref_ratio, time); } } diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index e9540be3da7..163138ca572 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -390,7 +390,7 @@ WarpX::OneStep_nosub (Real cur_time) WarpX::Hybrid_QED_Push(dt); FillBoundaryE(guard_cells.ng_alloc_EB); } - PushPSATD(); + PushPSATD(cur_time); if (do_pml) { DampPML(); @@ -418,15 +418,15 @@ WarpX::OneStep_nosub (Real cur_time) FillBoundaryF(guard_cells.ng_FieldSolverF); FillBoundaryG(guard_cells.ng_FieldSolverG); - EvolveB(0.5_rt * dt[0], DtType::FirstHalf); // We now have B^{n+1/2} + EvolveB(0.5_rt * dt[0], DtType::FirstHalf, cur_time); // We now have B^{n+1/2} FillBoundaryB(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); if (WarpX::em_solver_medium == MediumForEM::Vacuum) { // vacuum medium - EvolveE(dt[0]); // We now have E^{n+1} + EvolveE(dt[0], cur_time); // We now have E^{n+1} } else if (WarpX::em_solver_medium == MediumForEM::Macroscopic) { // macroscopic medium - MacroscopicEvolveE(dt[0]); // We now have E^{n+1} + MacroscopicEvolveE(dt[0], cur_time); // We now have E^{n+1} } else { WARPX_ABORT_WITH_MESSAGE("Medium for EM is unknown"); } @@ -434,7 +434,7 @@ WarpX::OneStep_nosub (Real cur_time) EvolveF(0.5_rt * dt[0], DtType::SecondHalf); EvolveG(0.5_rt * dt[0], DtType::SecondHalf); - EvolveB(0.5_rt * dt[0], DtType::SecondHalf); // We now have B^{n+1} + EvolveB(0.5_rt * dt[0], DtType::SecondHalf, cur_time + 0.5_rt * dt[0]); // We now have B^{n+1} if (do_pml) { DampPML(); @@ -808,10 +808,10 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) { pml[lev]->PushPSATD(m_fields, lev); } - ApplyEfieldBoundary(lev, PatchType::fine); - if (lev > 0) { ApplyEfieldBoundary(lev, PatchType::coarse); } - ApplyBfieldBoundary(lev, PatchType::fine, DtType::FirstHalf); - if (lev > 0) { ApplyBfieldBoundary(lev, PatchType::coarse, DtType::FirstHalf); } + ApplyEfieldBoundary(lev, PatchType::fine, cur_time + dt[0]); 
+ if (lev > 0) { ApplyEfieldBoundary(lev, PatchType::coarse, cur_time + dt[0]); } + ApplyBfieldBoundary(lev, PatchType::fine, DtType::FirstHalf, cur_time + dt[0]); + if (lev > 0) { ApplyBfieldBoundary(lev, PatchType::coarse, DtType::FirstHalf, cur_time + dt[0]); } } // Damp fields in PML before exchanging guard cells @@ -886,17 +886,17 @@ WarpX::OneStep_sub1 (Real cur_time) m_fields.get_mr_levels(FieldType::rho_cp, finest_level), fine_lev, PatchType::fine, 0, 2*ncomps); - EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); + EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf, cur_time); EvolveF(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); FillBoundaryB(fine_lev, PatchType::fine, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); FillBoundaryF(fine_lev, PatchType::fine, guard_cells.ng_alloc_F, WarpX::sync_nodal_points); - EvolveE(fine_lev, PatchType::fine, dt[fine_lev]); + EvolveE(fine_lev, PatchType::fine, dt[fine_lev], cur_time); FillBoundaryE(fine_lev, PatchType::fine, guard_cells.ng_FieldGather); - EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::SecondHalf); + EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::SecondHalf, cur_time + 0.5_rt * dt[fine_lev]); EvolveF(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::SecondHalf); if (do_pml) { @@ -922,22 +922,22 @@ WarpX::OneStep_sub1 (Real cur_time) m_fields.get_mr_levels(FieldType::rho_buf, finest_level), coarse_lev, 0, ncomps); - EvolveB(fine_lev, PatchType::coarse, dt[fine_lev], DtType::FirstHalf); + EvolveB(fine_lev, PatchType::coarse, dt[fine_lev], DtType::FirstHalf, cur_time); EvolveF(fine_lev, PatchType::coarse, dt[fine_lev], DtType::FirstHalf); FillBoundaryB(fine_lev, PatchType::coarse, guard_cells.ng_FieldGather); FillBoundaryF(fine_lev, PatchType::coarse, guard_cells.ng_FieldSolverF); - EvolveE(fine_lev, PatchType::coarse, dt[fine_lev]); + EvolveE(fine_lev, PatchType::coarse, dt[fine_lev], cur_time); FillBoundaryE(fine_lev, PatchType::coarse, guard_cells.ng_FieldGather); - EvolveB(coarse_lev, PatchType::fine, 0.5_rt*dt[coarse_lev], DtType::FirstHalf); + EvolveB(coarse_lev, PatchType::fine, 0.5_rt*dt[coarse_lev], DtType::FirstHalf, cur_time); EvolveF(coarse_lev, PatchType::fine, 0.5_rt*dt[coarse_lev], DtType::FirstHalf); FillBoundaryB(coarse_lev, PatchType::fine, guard_cells.ng_FieldGather, WarpX::sync_nodal_points); FillBoundaryF(coarse_lev, PatchType::fine, guard_cells.ng_FieldSolverF, WarpX::sync_nodal_points); - EvolveE(coarse_lev, PatchType::fine, 0.5_rt*dt[coarse_lev]); + EvolveE(coarse_lev, PatchType::fine, 0.5_rt*dt[coarse_lev], cur_time); FillBoundaryE(coarse_lev, PatchType::fine, guard_cells.ng_FieldGather); // TODO Remove call to FillBoundaryAux before UpdateAuxilaryData? 
@@ -961,16 +961,16 @@ WarpX::OneStep_sub1 (Real cur_time) m_fields.get_mr_levels(FieldType::rho_cp, finest_level), fine_lev, PatchType::fine, 0, ncomps); - EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); + EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf, cur_time + dt[fine_lev]); EvolveF(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); FillBoundaryB(fine_lev, PatchType::fine, guard_cells.ng_FieldSolver); FillBoundaryF(fine_lev, PatchType::fine, guard_cells.ng_FieldSolverF); - EvolveE(fine_lev, PatchType::fine, dt[fine_lev]); + EvolveE(fine_lev, PatchType::fine, dt[fine_lev], cur_time + dt[fine_lev]); FillBoundaryE(fine_lev, PatchType::fine, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); - EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::SecondHalf); + EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::SecondHalf, cur_time + 1.5_rt*dt[fine_lev]); EvolveF(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::SecondHalf); if (do_pml) { @@ -997,11 +997,11 @@ WarpX::OneStep_sub1 (Real cur_time) m_fields.get_mr_levels(FieldType::rho_buf, finest_level), coarse_lev, ncomps, ncomps); - EvolveE(fine_lev, PatchType::coarse, dt[fine_lev]); + EvolveE(fine_lev, PatchType::coarse, dt[fine_lev], cur_time + 0.5_rt * dt[fine_lev]); FillBoundaryE(fine_lev, PatchType::coarse, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); - EvolveB(fine_lev, PatchType::coarse, dt[fine_lev], DtType::SecondHalf); + EvolveB(fine_lev, PatchType::coarse, dt[fine_lev], DtType::SecondHalf, cur_time + 0.5_rt * dt[fine_lev]); EvolveF(fine_lev, PatchType::coarse, dt[fine_lev], DtType::SecondHalf); if (do_pml) { @@ -1016,11 +1016,11 @@ WarpX::OneStep_sub1 (Real cur_time) FillBoundaryF(fine_lev, PatchType::coarse, guard_cells.ng_FieldSolverF, WarpX::sync_nodal_points); - EvolveE(coarse_lev, PatchType::fine, 0.5_rt*dt[coarse_lev]); + EvolveE(coarse_lev, PatchType::fine, 0.5_rt*dt[coarse_lev], cur_time + 0.5_rt*dt[coarse_lev]); FillBoundaryE(coarse_lev, PatchType::fine, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); - EvolveB(coarse_lev, PatchType::fine, 0.5_rt*dt[coarse_lev], DtType::SecondHalf); + EvolveB(coarse_lev, PatchType::fine, 0.5_rt*dt[coarse_lev], DtType::SecondHalf, cur_time + 0.5_rt*dt[coarse_lev]); EvolveF(coarse_lev, PatchType::fine, 0.5_rt*dt[coarse_lev], DtType::SecondHalf); if (do_pml) { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 424f194ff37..20989cbeca9 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -348,7 +348,8 @@ void HybridPICModel::HybridPICSolveE ( Efield, current_fp_plasma, Jfield, Bfield, rhofield, *electron_pressure_fp, edge_lengths, lev, this, solve_for_Faraday ); - warpx.ApplyEfieldBoundary(lev, patch_type); + amrex::Real const time = warpx.gett_old(0) + warpx.getdt(0); + warpx.ApplyEfieldBoundary(lev, patch_type, time); } void HybridPICModel::CalculateElectronPressure() const @@ -556,6 +557,7 @@ void HybridPICModel::FieldPush ( HybridPICSolveE(Efield, Jfield, Bfield, rhofield, edge_lengths, true); warpx.FillBoundaryE(ng, nodal_sync); // Push forward the B-field using Faraday's law - warpx.EvolveB(dt, dt_type); + amrex::Real const t_old = warpx.gett_old(0); + warpx.EvolveB(dt, dt_type, t_old); warpx.FillBoundaryB(ng, nodal_sync); } 
diff --git a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.H b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.H index b6c808e0ab9..62401d7b48f 100644 --- a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.H +++ b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.H @@ -57,13 +57,13 @@ public: void PrintParameters () const override; - void OneStep ( amrex::Real a_time, + void OneStep ( amrex::Real start_time, amrex::Real a_dt, int a_step ) override; void ComputeRHS ( WarpXSolverVec& a_RHS, const WarpXSolverVec& a_E, - amrex::Real a_time, + amrex::Real half_time, int a_nl_iter, bool a_from_jacobian ) override; diff --git a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp index f558b3d9756..41fdf515581 100644 --- a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp @@ -52,7 +52,7 @@ void SemiImplicitEM::PrintParameters () const amrex::Print() << "-----------------------------------------------------------\n\n"; } -void SemiImplicitEM::OneStep ( amrex::Real a_time, +void SemiImplicitEM::OneStep ( amrex::Real start_time, amrex::Real a_dt, int a_step ) { @@ -71,10 +71,10 @@ void SemiImplicitEM::OneStep ( amrex::Real a_time, m_Eold.Copy( FieldType::Efield_fp ); // Advance WarpX owned Bfield_fp from t_{n} to t_{n+1/2} - m_WarpX->EvolveB(0.5_rt*m_dt, DtType::FirstHalf); + m_WarpX->EvolveB(0.5_rt*m_dt, DtType::FirstHalf, start_time); m_WarpX->FillBoundaryB(m_WarpX->getngEB(), true); - const amrex::Real half_time = a_time + 0.5_rt*m_dt; + const amrex::Real half_time = start_time + 0.5_rt*m_dt; // Solve nonlinear system for Eg at t_{n+1/2} // Particles will be advanced to t_{n+1/2} @@ -82,7 +82,7 @@ void SemiImplicitEM::OneStep ( amrex::Real a_time, m_nlsolver->Solve( m_E, m_Eold, half_time, 0.5_rt*m_dt ); // Update WarpX owned Efield_fp to t_{n+1/2} - m_WarpX->SetElectricFieldAndApplyBCs( m_E ); + m_WarpX->SetElectricFieldAndApplyBCs( m_E, half_time ); // Advance particles from time n+1/2 to time n+1 m_WarpX->FinishImplicitParticleUpdate(); @@ -90,27 +90,28 @@ void SemiImplicitEM::OneStep ( amrex::Real a_time, // Advance Eg from time n+1/2 to time n+1 // Eg^{n+1} = 2.0*Eg^{n+1/2} - Eg^n m_E.linComb( 2._rt, m_E, -1._rt, m_Eold ); - m_WarpX->SetElectricFieldAndApplyBCs( m_E ); + const amrex::Real new_time = start_time + m_dt; + m_WarpX->SetElectricFieldAndApplyBCs( m_E, new_time ); // Advance WarpX owned Bfield_fp from t_{n+1/2} to t_{n+1} - m_WarpX->EvolveB(0.5_rt*m_dt, DtType::SecondHalf); + m_WarpX->EvolveB(0.5_rt*m_dt, DtType::SecondHalf, half_time); m_WarpX->FillBoundaryB(m_WarpX->getngEB(), true); } void SemiImplicitEM::ComputeRHS ( WarpXSolverVec& a_RHS, const WarpXSolverVec& a_E, - amrex::Real a_time, + amrex::Real half_time, int a_nl_iter, bool a_from_jacobian ) { // Update WarpX-owned Efield_fp using current state of Eg from // the nonlinear solver at time n+theta - m_WarpX->SetElectricFieldAndApplyBCs( a_E ); + m_WarpX->SetElectricFieldAndApplyBCs( a_E, half_time ); // Update particle positions and velocities using the current state // of Eg and Bg. 
Deposit current density at time n+1/2 - m_WarpX->ImplicitPreRHSOp( a_time, m_dt, a_nl_iter, a_from_jacobian ); + m_WarpX->ImplicitPreRHSOp( half_time, m_dt, a_nl_iter, a_from_jacobian ); // RHS = cvac^2*0.5*dt*( curl(Bg^{n+1/2}) - mu0*Jg^{n+1/2} ) m_WarpX->ImplicitComputeRHSE(0.5_rt*m_dt, a_RHS); diff --git a/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.H b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.H index d1587cfb9d1..fcfcb9821a7 100644 --- a/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.H +++ b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.H @@ -58,13 +58,13 @@ public: void PrintParameters () const override; - void OneStep ( amrex::Real a_time, + void OneStep ( amrex::Real start_time, amrex::Real a_dt, int a_step ) override; void ComputeRHS ( WarpXSolverVec& a_RHS, const WarpXSolverVec& a_E, - amrex::Real a_time, + amrex::Real half_time, int a_nl_iter, bool a_from_jacobian ) override; @@ -92,7 +92,7 @@ private: * \brief Update the E and B fields owned by WarpX */ void UpdateWarpXFields ( WarpXSolverVec const& a_E, - amrex::Real a_time ); + amrex::Real half_time ); /** * \brief Nonlinear solver is for the time-centered values of E. After diff --git a/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp index 501cbed10eb..cd672f18f98 100644 --- a/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp @@ -54,7 +54,7 @@ void StrangImplicitSpectralEM::PrintParameters () const amrex::Print() << "-----------------------------------------------------------\n\n"; } -void StrangImplicitSpectralEM::OneStep ( amrex::Real a_time, +void StrangImplicitSpectralEM::OneStep ( amrex::Real start_time, amrex::Real a_dt, int a_step ) { @@ -70,13 +70,13 @@ void StrangImplicitSpectralEM::OneStep ( amrex::Real a_time, m_WarpX->SaveParticlesAtImplicitStepStart(); // Advance the fields to time n+1/2 source free - m_WarpX->SpectralSourceFreeFieldAdvance(); + m_WarpX->SpectralSourceFreeFieldAdvance(start_time); // Save the fields at the start of the step m_Eold.Copy( FieldType::Efield_fp ); m_E.Copy(m_Eold); // initial guess for E - amrex::Real const half_time = a_time + 0.5_rt*m_dt; + amrex::Real const half_time = start_time + 0.5_rt*m_dt; // Solve nonlinear system for E at t_{n+1/2} // Particles will be advanced to t_{n+1/2} @@ -89,27 +89,27 @@ void StrangImplicitSpectralEM::OneStep ( amrex::Real a_time, m_WarpX->FinishImplicitParticleUpdate(); // Advance E and B fields from time n+1/2 to time n+1 - amrex::Real const new_time = a_time + m_dt; + amrex::Real const new_time = start_time + m_dt; FinishFieldUpdate( new_time ); // Advance the fields to time n+1 source free - m_WarpX->SpectralSourceFreeFieldAdvance(); + m_WarpX->SpectralSourceFreeFieldAdvance(half_time); } void StrangImplicitSpectralEM::ComputeRHS ( WarpXSolverVec& a_RHS, WarpXSolverVec const & a_E, - amrex::Real a_time, + amrex::Real half_time, int a_nl_iter, bool a_from_jacobian ) { // Update WarpX-owned Efield_fp and Bfield_fp using current state of // E from the nonlinear solver at time n+1/2 - UpdateWarpXFields( a_E, a_time ); + UpdateWarpXFields( a_E, half_time ); // Self consistently update particle positions and velocities using the // current state of the fields E and B. Deposit current density at time n+1/2. 
- m_WarpX->ImplicitPreRHSOp( a_time, m_dt, a_nl_iter, a_from_jacobian ); + m_WarpX->ImplicitPreRHSOp( half_time, m_dt, a_nl_iter, a_from_jacobian ); // For Strang split implicit PSATD, the RHS = -dt*mu*c**2*J bool const allow_type_mismatch = true; @@ -120,20 +120,20 @@ void StrangImplicitSpectralEM::ComputeRHS ( WarpXSolverVec& a_RHS, } void StrangImplicitSpectralEM::UpdateWarpXFields (WarpXSolverVec const & a_E, - amrex::Real /*a_time*/ ) + amrex::Real half_time ) { // Update Efield_fp owned by WarpX - m_WarpX->SetElectricFieldAndApplyBCs( a_E ); + m_WarpX->SetElectricFieldAndApplyBCs( a_E, half_time ); } -void StrangImplicitSpectralEM::FinishFieldUpdate ( amrex::Real /*a_new_time*/ ) +void StrangImplicitSpectralEM::FinishFieldUpdate ( amrex::Real a_new_time ) { // Eg^{n+1} = 2*E_g^{n+1/2} - E_g^n amrex::Real const c0 = 1._rt/0.5_rt; amrex::Real const c1 = 1._rt - c0; m_E.linComb( c0, m_E, c1, m_Eold ); - m_WarpX->SetElectricFieldAndApplyBCs( m_E ); + m_WarpX->SetElectricFieldAndApplyBCs( m_E, a_new_time ); } diff --git a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H index 7461b77fb51..c9de6b82a00 100644 --- a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H +++ b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.H @@ -67,13 +67,20 @@ public: void PrintParameters () const override; - void OneStep ( amrex::Real a_time, + /** + * \brief Advances the simulation one time step + * + * \param[in] start_time The time at the start of the time step + * \param[in] a_dt The time step size + * \param[in] a_step The time step number + */ + void OneStep ( amrex::Real start_time, amrex::Real a_dt, int a_step ) override; void ComputeRHS ( WarpXSolverVec& a_RHS, const WarpXSolverVec& a_E, - amrex::Real a_time, + amrex::Real start_time, int a_nl_iter, bool a_from_jacobian ) override; @@ -98,13 +105,13 @@ private: * \brief Update the E and B fields owned by WarpX */ void UpdateWarpXFields ( const WarpXSolverVec& a_E, - amrex::Real a_time ); + amrex::Real start_time ); /** * \brief Nonlinear solver is for the time-centered values of E. 
After * the solver, need to use m_E and m_Eold to compute E^{n+1} */ - void FinishFieldUpdate ( amrex::Real a_new_time ); + void FinishFieldUpdate ( amrex::Real end_time ); }; diff --git a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp index 8ca592517ac..aa6ee63f7df 100644 --- a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp @@ -74,7 +74,7 @@ void ThetaImplicitEM::PrintParameters () const amrex::Print() << "-----------------------------------------------------------\n\n"; } -void ThetaImplicitEM::OneStep ( const amrex::Real a_time, +void ThetaImplicitEM::OneStep ( const amrex::Real start_time, const amrex::Real a_dt, const int a_step ) { @@ -102,60 +102,58 @@ void ThetaImplicitEM::OneStep ( const amrex::Real a_time, } } - const amrex::Real theta_time = a_time + m_theta*m_dt; - // Solve nonlinear system for Eg at t_{n+theta} // Particles will be advanced to t_{n+1/2} m_E.Copy(m_Eold); // initial guess for Eg^{n+theta} - m_nlsolver->Solve( m_E, m_Eold, theta_time, m_theta*m_dt ); + m_nlsolver->Solve( m_E, m_Eold, start_time, m_theta*m_dt ); // Update WarpX owned Efield_fp and Bfield_fp to t_{n+theta} - UpdateWarpXFields( m_E, theta_time ); + UpdateWarpXFields( m_E, start_time ); // Advance particles from time n+1/2 to time n+1 m_WarpX->FinishImplicitParticleUpdate(); // Advance Eg and Bg from time n+theta to time n+1 - const amrex::Real new_time = a_time + m_dt; - FinishFieldUpdate( new_time ); + const amrex::Real end_time = start_time + m_dt; + FinishFieldUpdate( end_time ); } void ThetaImplicitEM::ComputeRHS ( WarpXSolverVec& a_RHS, const WarpXSolverVec& a_E, - amrex::Real a_time, + amrex::Real start_time, int a_nl_iter, bool a_from_jacobian ) { // Update WarpX-owned Efield_fp and Bfield_fp using current state of // Eg from the nonlinear solver at time n+theta - UpdateWarpXFields( a_E, a_time ); + UpdateWarpXFields( a_E, start_time ); // Update particle positions and velocities using the current state // of Eg and Bg. 
Deposit current density at time n+1/2 - m_WarpX->ImplicitPreRHSOp( a_time, m_dt, a_nl_iter, a_from_jacobian ); + const amrex::Real theta_time = start_time + m_theta*m_dt; + m_WarpX->ImplicitPreRHSOp( theta_time, m_dt, a_nl_iter, a_from_jacobian ); // RHS = cvac^2*m_theta*dt*( curl(Bg^{n+theta}) - mu0*Jg^{n+1/2} ) m_WarpX->ImplicitComputeRHSE( m_theta*m_dt, a_RHS); } void ThetaImplicitEM::UpdateWarpXFields ( const WarpXSolverVec& a_E, - amrex::Real a_time ) + amrex::Real start_time ) { - amrex::ignore_unused(a_time); // Update Efield_fp owned by WarpX - m_WarpX->SetElectricFieldAndApplyBCs( a_E ); + const amrex::Real theta_time = start_time + m_theta*m_dt; + m_WarpX->SetElectricFieldAndApplyBCs( a_E, theta_time ); // Update Bfield_fp owned by WarpX ablastr::fields::MultiLevelVectorField const& B_old = m_WarpX->m_fields.get_mr_levels_alldirs(FieldType::B_old, 0); - m_WarpX->UpdateMagneticFieldAndApplyBCs( B_old, m_theta*m_dt ); + m_WarpX->UpdateMagneticFieldAndApplyBCs( B_old, m_theta*m_dt, start_time ); } -void ThetaImplicitEM::FinishFieldUpdate ( amrex::Real a_new_time ) +void ThetaImplicitEM::FinishFieldUpdate ( amrex::Real end_time ) { - amrex::ignore_unused(a_new_time); // Eg^{n+1} = (1/theta)*Eg^{n+theta} + (1-1/theta)*Eg^n // Bg^{n+1} = (1/theta)*Bg^{n+theta} + (1-1/theta)*Bg^n @@ -163,8 +161,8 @@ void ThetaImplicitEM::FinishFieldUpdate ( amrex::Real a_new_time ) const amrex::Real c0 = 1._rt/m_theta; const amrex::Real c1 = 1._rt - c0; m_E.linComb( c0, m_E, c1, m_Eold ); - m_WarpX->SetElectricFieldAndApplyBCs( m_E ); + m_WarpX->SetElectricFieldAndApplyBCs( m_E, end_time ); ablastr::fields::MultiLevelVectorField const & B_old = m_WarpX->m_fields.get_mr_levels_alldirs(FieldType::B_old, 0); - m_WarpX->FinishMagneticFieldAndApplyBCs( B_old, m_theta ); + m_WarpX->FinishMagneticFieldAndApplyBCs( B_old, m_theta, end_time ); } diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp index b1872ab7dba..eaf96cf77ec 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp +++ b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp @@ -70,7 +70,7 @@ WarpX::ImplicitPreRHSOp ( amrex::Real a_cur_time, } void -WarpX::SetElectricFieldAndApplyBCs ( const WarpXSolverVec& a_E ) +WarpX::SetElectricFieldAndApplyBCs ( const WarpXSolverVec& a_E, amrex::Real a_time ) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( a_E.getArrayVecType()==warpx::fields::FieldType::Efield_fp, @@ -84,12 +84,12 @@ WarpX::SetElectricFieldAndApplyBCs ( const WarpXSolverVec& a_E ) amrex::MultiFab::Copy(*Efield_fp[0][1], *Evec[0][1], 0, 0, ncomps, Evec[0][1]->nGrowVect()); amrex::MultiFab::Copy(*Efield_fp[0][2], *Evec[0][2], 0, 0, ncomps, Evec[0][2]->nGrowVect()); FillBoundaryE(guard_cells.ng_alloc_EB, WarpX::sync_nodal_points); - ApplyEfieldBoundary(0, PatchType::fine); + ApplyEfieldBoundary(0, PatchType::fine, a_time); } void -WarpX::UpdateMagneticFieldAndApplyBCs( ablastr::fields::MultiLevelVectorField const& a_Bn, - amrex::Real a_thetadt ) +WarpX::UpdateMagneticFieldAndApplyBCs( ablastr::fields::MultiLevelVectorField const& a_Bn, + amrex::Real a_thetadt, amrex::Real start_time ) { using ablastr::fields::Direction; using warpx::fields::FieldType; @@ -100,23 +100,23 @@ WarpX::UpdateMagneticFieldAndApplyBCs( ablastr::fields::MultiLevelVectorField co amrex::MultiFab::Copy(*Bfp[1], *a_Bn[lev][1], 0, 0, ncomps, a_Bn[lev][1]->nGrowVect()); amrex::MultiFab::Copy(*Bfp[2], *a_Bn[lev][2], 0, 0, ncomps, a_Bn[lev][2]->nGrowVect()); } - EvolveB(a_thetadt, DtType::Full); + 
EvolveB(a_thetadt, DtType::Full, start_time); FillBoundaryB(guard_cells.ng_alloc_EB, WarpX::sync_nodal_points); } void -WarpX::FinishMagneticFieldAndApplyBCs( ablastr::fields::MultiLevelVectorField const& a_Bn, - amrex::Real a_theta ) +WarpX::FinishMagneticFieldAndApplyBCs( ablastr::fields::MultiLevelVectorField const& a_Bn, + amrex::Real a_theta, amrex::Real a_time ) { using warpx::fields::FieldType; FinishImplicitField(m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, 0), a_Bn, a_theta); - ApplyBfieldBoundary(0, PatchType::fine, DtType::Full); + ApplyBfieldBoundary(0, PatchType::fine, DtType::Full, a_time); FillBoundaryB(guard_cells.ng_alloc_EB, WarpX::sync_nodal_points); } void -WarpX::SpectralSourceFreeFieldAdvance () +WarpX::SpectralSourceFreeFieldAdvance (amrex::Real start_time) { using namespace amrex::literals; using warpx::fields::FieldType; @@ -142,7 +142,7 @@ WarpX::SpectralSourceFreeFieldAdvance () current_fp[1]->setVal(0._rt); current_fp[2]->setVal(0._rt); if (rho_fp) { rho_fp->setVal(0._rt); } - PushPSATD(); // Note that this does dt/2 + PushPSATD(start_time); // Note that this does dt/2 FillBoundaryE(guard_cells.ng_alloc_EB, WarpX::sync_nodal_points); FillBoundaryB(guard_cells.ng_alloc_EB, WarpX::sync_nodal_points); @@ -294,8 +294,8 @@ WarpX::FinishImplicitParticleUpdate () } void -WarpX::FinishImplicitField( ablastr::fields::MultiLevelVectorField const& Field_fp, - ablastr::fields::MultiLevelVectorField const& Field_n, +WarpX::FinishImplicitField( ablastr::fields::MultiLevelVectorField const& Field_fp, + ablastr::fields::MultiLevelVectorField const& Field_n, amrex::Real theta ) { using namespace amrex::literals; @@ -345,7 +345,7 @@ WarpX::FinishImplicitField( ablastr::fields::MultiLevelVectorField const& Field } void -WarpX::ImplicitComputeRHSE (amrex::Real a_dt, WarpXSolverVec& a_Erhs_vec) +WarpX::ImplicitComputeRHSE (amrex::Real a_dt, WarpXSolverVec& a_Erhs_vec) { for (int lev = 0; lev <= finest_level; ++lev) { @@ -354,7 +354,7 @@ WarpX::ImplicitComputeRHSE (amrex::Real a_dt, WarpXSolverVec& a_Erhs_vec) } void -WarpX::ImplicitComputeRHSE (int lev, amrex::Real a_dt, WarpXSolverVec& a_Erhs_vec) +WarpX::ImplicitComputeRHSE (int lev, amrex::Real a_dt, WarpXSolverVec& a_Erhs_vec) { WARPX_PROFILE("WarpX::ImplicitComputeRHSE()"); ImplicitComputeRHSE(lev, PatchType::fine, a_dt, a_Erhs_vec); @@ -365,7 +365,7 @@ WarpX::ImplicitComputeRHSE (int lev, amrex::Real a_dt, WarpXSolverVec& a_Erhs_v } void -WarpX::ImplicitComputeRHSE (int lev, PatchType patch_type, amrex::Real a_dt, WarpXSolverVec& a_Erhs_vec) +WarpX::ImplicitComputeRHSE (int lev, PatchType patch_type, amrex::Real a_dt, WarpXSolverVec& a_Erhs_vec) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( a_Erhs_vec.getArrayVecType()==warpx::fields::FieldType::Efield_fp, diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.H b/Source/FieldSolver/SpectralSolver/SpectralSolver.H index bcd80e421a8..a298b64ff95 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolver.H +++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.H @@ -198,6 +198,9 @@ class SpectralSolver SpectralFieldIndex m_spectral_index; + // Solve time step size + amrex::Real m_dt; + protected: amrex::IntVect m_fill_guards; diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp b/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp index fcd52597e07..59f7c2d6d38 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralSolver.cpp @@ -42,6 +42,7 @@ SpectralSolver::SpectralSolver ( const RhoInTime 
rho_in_time, const bool dive_cleaning, const bool divb_cleaning) + : m_dt(dt) { // Initialize all structures using the same distribution mapping dm diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H index 61cf64036eb..7e1a4f970d2 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H +++ b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.H @@ -152,6 +152,9 @@ class SpectralSolverRZ SpectralFieldIndex m_spectral_index; + // Solve time step size + amrex::Real m_dt; + private: SpectralKSpaceRZ k_space; // Save the instance to initialize filtering diff --git a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp index 9a8cff9f1f3..7672b646b2e 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralSolverRZ.cpp @@ -40,7 +40,7 @@ SpectralSolverRZ::SpectralSolverRZ (const int lev, const RhoInTime rho_in_time, const bool dive_cleaning, const bool divb_cleaning) - : k_space(realspace_ba, dm, dx) + : m_dt(dt), k_space(realspace_ba, dm, dx) { // Initialize all structures using the same distribution mapping dm diff --git a/Source/FieldSolver/WarpXPushFieldsEM.cpp b/Source/FieldSolver/WarpXPushFieldsEM.cpp index fd786dc65ba..24640fc63c7 100644 --- a/Source/FieldSolver/WarpXPushFieldsEM.cpp +++ b/Source/FieldSolver/WarpXPushFieldsEM.cpp @@ -714,9 +714,10 @@ WarpX::PSATDScaleAverageFields (const amrex::Real scale_factor) #endif // WARPX_USE_FFT void -WarpX::PushPSATD () +WarpX::PushPSATD (amrex::Real start_time) { #ifndef WARPX_USE_FFT + amrex::ignore_unused(start_time); WARPX_ABORT_WITH_MESSAGE( "PushFieldsEM: PSATD solver selected but not built"); #else @@ -859,26 +860,28 @@ WarpX::PushPSATD () if (WarpX::do_dive_cleaning) { PSATDBackwardTransformF(); } if (WarpX::do_divb_cleaning) { PSATDBackwardTransformG(); } - // Evolve the fields in the PML boxes for (int lev = 0; lev <= finest_level; ++lev) { + // Evolve the fields in the PML boxes if (pml[lev] && pml[lev]->ok()) { pml[lev]->PushPSATD(m_fields, lev); } - ApplyEfieldBoundary(lev, PatchType::fine); - if (lev > 0) { ApplyEfieldBoundary(lev, PatchType::coarse); } - ApplyBfieldBoundary(lev, PatchType::fine, DtType::FirstHalf); - if (lev > 0) { ApplyBfieldBoundary(lev, PatchType::coarse, DtType::FirstHalf); } + + amrex::Real const new_time = start_time + spectral_solver_fp[lev]->m_dt; + ApplyEfieldBoundary(lev, PatchType::fine, new_time); + if (lev > 0) { ApplyEfieldBoundary(lev, PatchType::coarse, new_time); } + ApplyBfieldBoundary(lev, PatchType::fine, DtType::FirstHalf, new_time); + if (lev > 0) { ApplyBfieldBoundary(lev, PatchType::coarse, DtType::FirstHalf, new_time); } } #endif } void -WarpX::EvolveB (amrex::Real a_dt, DtType a_dt_type) +WarpX::EvolveB (amrex::Real a_dt, DtType a_dt_type, amrex::Real start_time) { for (int lev = 0; lev <= finest_level; ++lev) { - EvolveB(lev, a_dt, a_dt_type); + EvolveB(lev, a_dt, a_dt_type, start_time); } // Allow execution of Python callback after B-field push @@ -886,18 +889,18 @@ WarpX::EvolveB (amrex::Real a_dt, DtType a_dt_type) } void -WarpX::EvolveB (int lev, amrex::Real a_dt, DtType a_dt_type) +WarpX::EvolveB (int lev, amrex::Real a_dt, DtType a_dt_type, amrex::Real start_time) { WARPX_PROFILE("WarpX::EvolveB()"); - EvolveB(lev, PatchType::fine, a_dt, a_dt_type); + EvolveB(lev, PatchType::fine, a_dt, a_dt_type, start_time); if (lev > 0) { - EvolveB(lev, PatchType::coarse, a_dt, a_dt_type); + EvolveB(lev, 
PatchType::coarse, a_dt, a_dt_type, start_time); } } void -WarpX::EvolveB (int lev, PatchType patch_type, amrex::Real a_dt, DtType a_dt_type) +WarpX::EvolveB (int lev, PatchType patch_type, amrex::Real a_dt, DtType a_dt_type, amrex::Real start_time) { // Evolve B field in regular cells if (patch_type == PatchType::fine) { @@ -923,16 +926,17 @@ WarpX::EvolveB (int lev, PatchType patch_type, amrex::Real a_dt, DtType a_dt_typ } } - ApplyBfieldBoundary(lev, patch_type, a_dt_type); + amrex::Real const new_time = start_time + a_dt; + ApplyBfieldBoundary(lev, patch_type, a_dt_type, new_time); } void -WarpX::EvolveE (amrex::Real a_dt) +WarpX::EvolveE (amrex::Real a_dt, amrex::Real start_time) { for (int lev = 0; lev <= finest_level; ++lev) { - EvolveE(lev, a_dt); + EvolveE(lev, a_dt, start_time); } // Allow execution of Python callback after E-field push @@ -940,18 +944,18 @@ WarpX::EvolveE (amrex::Real a_dt) } void -WarpX::EvolveE (int lev, amrex::Real a_dt) +WarpX::EvolveE (int lev, amrex::Real a_dt, amrex::Real start_time) { WARPX_PROFILE("WarpX::EvolveE()"); - EvolveE(lev, PatchType::fine, a_dt); + EvolveE(lev, PatchType::fine, a_dt, start_time); if (lev > 0) { - EvolveE(lev, PatchType::coarse, a_dt); + EvolveE(lev, PatchType::coarse, a_dt, start_time); } } void -WarpX::EvolveE (int lev, PatchType patch_type, amrex::Real a_dt) +WarpX::EvolveE (int lev, PatchType patch_type, amrex::Real a_dt, amrex::Real start_time) { // Evolve E field in regular cells if (patch_type == PatchType::fine) { @@ -987,7 +991,8 @@ WarpX::EvolveE (int lev, PatchType patch_type, amrex::Real a_dt) } } - ApplyEfieldBoundary(lev, patch_type); + amrex::Real const new_time = start_time + a_dt; + ApplyEfieldBoundary(lev, patch_type, new_time); // ECTRhofield must be recomputed at the very end of the Efield update to ensure // that ECTRhofield is consistent with Efield @@ -1118,15 +1123,15 @@ WarpX::EvolveG (int lev, PatchType patch_type, amrex::Real a_dt, DtType /*a_dt_t } void -WarpX::MacroscopicEvolveE (amrex::Real a_dt) +WarpX::MacroscopicEvolveE (amrex::Real a_dt, amrex::Real start_time) { for (int lev = 0; lev <= finest_level; ++lev ) { - MacroscopicEvolveE(lev, a_dt); + MacroscopicEvolveE(lev, a_dt, start_time); } } void -WarpX::MacroscopicEvolveE (int lev, amrex::Real a_dt) { +WarpX::MacroscopicEvolveE (int lev, amrex::Real a_dt, amrex::Real start_time) { WARPX_PROFILE("WarpX::MacroscopicEvolveE()"); @@ -1135,11 +1140,11 @@ WarpX::MacroscopicEvolveE (int lev, amrex::Real a_dt) { "Macroscopic EvolveE is not implemented for lev>0, yet." 
); - MacroscopicEvolveE(lev, PatchType::fine, a_dt); + MacroscopicEvolveE(lev, PatchType::fine, a_dt, start_time); } void -WarpX::MacroscopicEvolveE (int lev, PatchType patch_type, amrex::Real a_dt) { +WarpX::MacroscopicEvolveE (int lev, PatchType patch_type, amrex::Real a_dt, amrex::Real start_time) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( patch_type == PatchType::fine, @@ -1171,7 +1176,8 @@ WarpX::MacroscopicEvolveE (int lev, PatchType patch_type, amrex::Real a_dt) { } } - ApplyEfieldBoundary(lev, patch_type); + amrex::Real const new_time = start_time + a_dt; + ApplyEfieldBoundary(lev, patch_type, new_time); } void diff --git a/Source/WarpX.H b/Source/WarpX.H index aa8c42715d3..73998d6faf2 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -136,21 +136,21 @@ public: // // Functions used by implicit solvers // - void ImplicitPreRHSOp ( amrex::Real cur_time, - amrex::Real a_full_dt, - int a_nl_iter, - bool a_from_jacobian ); + void ImplicitPreRHSOp ( amrex::Real cur_time, + amrex::Real a_full_dt, + int a_nl_iter, + bool a_from_jacobian ); void SaveParticlesAtImplicitStepStart (); void FinishImplicitParticleUpdate (); - void SetElectricFieldAndApplyBCs ( const WarpXSolverVec& a_E ); - void UpdateMagneticFieldAndApplyBCs ( ablastr::fields::MultiLevelVectorField const& a_Bn, - amrex::Real a_thetadt ); - void SpectralSourceFreeFieldAdvance (); - void FinishMagneticFieldAndApplyBCs ( ablastr::fields::MultiLevelVectorField const& a_Bn, - amrex::Real a_theta ); - void FinishImplicitField ( const ablastr::fields::MultiLevelVectorField& Field_fp, - const ablastr::fields::MultiLevelVectorField& Field_n, - amrex::Real theta ); + void SetElectricFieldAndApplyBCs ( const WarpXSolverVec& a_E, amrex::Real a_time ); + void UpdateMagneticFieldAndApplyBCs ( ablastr::fields::MultiLevelVectorField const& a_Bn, + amrex::Real a_thetadt, amrex::Real start_time ); + void SpectralSourceFreeFieldAdvance ( amrex::Real start_time); + void FinishMagneticFieldAndApplyBCs ( ablastr::fields::MultiLevelVectorField const& a_Bn, + amrex::Real a_theta, amrex::Real a_time ); + void FinishImplicitField ( const ablastr::fields::MultiLevelVectorField& Field_fp, + const ablastr::fields::MultiLevelVectorField& Field_n, + amrex::Real theta ); void ImplicitComputeRHSE ( amrex::Real dt, WarpXSolverVec& a_Erhs_vec); void ImplicitComputeRHSE (int lev, amrex::Real dt, WarpXSolverVec& a_Erhs_vec); void ImplicitComputeRHSE (int lev, PatchType patch_type, amrex::Real dt, WarpXSolverVec& a_Erhs_vec); @@ -542,22 +542,22 @@ public: void UpdateInjectionPosition (amrex::Real dt); void ResetProbDomain (const amrex::RealBox& rb); - void EvolveE ( amrex::Real dt); - void EvolveE (int lev, amrex::Real dt); - void EvolveB ( amrex::Real dt, DtType dt_type); - void EvolveB (int lev, amrex::Real dt, DtType dt_type); + void EvolveE ( amrex::Real dt, amrex::Real start_time); + void EvolveE (int lev, amrex::Real dt, amrex::Real start_time); + void EvolveB ( amrex::Real dt, DtType dt_type, amrex::Real start_time); + void EvolveB (int lev, amrex::Real dt, DtType dt_type, amrex::Real start_time); void EvolveF ( amrex::Real dt, DtType dt_type); void EvolveF (int lev, amrex::Real dt, DtType dt_type); void EvolveG ( amrex::Real dt, DtType dt_type); void EvolveG (int lev, amrex::Real dt, DtType dt_type); - void EvolveB (int lev, PatchType patch_type, amrex::Real dt, DtType dt_type); - void EvolveE (int lev, PatchType patch_type, amrex::Real dt); + void EvolveB (int lev, PatchType patch_type, amrex::Real dt, DtType dt_type, amrex::Real start_time); + void EvolveE 
(int lev, PatchType patch_type, amrex::Real dt, amrex::Real start_time); void EvolveF (int lev, PatchType patch_type, amrex::Real dt, DtType dt_type); void EvolveG (int lev, PatchType patch_type, amrex::Real dt, DtType dt_type); - void MacroscopicEvolveE ( amrex::Real dt); - void MacroscopicEvolveE (int lev, amrex::Real dt); - void MacroscopicEvolveE (int lev, PatchType patch_type, amrex::Real dt); + void MacroscopicEvolveE ( amrex::Real dt, amrex::Real start_time); + void MacroscopicEvolveE (int lev, amrex::Real dt, amrex::Real start_time); + void MacroscopicEvolveE (int lev, PatchType patch_type, amrex::Real dt, amrex::Real start_time); /** * \brief Hybrid-PIC field evolve function. @@ -672,8 +672,8 @@ public: amrex::MultiFab* Jy, amrex::MultiFab* Jz, PatchType patch_type); - void ApplyEfieldBoundary (int lev, PatchType patch_type); - void ApplyBfieldBoundary (int lev, PatchType patch_type, DtType dt_type); + void ApplyEfieldBoundary (int lev, PatchType patch_type, amrex::Real cur_time); + void ApplyBfieldBoundary (int lev, PatchType patch_type, DtType dt_type, amrex::Real cur_time); #ifdef WARPX_DIM_RZ // Applies the boundary conditions that are specific to the axis when in RZ. @@ -1604,7 +1604,7 @@ private: */ void ExplicitFillBoundaryEBUpdateAux (); - void PushPSATD (); + void PushPSATD (amrex::Real start_time); #ifdef WARPX_USE_FFT From 9fdee34a4428aa1cff9672f44cbf96e8dcbba5fb Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Tue, 10 Dec 2024 13:03:00 -0800 Subject: [PATCH 112/278] Removing asserts now that AMReX has been updated. (#5476) https://github.com/AMReX-Codes/amrex/pull/4226 fixes the issue that the asserts protected against. They have been removed. This should close out https://github.com/ECP-WarpX/WarpX/issues/5444 Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> --- .../MagnetostaticSolver/MagnetostaticSolver.cpp | 8 -------- Source/ablastr/fields/VectorPoissonSolver.H | 5 ----- 2 files changed, 13 deletions(-) diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp index c3acf8edd84..96e92b80359 100644 --- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp +++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp @@ -68,14 +68,6 @@ WarpX::ComputeMagnetostaticField() WARPX_ALWAYS_ASSERT_WITH_MESSAGE(this->max_level == 0, "Magnetostatic solver not implemented with mesh refinement."); -#if defined(AMREX_USE_EB) - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(EB::enabled(), - "Magnetostatic Solver currently requires an embedded boundary to be installed for " - "compatibility with AMReX when compiling with EB support. " - "Current workaround is to install an EB outside of domain or recompile with EB support off." 
- "Workaround for https://github.com/AMReX-Codes/amrex/issues/4223"); -#endif - AddMagnetostaticFieldLabFrame(); } diff --git a/Source/ablastr/fields/VectorPoissonSolver.H b/Source/ablastr/fields/VectorPoissonSolver.H index a41d242e2c2..16863320c1e 100644 --- a/Source/ablastr/fields/VectorPoissonSolver.H +++ b/Source/ablastr/fields/VectorPoissonSolver.H @@ -110,11 +110,6 @@ computeVectorPotential ( amrex::Vector > co rel_ref_ratio = amrex::Vector{{amrex::IntVect(AMREX_D_DECL(1, 1, 1))}}; } -#if !defined(AMREX_USE_EB) - ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE(!eb_enabled, - "Embedded boundary solve requested but not compiled in"); -#endif - auto const finest_level = static_cast(curr.size()) - 1; // scale J appropriately; also determine if current is zero everywhere From d5d4a9d740381c10783470e7bcce973eb151fbb1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 10 Dec 2024 16:32:54 -0800 Subject: [PATCH 113/278] [pre-commit.ci] pre-commit autoupdate (#5504) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.1 → v0.8.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.1...v0.8.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e196a7b3187..fffc41ce264 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.1 + rev: v0.8.2 hooks: # Run the linter - id: ruff From 7a7352f290e8bcdf94b4fc87b968ce2bea83214f Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Wed, 11 Dec 2024 12:47:37 -0800 Subject: [PATCH 114/278] AMReX/pyAMReX/PICSAR: weekly update (#5501) Anticipating the weekly update due to upcoming retreat. This makes https://github.com/AMReX-Codes/amrex/pull/4255 visible to WarpX. - Weekly update to latest AMReX: ```console ./Tools/Release/updateAMReX.py ``` - Weekly update to latest pyAMReX (no changes since 24.12): ```console ./Tools/Release/updatepyAMReX.py ``` - Weekly update to latest PICSAR (no changes since 24.09): ```console ./Tools/Release/updatePICSAR.py ``` --- .github/workflows/cuda.yml | 2 +- cmake/dependencies/AMReX.cmake | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 0a68d850e7e..a7cd884039b 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -126,7 +126,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
          git clone https://github.com/AMReX-Codes/amrex.git ../amrex
-         cd ../amrex && git checkout --detach 24.12 && cd -
+         cd ../amrex && git checkout --detach 96db0a665ff1e6bbe638490fd02d3aafb9188f6b && cd -
          make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4
          ccache -s

diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake
index 152a2618dee..8a7d11d5b1d 100644
--- a/cmake/dependencies/AMReX.cmake
+++ b/cmake/dependencies/AMReX.cmake
@@ -294,7 +294,7 @@ set(WarpX_amrex_src ""
 set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git"
     CACHE STRING
     "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)")
-set(WarpX_amrex_branch "24.12"
+set(WarpX_amrex_branch "96db0a665ff1e6bbe638490fd02d3aafb9188f6b"
     CACHE STRING
     "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)")

From 66204feae34ef4039b0ea787849486c1b04ecbf2 Mon Sep 17 00:00:00 2001
From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com>
Date: Wed, 11 Dec 2024 12:49:06 -0800
Subject: [PATCH 115/278] Add Dan Barnes' effective potential (semi-implicit)
 Poisson solver (#5079)

This PR adds a new electrostatic solver based on the semi-implicit scheme
developed by [Barnes, Journal of Comp. Phys., 424 (2021), 109852](https://www.sciencedirect.com/science/article/pii/S0021999120306264?via%3Dihub)
(see Appendix A of the reference).

The implementation was tested through the simulation of a Langmuir probe
placed inside a uniform plasma. To this end, a 2D simulation was performed of
an initially uniform plasma with a conducting disk inserted at the center of
the domain. The disk is kept at 0 V, while Neumann boundary conditions are
used for the domain boundary. Electrons and hydrogen ions were injected from
all the domain boundaries using the `UniformFluxDistribution` in order to
simulate the particle flux from a uniform plasma. Thermal boundaries, with the
same temperatures as the initial plasma particles, were used for the
particles.

The expected outcome of this simulation is that a sheath develops around the
"Langmuir probe" with value
$V_s = 0.5T_e\ln\left(2\pi\frac{m_e}{2m_p}(1 + \frac{T_i}{T_e})\right)$.
A pre-sheath of roughly $0.7T_e$ is also expected to form, but its exact value
depends on the domain size, since the pre-sheath extends over many Debye
lengths. The figure below shows an example outcome from a simulation as
described above, in which the semi-implicit factor was set to $C_{SI} = 10$.
![image](https://github.com/user-attachments/assets/de4c4f44-3eb1-4e4b-8297-4c047d1c0d98) Required before merging: - [x] Merging of https://github.com/AMReX-Codes/amrex/pull/3968 - [x] Add example to CI tests - [x] Update documentation --------- Signed-off-by: roelof-groenewald Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Thomas Marks --- Docs/source/refs.bib | 12 + Docs/source/usage/parameters.rst | 17 ++ Examples/Tests/CMakeLists.txt | 1 + .../CMakeLists.txt | 12 + .../analysis.py | 90 ++++++ ...effective_potential_electrostatic_picmi.py | 236 +++++++++++++++ GNUmakefile | 2 +- Python/pywarpx/picmi.py | 19 ++ ...fective_potential_electrostatic_picmi.json | 15 + Source/Diagnostics/Diagnostics.cpp | 5 +- .../ElectrostaticSolvers/CMakeLists.txt | 1 + .../EffectivePotentialES.H | 71 +++++ .../EffectivePotentialES.cpp | 258 +++++++++++++++++ .../ElectrostaticSolvers/Make.package | 1 + Source/Initialization/WarpXInitData.cpp | 4 + Source/Utils/WarpXAlgorithmSelection.H | 1 + Source/WarpX.cpp | 10 +- .../fields/EffectivePotentialPoissonSolver.H | 274 ++++++++++++++++++ Source/ablastr/fields/PoissonSolver.H | 4 +- cmake/dependencies/AMReX.cmake | 2 +- 20 files changed, 1028 insertions(+), 7 deletions(-) create mode 100644 Examples/Tests/effective_potential_electrostatic/CMakeLists.txt create mode 100755 Examples/Tests/effective_potential_electrostatic/analysis.py create mode 100644 Examples/Tests/effective_potential_electrostatic/inputs_test_3d_effective_potential_electrostatic_picmi.py create mode 100644 Regression/Checksum/benchmarks_json/test_3d_effective_potential_electrostatic_picmi.json create mode 100644 Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.H create mode 100644 Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.cpp create mode 100644 Source/ablastr/fields/EffectivePotentialPoissonSolver.H diff --git a/Docs/source/refs.bib b/Docs/source/refs.bib index 70b88a0abf8..02251c433d5 100644 --- a/Docs/source/refs.bib +++ b/Docs/source/refs.bib @@ -480,6 +480,18 @@ @article{VayFELB2009 url = {https://doi.org/10.1063/1.3080930}, } +@article{Barnes2021, + title = {Improved C1 shape functions for simplex meshes}, + author = {D.C. Barnes}, + journal = {Journal of Computational Physics}, + volume = {424}, + pages = {109852}, + year = {2021}, + issn = {0021-9991}, + doi = {https://doi.org/10.1016/j.jcp.2020.109852}, + url = {https://www.sciencedirect.com/science/article/pii/S0021999120306264}, +} + @article{Rhee1987, author = {Rhee, M. J. and Schneider, R. F. and Weidman, D. J.}, title = "{Simple time‐resolving Thomson spectrometer}", diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index c3756d44d96..3787acbd639 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -236,6 +236,23 @@ Overall simulation parameters \boldsymbol{\nabla}^2 \phi = - \rho/\epsilon_0 \qquad \boldsymbol{E} = - \boldsymbol{\nabla}\phi \\ \boldsymbol{\nabla}^2 \boldsymbol{A} = - \mu_0 \boldsymbol{j} \qquad \boldsymbol{B} = \boldsymbol{\nabla}\times\boldsymbol{A} + * ``labframe-effective-potential``: Poisson's equation is solved with a modified dielectric function + (resulting in an "effective potential") to create a semi-implicit scheme which is robust to the + numerical instability seen in explicit electrostatic PIC when :math:`\Delta t \omega_{pe} > 2`. 
+ If this option is used the additional parameter ``warpx.effective_potential_factor`` can also be + specified to set the value of :math:`C_{EP}` (default 4). The method is stable for :math:`C_{EP} \geq 1` + regardless of :math:`\Delta t`, however, the larger :math:`C_{EP}` is set, the lower the numerical plasma + frequency will be and therefore care must be taken to not set it so high that the plasma mode + hybridizes with other modes of interest. + Details of the method can be found in Appendix A of :cite:t:`param-Barnes2021` (note that in that paper + the method is referred to as "semi-implicit electrostatic" but here it has been renamed to "effective potential" + to avoid confusion with the semi-implicit method of Chen et al.). + In short, the code solves: + + .. math:: + + \boldsymbol{\nabla}\cdot\left(1+\frac{C_{EP}}{4}\sum_{s \in \text{species}}(\omega_{ps}\Delta t)^2 \right)\boldsymbol{\nabla} \phi = - \rho/\epsilon_0 \qquad \boldsymbol{E} = - \boldsymbol{\nabla}\phi + * ``relativistic``: Poisson's equation is solved **for each species** in their respective rest frame. The corresponding field is mapped back to the simulation frame and will produce both E and B diff --git a/Examples/Tests/CMakeLists.txt b/Examples/Tests/CMakeLists.txt index 6fea9368e78..c4713123ae6 100644 --- a/Examples/Tests/CMakeLists.txt +++ b/Examples/Tests/CMakeLists.txt @@ -71,6 +71,7 @@ add_subdirectory(restart) add_subdirectory(restart_eb) add_subdirectory(rigid_injection) add_subdirectory(scraping) +add_subdirectory(effective_potential_electrostatic) add_subdirectory(silver_mueller) add_subdirectory(single_particle) add_subdirectory(space_charge_initialization) diff --git a/Examples/Tests/effective_potential_electrostatic/CMakeLists.txt b/Examples/Tests/effective_potential_electrostatic/CMakeLists.txt new file mode 100644 index 00000000000..a6545e8c5f3 --- /dev/null +++ b/Examples/Tests/effective_potential_electrostatic/CMakeLists.txt @@ -0,0 +1,12 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_effective_potential_electrostatic_picmi # name + 3 # dims + 2 # nprocs + inputs_test_3d_effective_potential_electrostatic_picmi.py # inputs + analysis.py # analysis + diags/field_diag/ # output + OFF # dependency +) diff --git a/Examples/Tests/effective_potential_electrostatic/analysis.py b/Examples/Tests/effective_potential_electrostatic/analysis.py new file mode 100755 index 00000000000..5aa9b045af0 --- /dev/null +++ b/Examples/Tests/effective_potential_electrostatic/analysis.py @@ -0,0 +1,90 @@ +#!/usr/bin/env python3 +# +# --- Analysis script for the effective potential Poisson solver test. This test +# --- is based on the adiabatic plasma expansion benchmark from Connor et al. (2021) +# --- doi.org/10.1109/TPS.2021.3072353. +# --- The electron density distribution (as a function of radius) is compared +# --- with the analytically calculated density based on the input parameters +# --- of the test simulation at each output timestep. 
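+# --- The analytic reference (implemented in get_analytic_density below) assumes
+# --- adiabatic expansion of the Gaussian plasma ball:
+# ---   sigma(t) = sigma_0 * sqrt(1 + t^2 / tau^2)
+# ---   T_e(t) = T_e(0) / (1 + t^2 / tau^2)
+# ---   n_e(r, t) = n_0 * (T_e(t) / T_e(0))**1.5 * exp(-r^2 / (2 * sigma(t)^2))
+# --- where tau = sigma_0 * sqrt(M / (kB * (T_e + T_i))) is the characteristic
+# --- expansion time.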
+
+import os
+import sys
+
+import dill
+import matplotlib.pyplot as plt
+import numpy as np
+from openpmd_viewer import OpenPMDTimeSeries
+from scipy.interpolate import RegularGridInterpolator
+
+from pywarpx import picmi
+
+constants = picmi.constants
+
+# load simulation parameters
+with open("sim_parameters.dpkl", "rb") as f:
+    sim = dill.load(f)
+
+# characteristic expansion time
+tau = sim["sigma_0"] * np.sqrt(sim["M"] / (constants.kb * (sim["T_e"] + sim["T_i"])))
+
+
+def get_analytic_density(r, t):
+    expansion_factor = 1.0 + t**2 / tau**2
+    T = sim["T_e"] / expansion_factor
+    sigma = sim["sigma_0"] * np.sqrt(expansion_factor)
+    return (
+        sim["n_plasma"] * (T / sim["T_e"]) ** 1.5 * np.exp(-(r**2) / (2.0 * sigma**2))
+    )
+
+
+def get_radial_function(field, info):
+    """Helper function to transform Cartesian data to polar data and average
+    over theta and phi."""
+    r_grid = np.linspace(0, np.max(info.z) - 1e-3, 30)
+    theta_grid = np.linspace(0, 2 * np.pi, 40, endpoint=False)
+    phi_grid = np.linspace(0, np.pi, 20)
+
+    r, t, p = np.meshgrid(r_grid, theta_grid, phi_grid, indexing="ij")
+    x_sp = r * np.sin(p) * np.cos(t)
+    y_sp = r * np.sin(p) * np.sin(t)
+    z_sp = r * np.cos(p)
+
+    interp = RegularGridInterpolator((info.x, info.y, info.z), field)
+    field_sp = interp((x_sp, y_sp, z_sp))
+    return r_grid, np.mean(field_sp, axis=(1, 2))
+
+
+diag_dir = "diags/field_diag"
+ts = OpenPMDTimeSeries(diag_dir, check_all_files=True)
+
+rms_errors = np.zeros(len(ts.iterations))
+
+for ii, it in enumerate(ts.iterations):
+    rho_e, info = ts.get_field(field="rho_electrons", iteration=it)
+    r_grid, n_e = get_radial_function(-rho_e / constants.q_e, info)
+
+    n_e_analytic = get_analytic_density(r_grid, ts.t[ii])
+    rms_errors[ii] = (
+        np.sqrt(np.mean(np.sum((n_e - n_e_analytic) ** 2))) / n_e_analytic[0]
+    )
+
+    plt.plot(r_grid, n_e_analytic, "k--", alpha=0.6)
+    plt.plot(r_grid, n_e, label=rf"t = {ts.t[ii]*1e6:.2f} $\mu$s")
+
+print("RMS error (%) in density: ", rms_errors)
+assert np.all(rms_errors < 0.05)
+
+plt.ylabel("$n_e$ (m$^{-3}$)")
+plt.xlabel("r (m)")
+plt.grid()
+plt.legend()
+plt.show()
+
+if len(sys.argv) > 1:
+    sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
+    import checksumAPI
+
+    filename = sys.argv[1]
+
+    test_name = os.path.split(os.getcwd())[1]
+    checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd")
diff --git a/Examples/Tests/effective_potential_electrostatic/inputs_test_3d_effective_potential_electrostatic_picmi.py b/Examples/Tests/effective_potential_electrostatic/inputs_test_3d_effective_potential_electrostatic_picmi.py
new file mode 100644
index 00000000000..27b1728b7b2
--- /dev/null
+++ b/Examples/Tests/effective_potential_electrostatic/inputs_test_3d_effective_potential_electrostatic_picmi.py
@@ -0,0 +1,236 @@
+#!/usr/bin/env python3
+#
+# --- Test script for the effective potential Poisson solver. This test is based
+# --- on the adiabatic plasma expansion benchmark from Connor et al. (2021)
+# --- doi.org/10.1109/TPS.2021.3072353.
+# --- In the benchmark an expanding plasma ball with Gaussian density distribution
+# --- in the radial direction is simulated and the time evolution of the
+# --- density of the electron species is compared to an approximate analytic solution.
+# --- The example is modified slightly in the following ways:
+# --- 1) The original example used an electromagnetic solver with absorbing
+# ---    boundaries while the present case encloses the plasma in a conducting
+# ---    sphere.
+# --- 2) The domain and plasma parameters for this case has been modified to +# --- set the cell-size and time step such that the explicit electrostatic +# --- solver is unstable. + +import dill +import numpy as np +from mpi4py import MPI as mpi +from scipy.special import erf + +from pywarpx import picmi + +constants = picmi.constants + +comm = mpi.COMM_WORLD + +simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=0) + +m_ion = 25 # Ion mass (electron masses) + +# Plasma parameters +n_plasma = 5e12 # Plasma density (m^-3) +sigma_0 = 22 # Initial Gaussian distribution standard deviation (Debye lengths) +T_e = 100.0 # Electron temperature (K) +T_i = 10.0 # Ion temperature (K) + +# Spatial domain +R = 86 # Radius of metallic sphere (Debye lengths) +NZ = 72 # Number of cells in each direction + +# Temporal domain (if not run as a CI test) +LT = 0.6e-6 # Simulation temporal length (s) + +# Numerical parameters +NPARTS = 500000 # Seed number of particles +DT = 0.8 # Time step (electron streaming) + +# Solver parameter +C_EP = 1.0 # Effective potential factor + +####################################################################### +# Calculate various plasma parameters based on the simulation input # +####################################################################### + +# Ion mass (kg) +M = m_ion * constants.m_e + +# Electron plasma frequency (Hz) +f_pe = np.sqrt(constants.q_e**2 * n_plasma / (constants.m_e * constants.ep0)) / ( + 2.0 * np.pi +) + +# Debye length (m) +lambda_e = np.sqrt(constants.ep0 * constants.kb * T_e / (n_plasma * constants.q_e**2)) + +# Thermal velocities (m/s) from v_th = np.sqrt(kT / m) +v_ti = np.sqrt(T_i * constants.kb / M) +v_te = np.sqrt(T_e * constants.kb / constants.m_e) + +R *= lambda_e +sigma_0 *= lambda_e + +dz = 2.0 * R / (NZ - 4) +Lz = dz * NZ +dt = DT * dz / v_te + +total_steps = int(LT / dt) +diag_steps = total_steps // 3 +total_steps = diag_steps * 3 + +# dump attributes needed for analysis to a dill pickle file +if comm.rank == 0: + parameter_dict = { + "sigma_0": sigma_0, + "M": M, + "T_i": T_i, + "T_e": T_e, + "n_plasma": n_plasma, + } + with open("sim_parameters.dpkl", "wb") as f: + dill.dump(parameter_dict, f) + +# print out plasma parameters +if comm.rank == 0: + print( + f"Initializing simulation with input parameters:\n" + f"\tT_e = {T_e:.1f} K\n" + f"\tT_i = {T_i:.1f} K\n" + f"\tn = {n_plasma:.1e} m^-3\n" + ) + print( + f"Plasma parameters:\n" + f"\tlambda_e = {lambda_e:.1e} m\n" + f"\tt_pe = {1.0/f_pe:.1e} s\n" + f"\tv_ti = {v_ti:.1e} m/s\n" + ) + print( + f"Numerical parameters:\n" + f"\tdz/lambda_e = {dz/lambda_e:.2f}\n" + f"\tdt*w_pe = {dt*f_pe*2.0*np.pi:.2f}\n" + f"\tdiag steps = {diag_steps:d}\n" + f"\ttotal steps = {total_steps:d}\n" + ) + + +####################################################################### +# Set geometry and boundary conditions # +####################################################################### + +grid = picmi.Cartesian3DGrid( + number_of_cells=[NZ] * 3, + lower_bound=[-Lz / 2.0] * 3, + upper_bound=[Lz / 2.0] * 3, + lower_boundary_conditions=["neumann"] * 3, + upper_boundary_conditions=["neumann"] * 3, + lower_boundary_conditions_particles=["absorbing"] * 3, + upper_boundary_conditions_particles=["absorbing"] * 3, + warpx_max_grid_size=NZ // 2, +) +simulation.time_step_size = dt +simulation.max_steps = total_steps +simulation.current_deposition_algo = "direct" +simulation.particle_shape = 1 +simulation.verbose = 1 + 
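+# Note on the choice of DT (illustrative arithmetic, not used by the solver):
+# since v_te = lambda_e * w_pe, the time step above satisfies
+#   dt * w_pe = DT * (dz / lambda_e) = 0.8 * (2 * 86 / 68) ~ 2.0,
+# i.e. it sits at the w_pe*dt = 2 stability limit of explicit electrostatic
+# PIC, which is the regime the effective potential solver (stable for
+# C_EP >= 1) is meant to handle.
+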
+####################################################################### +# Insert spherical boundary as EB # +####################################################################### + +embedded_boundary = picmi.EmbeddedBoundary( + implicit_function=f"(x**2+y**2+z**2-{R**2})", + potential=0.0, +) +simulation.embedded_boundary = embedded_boundary + +####################################################################### +# Field solver # +####################################################################### + +solver = picmi.ElectrostaticSolver( + grid=grid, + method="Multigrid", + warpx_effective_potential=True, + warpx_effective_potential_factor=C_EP, + warpx_self_fields_verbosity=0, +) +simulation.solver = solver + +####################################################################### +# Particle types setup # +####################################################################### + +total_parts = ( + n_plasma + * sigma_0**2 + * ( + (2.0 * np.pi) ** 1.5 * sigma_0 * erf(R / (np.sqrt(2) * sigma_0)) + + 4.0 * np.pi * R * np.exp(-(R**2) / (2.0 * sigma_0**2)) + ) +) + +electrons = picmi.Species( + name="electrons", + particle_type="electron", + initial_distribution=picmi.GaussianBunchDistribution( + n_physical_particles=total_parts, + rms_bunch_size=[sigma_0] * 3, + rms_velocity=[v_te] * 3, + ), +) +simulation.add_species( + electrons, + layout=picmi.PseudoRandomLayout(grid=grid, n_macroparticles=NPARTS), +) + +ions = picmi.Species( + name="ions", + charge="q_e", + mass=M, + initial_distribution=picmi.GaussianBunchDistribution( + n_physical_particles=total_parts, + rms_bunch_size=[sigma_0] * 3, + rms_velocity=[v_ti] * 3, + ), +) +simulation.add_species( + ions, + layout=picmi.PseudoRandomLayout(grid=grid, n_macroparticles=NPARTS), +) + +####################################################################### +# Add diagnostics # +####################################################################### + +field_diag = picmi.FieldDiagnostic( + name="field_diag", + grid=grid, + period=diag_steps, + data_list=[ + "E", + "J", + "T_electrons", + "T_ions", + "phi", + "rho_electrons", + "rho_ions", + ], + write_dir="diags", + warpx_format="openpmd", + warpx_openpmd_backend="h5", +) +simulation.add_diagnostic(field_diag) + +####################################################################### +# Initialize simulation # +####################################################################### + +simulation.initialize_inputs() +simulation.initialize_warpx() + +####################################################################### +# Execute simulation # +####################################################################### + +simulation.step() diff --git a/GNUmakefile b/GNUmakefile index 6298dd83369..ef3239e249d 100644 --- a/GNUmakefile +++ b/GNUmakefile @@ -43,7 +43,7 @@ USE_RZ = FALSE USE_EB = FALSE USE_LINEAR_SOLVERS_EM = TRUE -USE_LINEAR_SOLVERS_INCFLO = FALSE +USE_LINEAR_SOLVERS_INCFLO = TRUE WARPX_HOME := . 
include $(WARPX_HOME)/Source/Make.WarpX diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index c5946376d52..de3b26955b7 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1894,6 +1894,16 @@ class ElectrostaticSolver(picmistandard.PICMI_ElectrostaticSolver): warpx_self_fields_verbosity: integer, default=2 Level of verbosity for the lab frame solver + warpx_magnetostatic: bool, default=False + Whether to use the magnetostatic solver + + warpx_effective_potential: bool, default=False + Whether to use the effective potential Poisson solver (EP-PIC) + + warpx_effective_potential_factor: float, default=4 + If the effective potential Poisson solver is used, this sets the value + of C_EP (the method is marginally stable at C_EP = 1) + warpx_dt_update_interval: integer, optional (default = -1) How frequently the timestep is updated. Adaptive timestepping is disabled when this is <= 0. @@ -1910,6 +1920,10 @@ def init(self, kw): self.absolute_tolerance = kw.pop("warpx_absolute_tolerance", None) self.self_fields_verbosity = kw.pop("warpx_self_fields_verbosity", None) self.magnetostatic = kw.pop("warpx_magnetostatic", False) + self.effective_potential = kw.pop("warpx_effective_potential", False) + self.effective_potential_factor = kw.pop( + "warpx_effective_potential_factor", None + ) self.cfl = kw.pop("warpx_cfl", None) self.dt_update_interval = kw.pop("warpx_dt_update_interval", None) self.max_dt = kw.pop("warpx_max_dt", None) @@ -1930,6 +1944,11 @@ def solver_initialize_inputs(self): else: if self.magnetostatic: pywarpx.warpx.do_electrostatic = "labframe-electromagnetostatic" + elif self.effective_potential: + pywarpx.warpx.do_electrostatic = "labframe-effective-potential" + pywarpx.warpx.effective_potential_factor = ( + self.effective_potential_factor + ) else: pywarpx.warpx.do_electrostatic = "labframe" pywarpx.warpx.self_fields_required_precision = self.required_precision diff --git a/Regression/Checksum/benchmarks_json/test_3d_effective_potential_electrostatic_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_effective_potential_electrostatic_picmi.json new file mode 100644 index 00000000000..b61da644114 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_effective_potential_electrostatic_picmi.json @@ -0,0 +1,15 @@ +{ + "lev=0": { + "Ex": 148992.08170563323, + "Ey": 148964.62980386653, + "Ez": 149085.76473996745, + "T_electrons": 279.2796849134268, + "T_ions": 32.09677271761641, + "jx": 31.077856013948246, + "jy": 31.425549493321245, + "jz": 31.424168300110658, + "phi": 2002.2518068289028, + "rho_electrons": 0.007909027251581852, + "rho_ions": 0.008266762306092332 + } +} diff --git a/Source/Diagnostics/Diagnostics.cpp b/Source/Diagnostics/Diagnostics.cpp index 0f659065185..a60aea40242 100644 --- a/Source/Diagnostics/Diagnostics.cpp +++ b/Source/Diagnostics/Diagnostics.cpp @@ -77,8 +77,9 @@ Diagnostics::BaseReadParameters () if (utils::algorithms::is_in(m_varnames_fields, "phi")){ WARPX_ALWAYS_ASSERT_WITH_MESSAGE( warpx.electrostatic_solver_id==ElectrostaticSolverAlgo::LabFrame || - warpx.electrostatic_solver_id==ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic, - "plot phi only works if do_electrostatic = labframe or do_electrostatic = labframe-electromagnetostatic"); + warpx.electrostatic_solver_id==ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic || + warpx.electrostatic_solver_id==ElectrostaticSolverAlgo::LabFrameEffectivePotential, + "plot phi only works if do_electrostatic = labframe, do_electrostatic = 
labframe-electromagnetostatic or do_electrostatic = labframe-effective-potential"); } // Sanity check if user requests to plot A diff --git a/Source/FieldSolver/ElectrostaticSolvers/CMakeLists.txt b/Source/FieldSolver/ElectrostaticSolvers/CMakeLists.txt index 39c4478c110..db728f6aaba 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/CMakeLists.txt +++ b/Source/FieldSolver/ElectrostaticSolvers/CMakeLists.txt @@ -2,6 +2,7 @@ foreach(D IN LISTS WarpX_DIMS) warpx_set_suffix_dims(SD ${D}) target_sources(lib_${SD} PRIVATE + EffectivePotentialES.cpp ElectrostaticSolver.cpp LabFrameExplicitES.cpp PoissonBoundaryHandler.cpp diff --git a/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.H b/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.H new file mode 100644 index 00000000000..2ade923211c --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.H @@ -0,0 +1,71 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: Roelof Groenewald (TAE Technologies) + * + * License: BSD-3-Clause-LBNL + */ + +#ifndef WARPX_EFFECTIVEPOTENTIALES_H_ +#define WARPX_EFFECTIVEPOTENTIALES_H_ + +#include "ElectrostaticSolver.H" + +#include +#include + +class EffectivePotentialES final : public ElectrostaticSolver +{ +public: + + EffectivePotentialES (int nlevs_max) : ElectrostaticSolver (nlevs_max) { + ReadParameters(); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + (nlevs_max == 1), + "Effective potential electrostatic solver only supports one level at present" + ); + } + + void InitData () override; + + void ComputeSpaceChargeField ( + ablastr::fields::MultiFabRegister& fields, + MultiParticleContainer& mpc, + MultiFluidContainer* mfl, + int max_level) override; + + void ComputeSigma ( amrex::MultiFab& sigma ) const; + + /** + * Compute the potential `phi` by solving the semi-implicit Poisson equation using the Effective Potential method + * with `rho` as the source. + * More specifically, this solves the equation + * \f[ + * \vec{\nabla}\cdot(\sigma\vec{\nabla}) \phi = -\frac{\rho}{\epsilon_0} + * \f] + * \param[out] phi The potential to be computed by this function + * \param[in] rho The total charge density + * \param[in] sigma Represents the modified dielectric + * \param[in] required_precision The relative convergence threshold for the MLMG solver + * \param[in] absolute_tolerance The absolute convergence threshold for the MLMG solver + * \param[in] max_iters The maximum number of iterations allowed for the MLMG solver + * \param[in] verbosity The verbosity setting for the MLMG solver + */ + void computePhi ( + ablastr::fields::MultiLevelScalarField const& rho, + ablastr::fields::MultiLevelScalarField const& phi + ) const; + void computePhi ( + ablastr::fields::MultiLevelScalarField const& rho, + ablastr::fields::MultiLevelScalarField const& phi, + amrex::MultiFab const& sigma, + amrex::Real required_precision, + amrex::Real absolute_tolerance, + int max_iters, + int verbosity + ) const; + +}; + +#endif // WARPX_EFFECTIVEPOTENTIALES_H_ diff --git a/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.cpp b/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.cpp new file mode 100644 index 00000000000..0a5330b049d --- /dev/null +++ b/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.cpp @@ -0,0 +1,258 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. 
+ *
+ * Authors: Roelof Groenewald (TAE Technologies)
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+
+#include "EffectivePotentialES.H"
+#include "Fluids/MultiFluidContainer_fwd.H"
+#include "EmbeddedBoundary/Enabled.H"
+#include "Fields.H"
+#include "Particles/MultiParticleContainer_fwd.H"
+#include "Utils/Parser/ParserUtils.H"
+#include "WarpX.H"
+
+using namespace amrex;
+
+void EffectivePotentialES::InitData() {
+    auto & warpx = WarpX::GetInstance();
+    m_poisson_boundary_handler->DefinePhiBCs(warpx.Geom(0));
+}
+
+void EffectivePotentialES::ComputeSpaceChargeField (
+    ablastr::fields::MultiFabRegister& fields,
+    MultiParticleContainer& mpc,
+    [[maybe_unused]] MultiFluidContainer* mfl,
+    int max_level)
+{
+    WARPX_PROFILE("EffectivePotentialES::ComputeSpaceChargeField");
+
+    using ablastr::fields::MultiLevelScalarField;
+    using ablastr::fields::MultiLevelVectorField;
+    using warpx::fields::FieldType;
+
+    // grab the simulation fields
+    const MultiLevelScalarField rho_fp = fields.get_mr_levels(FieldType::rho_fp, max_level);
+    const MultiLevelScalarField rho_cp = fields.get_mr_levels(FieldType::rho_cp, max_level);
+    const MultiLevelScalarField phi_fp = fields.get_mr_levels(FieldType::phi_fp, max_level);
+    const MultiLevelVectorField Efield_fp = fields.get_mr_levels_alldirs(FieldType::Efield_fp, max_level);
+
+    mpc.DepositCharge(rho_fp, 0.0_rt);
+    if (mfl) {
+        const int lev = 0;
+        mfl->DepositCharge(fields, *rho_fp[lev], lev);
+    }
+
+    // Apply filter, perform MPI exchange, interpolate across levels
+    const Vector<std::unique_ptr<MultiFab> > rho_buf(num_levels);
+    auto & warpx = WarpX::GetInstance();
+    warpx.SyncRho( rho_fp, rho_cp, amrex::GetVecOfPtrs(rho_buf) );
+
+#ifndef WARPX_DIM_RZ
+    for (int lev = 0; lev < num_levels; lev++) {
+        // Reflect density over PEC boundaries, if needed.
+        warpx.ApplyRhofieldBoundary(lev, rho_fp[lev], PatchType::fine);
+    }
+#endif
+
+    // set the boundary potentials appropriately
+    setPhiBC(phi_fp, warpx.gett_new(0));
+
+    // perform phi calculation
+    computePhi(rho_fp, phi_fp);
+
+    // Compute the electric field. Note that if an EB is used the electric
+    // field will be calculated in the computePhi call.
+    const std::array<Real, 3> beta = {0._rt};
+    if (!EB::enabled()) { computeE( Efield_fp, phi_fp, beta ); }
+}
+
+void EffectivePotentialES::computePhi (
+    ablastr::fields::MultiLevelScalarField const& rho,
+    ablastr::fields::MultiLevelScalarField const& phi) const
+{
+    // Calculate the mass enhancement factor - see Appendix A of
+    // Barnes, Journal of Comp. Phys., 424 (2021), 109852.
+    // The "sigma" multifab stores the dressing of the Poisson equation. It
+    // is a cell-centered multifab.
+    auto const& ba = convert(rho[0]->boxArray(), IntVect(AMREX_D_DECL(0,0,0)));
+    MultiFab sigma(ba, rho[0]->DistributionMap(), 1, rho[0]->nGrowVect());
+    ComputeSigma(sigma);
+
+    // Use the AMREX MLMG solver
+    computePhi(rho, phi, sigma, self_fields_required_precision,
+               self_fields_absolute_tolerance, self_fields_max_iters,
+               self_fields_verbosity);
+}
+
+void EffectivePotentialES::ComputeSigma (MultiFab& sigma) const
+{
+    // Reset sigma to 1
+    sigma.setVal(1.0_rt);
+
+    // Get the user set value for C_SI (defaults to 4)
+    amrex::Real C_SI = 4.0;
+    const ParmParse pp_warpx("warpx");
+    utils::parser::queryWithParser(pp_warpx, "effective_potential_factor", C_SI);
+
+    int const lev = 0;
+
+    // sigma is a cell-centered array
+    amrex::GpuArray<int, 3> const cell_centered = {0, 0, 0};
+    // The "coarsening is just 1 i.e. no coarsening"
no coarsening" + amrex::GpuArray const coarsen = {1, 1, 1}; + + // GetChargeDensity returns a nodal multifab + // Below we set all the unused dimensions to have cell-centered values for + // rho since these values will be interpolated onto a cell-centered grid + // - if this is not done the Interp function returns nonsense values. +#if defined(WARPX_DIM_3D) + amrex::GpuArray const nodal = {1, 1, 1}; +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + amrex::GpuArray const nodal = {1, 1, 0}; +#elif defined(WARPX_DIM_1D_Z) + amrex::GpuArray const nodal = {1, 0, 0}; +#endif + + auto& warpx = WarpX::GetInstance(); + auto& mypc = warpx.GetPartContainer(); + + // The effective potential dielectric function is given by + // \varepsilon_{SI} = \varepsilon * (1 + \sum_{i in species} C_{SI}*(w_pi * dt)^2/4) + // Note the use of the plasma frequency in rad/s (not Hz) and the factor of 1/4, + // these choices make it so that C_SI = 1 is the marginal stability threshold. + auto mult_factor = ( + C_SI * warpx.getdt(lev) * warpx.getdt(lev) / (4._rt * PhysConst::ep0) + ); + + // Loop over each species to calculate the Poisson equation dressing + for (auto const& pc : mypc) { + // grab the charge density for this species + auto rho = pc->GetChargeDensity(lev, false); + + // Handle the parallel transfer of guard cells and apply filtering + warpx.ApplyFilterandSumBoundaryRho(lev, lev, *rho, 0, rho->nComp()); + + // get multiplication factor for this species + auto const mult_factor_pc = mult_factor * pc->getCharge() / pc->getMass(); + + // update sigma +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for ( MFIter mfi(sigma, TilingIfNotGPU()); mfi.isValid(); ++mfi ) { + Array4 const& sigma_arr = sigma.array(mfi); + Array4 const& rho_arr = rho->const_array(mfi); + + // Loop over the cells and update the sigma field + amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int k){ + // Interpolate rho to cell-centered value + auto const rho_cc = ablastr::coarsen::sample::Interp( + rho_arr, nodal, cell_centered, coarsen, i, j, k, 0 + ); + // add species term to sigma: + // C_SI * w_p^2 * dt^2 / 4 = C_SI / 4 * q*rho/(m*eps0) * dt^2 + sigma_arr(i, j, k, 0) += mult_factor_pc * rho_cc; + }); + + } + } +} + + +void EffectivePotentialES::computePhi ( + ablastr::fields::MultiLevelScalarField const& rho, + ablastr::fields::MultiLevelScalarField const& phi, + amrex::MultiFab const& sigma, + amrex::Real required_precision, + amrex::Real absolute_tolerance, + int max_iters, + int verbosity +) const +{ + using ablastr::fields::Direction; + using warpx::fields::FieldType; + + // create a vector to our fields, sorted by level + amrex::Vector sorted_rho; + amrex::Vector sorted_phi; + for (int lev = 0; lev < num_levels; ++lev) { + sorted_rho.emplace_back(rho[lev]); + sorted_phi.emplace_back(phi[lev]); + } + + std::optional post_phi_calculation; +#ifdef AMREX_USE_EB + // TODO: double check no overhead occurs on "m_eb_enabled == false" + std::optional > eb_farray_box_factory; +#else + std::optional > const eb_farray_box_factory; +#endif + auto & warpx = WarpX::GetInstance(); + if (EB::enabled()) + { + // EB: use AMReX to directly calculate the electric field since with EB's the + // simple finite difference scheme in WarpX::computeE sometimes fails + + // TODO: maybe make this a helper function or pass Efield_fp directly + amrex::Vector< + amrex::Array + > e_field; + for (int lev = 0; lev < num_levels; ++lev) { + e_field.push_back( +#if defined(WARPX_DIM_1D_Z) + 
amrex::Array{ + warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev) + } +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + amrex::Array{ + warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev) + } +#elif defined(WARPX_DIM_3D) + amrex::Array{ + warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, lev), + warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev) + } +#endif + ); + } + post_phi_calculation = EBCalcEfromPhiPerLevel(e_field); + +#ifdef AMREX_USE_EB + amrex::Vector< + amrex::EBFArrayBoxFactory const * + > factories; + for (int lev = 0; lev < num_levels; ++lev) { + factories.push_back(&warpx.fieldEBFactory(lev)); + } + eb_farray_box_factory = factories; +#endif + } + + ablastr::fields::computeEffectivePotentialPhi( + sorted_rho, + sorted_phi, + sigma, + required_precision, + absolute_tolerance, + max_iters, + verbosity, + warpx.Geom(), + warpx.DistributionMap(), + warpx.boxArray(), + WarpX::grid_type, + false, + EB::enabled(), + WarpX::do_single_precision_comms, + warpx.refRatio(), + post_phi_calculation, + *m_poisson_boundary_handler, + warpx.gett_new(0), + eb_farray_box_factory + ); +} diff --git a/Source/FieldSolver/ElectrostaticSolvers/Make.package b/Source/FieldSolver/ElectrostaticSolvers/Make.package index a1d2d78dbb0..673f1c8aa7a 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/Make.package +++ b/Source/FieldSolver/ElectrostaticSolvers/Make.package @@ -1,6 +1,7 @@ CEXE_sources += PoissonBoundaryHandler.cpp CEXE_sources += LabFrameExplicitES.cpp CEXE_sources += RelativisticExplicitES.cpp +CEXE_sources += EffectivePotentialES.cpp CEXE_sources += ElectrostaticSolver.cpp VPATH_LOCATIONS += $(WARPX_HOME)/Source/FieldSolver/ElectrostaticSolvers diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 14d189d0dd5..daecfac8bed 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -278,6 +278,10 @@ WarpX::PrintMainPICparameters () amrex::Print() << "Operation mode: | Electrostatic" << "\n"; amrex::Print() << " | - laboratory frame, electrostatic + magnetostatic" << "\n"; } + else if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameEffectivePotential){ + amrex::Print() << "Operation mode: | Electrostatic" << "\n"; + amrex::Print() << " | - laboratory frame, effective potential scheme" << "\n"; + } else{ amrex::Print() << "Operation mode: | Electromagnetic" << "\n"; } diff --git a/Source/Utils/WarpXAlgorithmSelection.H b/Source/Utils/WarpXAlgorithmSelection.H index 98d2430afc3..187be924666 100644 --- a/Source/Utils/WarpXAlgorithmSelection.H +++ b/Source/Utils/WarpXAlgorithmSelection.H @@ -62,6 +62,7 @@ AMREX_ENUM(ElectrostaticSolverAlgo, Relativistic, LabFrameElectroMagnetostatic, LabFrame, + LabFrameEffectivePotential, Default = None); AMREX_ENUM(PoissonSolverAlgo, diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 772131ea0e7..176a6f63ddf 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -20,6 +20,7 @@ #include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H" #include "FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H" #include "FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H" +#include "FieldSolver/ElectrostaticSolvers/EffectivePotentialES.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" #include 
"FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H" #include "FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H" @@ -332,6 +333,11 @@ WarpX::WarpX () { m_electrostatic_solver = std::make_unique(nlevs_max); } + // Initialize the effective potential electrostatic solver if required + else if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameEffectivePotential) + { + m_electrostatic_solver = std::make_unique(nlevs_max); + } else { m_electrostatic_solver = std::make_unique(nlevs_max); @@ -2369,6 +2375,7 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm int rho_ncomps = 0; if( (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame) || (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) || + (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameEffectivePotential) || (electromagnetic_solver_id == ElectromagneticSolverAlgo::HybridPIC) ) { rho_ncomps = ncomps; } @@ -2389,7 +2396,8 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm } if (electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrame || - electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) + electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic || + electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameEffectivePotential ) { const IntVect ngPhi = IntVect( AMREX_D_DECL(1,1,1) ); m_fields.alloc_init(FieldType::phi_fp, lev, amrex::convert(ba, phi_nodal_flag), dm, diff --git a/Source/ablastr/fields/EffectivePotentialPoissonSolver.H b/Source/ablastr/fields/EffectivePotentialPoissonSolver.H new file mode 100644 index 00000000000..c6b5d2c5bcc --- /dev/null +++ b/Source/ablastr/fields/EffectivePotentialPoissonSolver.H @@ -0,0 +1,274 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: Roelof Groenewald (TAE Technologies) + * + * License: BSD-3-Clause-LBNL + */ +/* + * This file was copied and edited from PoissonSolver.H in the same directory. + */ +#ifndef ABLASTR_EFFECTIVE_POTENTIAL_POISSON_SOLVER_H +#define ABLASTR_EFFECTIVE_POTENTIAL_POISSON_SOLVER_H + +#include +#include +#include +#include +#include +#include +#include +#include "PoissonSolver.H" + +#if defined(WARPX_USE_FFT) && defined(WARPX_DIM_3D) +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#ifdef AMREX_USE_EB +# include +#endif + +#include +#include + + +namespace ablastr::fields { + +/** Compute the potential `phi` by solving the Poisson equation with a modifed dielectric function + * + * Uses `rho` as a source. This uses the AMReX solver. 
+ *
+ * More specifically, this solves the equation
+ * \f[
+ *   \nabla \cdot \sigma \nabla \phi = - \rho/\epsilon_0
+ * \f]
+ *
+ * \tparam T_PostPhiCalculationFunctor a calculation per level directly after phi was calculated
+ * \tparam T_BoundaryHandler handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler
+ * \tparam T_FArrayBoxFactory usually nothing or an amrex::EBFArrayBoxFactory (EB ONLY)
+ * \param[in] rho The charge density of a given species
+ * \param[out] phi The potential to be computed by this function
+ * \param[in] sigma The matrix representing the mass operator used to lower the local plasma frequency
+ * \param[in] relative_tolerance The relative convergence threshold for the MLMG solver
+ * \param[in] absolute_tolerance The absolute convergence threshold for the MLMG solver
+ * \param[in] max_iters The maximum number of iterations allowed for the MLMG solver
+ * \param[in] verbosity The verbosity setting for the MLMG solver
+ * \param[in] geom the geometry per level (e.g., from AmrMesh)
+ * \param[in] dmap the distribution mapping per level (e.g., from AmrMesh)
+ * \param[in] grids the grids per level (e.g., from AmrMesh)
+ * \param[in] grid_type Integer that corresponds to the type of grid used in the simulation (collocated, staggered, hybrid)
+ * \param[in] is_solver_igf_on_lev0 boolean to select the Poisson solver: 1 for FFT on level 0 & Multigrid on other levels, 0 for Multigrid on all levels
+ * \param[in] eb_enabled solve with embedded boundaries
+ * \param[in] do_single_precision_comms perform communications in single precision
+ * \param[in] rel_ref_ratio mesh refinement ratio between levels (default: 1)
+ * \param[in] post_phi_calculation perform a calculation per level directly after phi was calculated; required for embedded boundaries (default: none)
+ * \param[in] boundary_handler a handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler
+ * \param[in] current_time the current time; required for embedded boundaries (default: none)
+ * \param[in] eb_farray_box_factory a factory for field data, @see amrex::EBFArrayBoxFactory; required for embedded boundaries (default: none)
+ */
+template<
+    typename T_PostPhiCalculationFunctor = std::nullopt_t,
+    typename T_BoundaryHandler = std::nullopt_t,
+    typename T_FArrayBoxFactory = void
+>
+void
+computeEffectivePotentialPhi (
+    ablastr::fields::MultiLevelScalarField const& rho,
+    ablastr::fields::MultiLevelScalarField const& phi,
+    amrex::MultiFab const & sigma,
+    amrex::Real relative_tolerance,
+    amrex::Real absolute_tolerance,
+    int max_iters,
+    int verbosity,
+    amrex::Vector const& geom,
+    amrex::Vector const& dmap,
+    amrex::Vector const& grids,
+    [[maybe_unused]] utils::enums::GridType grid_type,
+    bool is_solver_igf_on_lev0,
+    bool eb_enabled = false,
+    bool do_single_precision_comms = false,
+    std::optional > rel_ref_ratio = std::nullopt,
+    [[maybe_unused]] T_PostPhiCalculationFunctor post_phi_calculation = std::nullopt,
+    [[maybe_unused]] T_BoundaryHandler const boundary_handler = std::nullopt,
+    [[maybe_unused]] std::optional current_time = std::nullopt, // only used for EB
+    [[maybe_unused]] std::optional > eb_farray_box_factory = std::nullopt // only used for EB
+) {
+    using namespace amrex::literals;
+
+    ABLASTR_PROFILE("computeEffectivePotentialPhi");
+
+    if (!rel_ref_ratio.has_value()) {
+        ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE(rho.size() == 1u,
+            "rel_ref_ratio must be set if mesh-refinement is used");
+        rel_ref_ratio = amrex::Vector{{amrex::IntVect(AMREX_D_DECL(1, 1, 1))}};
+    }
+
+#if !defined(AMREX_USE_EB)
+    ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE(!eb_enabled,
+        "Embedded boundary solve requested but not compiled
in"); +#endif + if (eb_enabled && std::is_same_v) { + throw std::runtime_error("EB requested by eb_farray_box_factory not provided!"); + } + + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, + "FFT solver cannot be used with effective potential Poisson solve"); + +#ifdef WARPX_DIM_RZ + constexpr bool is_rz = true; +#else + constexpr bool is_rz = false; +#endif + + auto const finest_level = static_cast(rho.size() - 1); + + // determine if rho is zero everywhere + const amrex::Real max_norm_b = getMaxNormRho( + amrex::GetVecOfConstPtrs(rho), finest_level, absolute_tolerance); + + const amrex::LPInfo info; + + for (int lev=0; lev<=finest_level; lev++) { + + // Use the Multigrid (MLMG) solver but first scale rho appropriately + using namespace ablastr::constant::SI; + rho[lev]->mult(-1._rt/ep0); + + std::unique_ptr linop; + // In the presence of EB or RZ the EB enabled linear solver is used + if (eb_enabled) + { +#if defined(AMREX_USE_EB) + auto linop_nodelap = std::make_unique(); + linop_nodelap->define( + amrex::Vector{geom[lev]}, + amrex::Vector{grids[lev]}, + amrex::Vector{dmap[lev]}, + info, + amrex::Vector{eb_farray_box_factory.value()[lev]} + ); + if constexpr (!std::is_same_v) { + // if the EB potential only depends on time, the potential can be passed + // as a float instead of a callable + if (boundary_handler.phi_EB_only_t) { + linop_nodelap->setEBDirichlet(boundary_handler.potential_eb_t(current_time.value())); + } else { + linop_nodelap->setEBDirichlet(boundary_handler.getPhiEB(current_time.value())); + } + } + linop_nodelap->setSigma(lev, sigma); + linop = std::move(linop_nodelap); +#endif + } + else if (is_rz) + { + auto linop_nodelap = std::make_unique(); + linop_nodelap->define( + amrex::Vector{geom[lev]}, + amrex::Vector{grids[lev]}, + amrex::Vector{dmap[lev]}, + info + ); + linop_nodelap->setRZ(true); + linop_nodelap->setSigma(lev, sigma); + linop = std::move(linop_nodelap); + } + else + { + auto linop_nodelap = std::make_unique(); + linop_nodelap->define( + amrex::Vector{geom[lev]}, + amrex::Vector{grids[lev]}, + amrex::Vector{dmap[lev]}, + info + ); + linop_nodelap->setSigma(lev, sigma); + linop = std::move(linop_nodelap); + } + + // Set domain boundary conditions + if constexpr (std::is_same_v) { + amrex::Array const lobc = {AMREX_D_DECL( + amrex::LinOpBCType::Dirichlet, + amrex::LinOpBCType::Dirichlet, + amrex::LinOpBCType::Dirichlet + )}; + amrex::Array const hibc = lobc; + linop->setDomainBC(lobc, hibc); + } else { + linop->setDomainBC(boundary_handler.lobc, boundary_handler.hibc); + } + + // Solve the Poisson equation + amrex::MLMG mlmg(*linop); // actual solver defined here + mlmg.setVerbose(verbosity); + mlmg.setMaxIter(max_iters); + mlmg.setAlwaysUseBNorm((max_norm_b > 0)); + + const int ng = int(grid_type == utils::enums::GridType::Collocated); // ghost cells + if (ng) { + // In this case, computeE needs to use ghost nodes data. So we + // ask MLMG to fill BC for us after it solves the problem. 
+ mlmg.setFinalFillBC(true); + } + + // Solve Poisson equation at lev + mlmg.solve( {phi[lev]}, {rho[lev]}, + relative_tolerance, absolute_tolerance ); + + // needed for solving the levels by levels: + // - coarser level is initial guess for finer level + // - coarser level provides boundary values for finer level patch + // Interpolation from phi[lev] to phi[lev+1] + // (This provides both the boundary conditions and initial guess for phi[lev+1]) + if (lev < finest_level) { + const amrex::IntVect& refratio = rel_ref_ratio.value()[lev]; + const int ncomp = linop->getNComp(); + interpolatePhiBetweenLevels(phi[lev], + phi[lev+1], + geom[lev], + do_single_precision_comms, + refratio, + ncomp, + ng); + } + + // Run additional operations, such as calculation of the E field for embedded boundaries + if constexpr (!std::is_same::value) { + if (post_phi_calculation.has_value()) { + post_phi_calculation.value()(mlmg, lev); + } + } + rho[lev]->mult(-ep0); // Multiply rho by epsilon again + } // loop over lev(els) +} + +} // namespace ablastr::fields + +#endif // ABLASTR_EFFECTIVE_POTENTIAL_POISSON_SOLVER_H diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index d7eeecead1b..aa9288fe950 100644 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -178,12 +178,12 @@ inline void interpolatePhiBetweenLevels ( * \param[in] dmap the distribution mapping per level (e.g., from AmrMesh) * \param[in] grids the grids per level (e.g., from AmrMesh) * \param[in] grid_type Integer that corresponds to the type of grid used in the simulation (collocated, staggered, hybrid) - * \param[in] boundary_handler a handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler * \param[in] is_solver_igf_on_lev0 boolean to select the Poisson solver: 1 for FFT on level 0 & Multigrid on other levels, 0 for Multigrid on all levels * \param[in] eb_enabled solve with embedded boundaries * \param[in] do_single_precision_comms perform communications in single precision * \param[in] rel_ref_ratio mesh refinement ratio between levels (default: 1) * \param[in] post_phi_calculation perform a calculation per level directly after phi was calculated; required for embedded boundaries (default: none) + * \param[in] boundary_handler a handler for boundary conditions, for example @see ElectrostaticSolver::PoissonBoundaryHandler * \param[in] current_time the current time; required for embedded boundaries (default: none) * \param[in] eb_farray_box_factory a factory for field data, @see amrex::EBFArrayBoxFactory; required for embedded boundaries (default: none) */ @@ -210,7 +210,7 @@ computePhi ( bool do_single_precision_comms = false, std::optional > rel_ref_ratio = std::nullopt, [[maybe_unused]] T_PostPhiCalculationFunctor post_phi_calculation = std::nullopt, - [[maybe_unused]] T_BoundaryHandler const boundary_handler = std::nullopt, // only used for EB + [[maybe_unused]] T_BoundaryHandler const boundary_handler = std::nullopt, [[maybe_unused]] std::optional current_time = std::nullopt, // only used for EB [[maybe_unused]] std::optional > eb_farray_box_factory = std::nullopt // only used for EB ) diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 8a7d11d5b1d..3733b729004 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -99,7 +99,7 @@ macro(find_amrex) set(AMReX_PROBINIT OFF CACHE INTERNAL "") set(AMReX_TINY_PROFILE ON CACHE BOOL "") set(AMReX_LINEAR_SOLVERS_EM ON CACHE 
INTERNAL "") - set(AMReX_LINEAR_SOLVERS_INCFLO OFF CACHE INTERNAL "") + set(AMReX_LINEAR_SOLVERS_INCFLO ON CACHE INTERNAL "") if(WarpX_ASCENT OR WarpX_SENSEI) set(AMReX_GPU_RDC ON CACHE BOOL "") From 540268f2c05d46b90ac8f33718efa63b47687cae Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Wed, 11 Dec 2024 13:55:18 -0800 Subject: [PATCH 116/278] Add `verbosity` flag for diagnostic output print statements (#5503) This PR passes the `warpx.verbose` value to diagnostics to set whether a message is output when the diagnostic is active. Specifically, this helps with the time-averaged diagnostic where currently a message is print at every step that averaging is done (commonly every step). --------- Signed-off-by: roelof-groenewald --- Source/Diagnostics/BTDiagnostics.cpp | 2 +- .../BoundaryScrapingDiagnostics.cpp | 2 +- Source/Diagnostics/Diagnostics.H | 2 ++ Source/Diagnostics/Diagnostics.cpp | 3 +++ Source/Diagnostics/FlushFormats/FlushFormat.H | 1 + .../FlushFormats/FlushFormatAscent.H | 3 ++- .../FlushFormats/FlushFormatAscent.cpp | 7 ++++-- .../FlushFormats/FlushFormatCatalyst.H | 1 + .../FlushFormats/FlushFormatCatalyst.cpp | 1 + .../FlushFormats/FlushFormatCheckpoint.H | 1 + .../FlushFormats/FlushFormatCheckpoint.cpp | 7 ++++-- .../FlushFormats/FlushFormatOpenPMD.H | 3 ++- .../FlushFormats/FlushFormatOpenPMD.cpp | 21 +++++++++------- .../FlushFormats/FlushFormatPlotfile.H | 1 + .../FlushFormats/FlushFormatPlotfile.cpp | 25 +++++++++++-------- .../FlushFormats/FlushFormatSensei.H | 1 + .../FlushFormats/FlushFormatSensei.cpp | 8 +++--- Source/Diagnostics/FullDiagnostics.cpp | 8 +++--- 18 files changed, 63 insertions(+), 34 deletions(-) diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp index 4939e2fb207..09167452c1a 100644 --- a/Source/Diagnostics/BTDiagnostics.cpp +++ b/Source/Diagnostics/BTDiagnostics.cpp @@ -1091,7 +1091,7 @@ BTDiagnostics::Flush (int i_buffer, bool force_flush) m_varnames, m_mf_output.at(i_buffer), m_geom_output.at(i_buffer), warpx.getistep(), labtime, m_output_species.at(i_buffer), nlev_output, file_name, m_file_min_digits, - m_plot_raw_fields, m_plot_raw_fields_guards, + m_plot_raw_fields, m_plot_raw_fields_guards, m_verbose, use_pinned_pc, isBTD, i_buffer, m_buffer_flush_counter.at(i_buffer), m_max_buffer_multifabs.at(i_buffer), m_geom_snapshot.at(i_buffer).at(0), isLastBTDFlush); diff --git a/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp b/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp index 8df58b6fb28..bcccda48c18 100644 --- a/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp +++ b/Source/Diagnostics/BoundaryScrapingDiagnostics.cpp @@ -153,7 +153,7 @@ BoundaryScrapingDiagnostics::Flush (int i_buffer, bool /* force_flush */) warpx.gett_new(0), m_output_species.at(i_buffer), nlev_output, file_prefix, - m_file_min_digits, false, false, use_pinned_pc, isBTD, + m_file_min_digits, false, false, m_verbose, use_pinned_pc, isBTD, warpx.getistep(0), bufferID, numBTDBuffers, geom, isLastBTD); diff --git a/Source/Diagnostics/Diagnostics.H b/Source/Diagnostics/Diagnostics.H index d0c70e76c1f..a078bab3597 100644 --- a/Source/Diagnostics/Diagnostics.H +++ b/Source/Diagnostics/Diagnostics.H @@ -190,6 +190,8 @@ public: protected: /** Read Parameters of the base Diagnostics class */ bool BaseReadParameters (); + /** Whether to output a message when diagnostics are output */ + int m_verbose = 2; /** Initialize member variables of the base Diagnostics class. 
*/ void InitBaseData (); /** Initialize m_mf_output vectors and data required to construct the buffers diff --git a/Source/Diagnostics/Diagnostics.cpp b/Source/Diagnostics/Diagnostics.cpp index a60aea40242..2b3b6d7df61 100644 --- a/Source/Diagnostics/Diagnostics.cpp +++ b/Source/Diagnostics/Diagnostics.cpp @@ -62,6 +62,9 @@ Diagnostics::BaseReadParameters () std::string dims; pp_geometry.get("dims", dims); + const amrex::ParmParse pp_warpx("warpx"); + pp_warpx.query("verbose", m_verbose); + // Query list of grid fields to write to output const bool varnames_specified = pp_diag_name.queryarr("fields_to_plot", m_varnames_fields); if (!varnames_specified){ diff --git a/Source/Diagnostics/FlushFormats/FlushFormat.H b/Source/Diagnostics/FlushFormats/FlushFormat.H index be1322bd61b..f0a83f7a24b 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormat.H +++ b/Source/Diagnostics/FlushFormats/FlushFormat.H @@ -19,6 +19,7 @@ public: std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose = 2, bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, int bufferID = 1, int numBuffers = 1, diff --git a/Source/Diagnostics/FlushFormats/FlushFormatAscent.H b/Source/Diagnostics/FlushFormats/FlushFormatAscent.H index a5a10d77bdc..86ad4b09cfb 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatAscent.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatAscent.H @@ -37,11 +37,12 @@ public: std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose = 2, bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, int bufferID = 1, int numBuffers = 1, const amrex::Geometry& full_BTD_snapshot = amrex::Geometry(), - bool isLastBTDFlush = false ) const override; + bool isLastBTDFlush = false) const override; FlushFormatAscent () = default; ~FlushFormatAscent() override = default; diff --git a/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp b/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp index 36f0ef05faa..5a405568aa7 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatAscent.cpp @@ -18,6 +18,7 @@ FlushFormatAscent::WriteToFile ( const amrex::Vector& particle_diags, int nlev, const std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose, const bool /*use_pinned_pc*/, bool isBTD, int /*snapshotID*/, int /*bufferID*/, int /*numBuffers*/, const amrex::Geometry& /*full_BTD_snapshot*/, @@ -32,7 +33,9 @@ FlushFormatAscent::WriteToFile ( "In-situ visualization is not currently supported for back-transformed diagnostics."); const std::string& filename = amrex::Concatenate(prefix, iteration[0], file_min_digits); - amrex::Print() << Utils::TextMsg::Info("Writing Ascent file " + filename); + if (verbose > 0) { + amrex::Print() << Utils::TextMsg::Info("Writing Ascent file " + filename); + } // wrap mesh data WARPX_PROFILE_VAR("FlushFormatAscent::WriteToFile::MultiLevelToBlueprint", prof_ascent_mesh_blueprint); @@ -67,7 +70,7 @@ FlushFormatAscent::WriteToFile ( #else amrex::ignore_unused(varnames, mf, geom, iteration, time, - particle_diags, nlev, file_min_digits, isBTD); + particle_diags, nlev, file_min_digits, verbose, isBTD); #endif // AMREX_USE_ASCENT amrex::ignore_unused(prefix, plot_raw_fields, plot_raw_fields_guards); } diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.H b/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.H index 6974bf731a6..c5f3b9148c1 100644 
--- a/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.H @@ -43,6 +43,7 @@ public: std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose = 2, bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, int bufferID = 1, int numBuffers = 1, diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.cpp b/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.cpp index 425c13de6a4..3e542f9f871 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.cpp @@ -127,6 +127,7 @@ FlushFormatCatalyst::WriteToFile ( const amrex::Vector& particle_diags, int nlev, const std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int /*verbose*/, bool /*use_pinned_pc*/, bool isBTD, int /*snapshotID*/, int /*bufferID*/, int /*numBuffers*/, const amrex::Geometry& /*full_BTD_snapshot*/, diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H index 5c26ac97f61..cb0a6c4b6c7 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H @@ -24,6 +24,7 @@ class FlushFormatCheckpoint final : public FlushFormatPlotfile std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose = 2, bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, int bufferID = 1, int numBuffers = 1, diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp index 4d721dd6abe..788e040b0ee 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp @@ -39,6 +39,7 @@ FlushFormatCheckpoint::WriteToFile ( const std::string prefix, int file_min_digits, bool /*plot_raw_fields*/, bool /*plot_raw_fields_guards*/, + int verbose, const bool /*use_pinned_pc*/, bool /*isBTD*/, int /*snapshotID*/, int /*bufferID*/, int /*numBuffers*/, @@ -56,8 +57,10 @@ FlushFormatCheckpoint::WriteToFile ( const std::string& checkpointname = amrex::Concatenate(prefix, iteration[0], file_min_digits); - amrex::Print() << Utils::TextMsg::Info( - "Writing checkpoint " + checkpointname); + if (verbose > 0) { + amrex::Print() << Utils::TextMsg::Info( + "Writing checkpoint " + checkpointname); + } // const int nlevels = finestLevel()+1; amrex::PreBuildDirectorHierarchy(checkpointname, default_level_prefix, nlev, true); diff --git a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.H b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.H index 141760ac2a3..5666d85bf3a 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.H @@ -36,11 +36,12 @@ public: std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose, bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, int bufferID = 1, int numBuffers = 1, const amrex::Geometry& full_BTD_snapshot = amrex::Geometry(), - bool isLastBTDFlush = false ) const override; + bool isLastBTDFlush = false) const override; ~FlushFormatOpenPMD () override = default; diff --git a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp index e0c8c4ef2d6..aeb26656b46 100644 --- 
a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp @@ -123,6 +123,7 @@ FlushFormatOpenPMD::WriteToFile ( const amrex::Vector& particle_diags, int output_levels, const std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose, const bool use_pinned_pc, bool isBTD, int snapshotID, int bufferID, int numBuffers, const amrex::Geometry& full_BTD_snapshot, @@ -130,16 +131,18 @@ FlushFormatOpenPMD::WriteToFile ( { WARPX_PROFILE("FlushFormatOpenPMD::WriteToFile()"); const std::string& filename = amrex::Concatenate(prefix, iteration[0], file_min_digits); - if (!isBTD) - { - amrex::Print() << Utils::TextMsg::Info("Writing openPMD file " + filename); - } else - { - amrex::Print() << Utils::TextMsg::Info("Writing buffer " + std::to_string(bufferID+1) + " of " + std::to_string(numBuffers) - + " to snapshot " + std::to_string(snapshotID) + " to openPMD BTD " + prefix); - if (isLastBTDFlush) + if (verbose > 0) { + if (!isBTD) + { + amrex::Print() << Utils::TextMsg::Info("Writing openPMD file " + filename); + } else { - amrex::Print() << Utils::TextMsg::Info("Finished writing snapshot " + std::to_string(snapshotID) + " in openPMD BTD " + prefix); + amrex::Print() << Utils::TextMsg::Info("Writing buffer " + std::to_string(bufferID+1) + " of " + std::to_string(numBuffers) + + " to snapshot " + std::to_string(snapshotID) + " to openPMD BTD " + prefix); + if (isLastBTDFlush) + { + amrex::Print() << Utils::TextMsg::Info("Finished writing snapshot " + std::to_string(snapshotID) + " in openPMD BTD " + prefix); + } } } diff --git a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H index c62056b8907..18648c07e69 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.H @@ -31,6 +31,7 @@ public: std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose = 2, bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, int bufferID = 1, int numBuffers = 1, diff --git a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp index 0f05496e4c0..879a5986434 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp @@ -66,6 +66,7 @@ FlushFormatPlotfile::WriteToFile ( const amrex::Vector& particle_diags, int nlev, const std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose, const bool /*use_pinned_pc*/, bool isBTD, int snapshotID, int bufferID, int numBuffers, const amrex::Geometry& /*full_BTD_snapshot*/, @@ -74,17 +75,19 @@ FlushFormatPlotfile::WriteToFile ( WARPX_PROFILE("FlushFormatPlotfile::WriteToFile()"); auto & warpx = WarpX::GetInstance(); const std::string& filename = amrex::Concatenate(prefix, iteration[0], file_min_digits); - if (!isBTD) - { - amrex::Print() << Utils::TextMsg::Info("Writing plotfile " + filename); - } else - { - amrex::Print() << Utils::TextMsg::Info("Writing buffer " + std::to_string(bufferID+1) + " of " + std::to_string(numBuffers) - + " to snapshot " + std::to_string(snapshotID) + " in plotfile BTD " + prefix ); - if (isLastBTDFlush) - { - amrex::Print() << Utils::TextMsg::Info("Finished writing snapshot " + std::to_string(snapshotID) + " in plotfile BTD " + filename); - } + if (verbose > 0) { + if (!isBTD) + { + 
amrex::Print() << Utils::TextMsg::Info("Writing plotfile " + filename); + } else + { + amrex::Print() << Utils::TextMsg::Info("Writing buffer " + std::to_string(bufferID+1) + " of " + std::to_string(numBuffers) + + " to snapshot " + std::to_string(snapshotID) + " in plotfile BTD " + prefix ); + if (isLastBTDFlush) + { + amrex::Print() << Utils::TextMsg::Info("Finished writing snapshot " + std::to_string(snapshotID) + " in plotfile BTD " + filename); + } + } } Vector rfs; diff --git a/Source/Diagnostics/FlushFormats/FlushFormatSensei.H b/Source/Diagnostics/FlushFormats/FlushFormatSensei.H index 45ea40077e4..87ed00e539e 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatSensei.H +++ b/Source/Diagnostics/FlushFormats/FlushFormatSensei.H @@ -57,6 +57,7 @@ public: std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, + int verbose = 2, bool use_pinned_pc = false, bool isBTD = false, int snapshotID = -1, int bufferID = 1, int numBuffers = 1, diff --git a/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp b/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp index 468ed81ce18..b96c6d76f91 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatSensei.cpp @@ -51,7 +51,7 @@ FlushFormatSensei::WriteToFile ( const amrex::Vector& particle_diags, int nlev, const std::string prefix, int file_min_digits, bool plot_raw_fields, bool plot_raw_fields_guards, - const bool use_pinned_pc, + int verbose, const bool use_pinned_pc, bool isBTD, int /*snapshotID*/, int /*bufferID*/, int /*numBuffers*/, const amrex::Geometry& /*full_BTD_snapshot*/, bool /*isLastBTDFlush*/) const { @@ -63,7 +63,7 @@ FlushFormatSensei::WriteToFile ( #ifndef AMREX_USE_SENSEI_INSITU amrex::ignore_unused(varnames, mf, iteration, time, particle_diags, - isBTD); + verbose, isBTD); #else WARPX_ALWAYS_ASSERT_WITH_MESSAGE( !isBTD, @@ -71,7 +71,9 @@ FlushFormatSensei::WriteToFile ( WARPX_PROFILE("FlushFormatSensei::WriteToFile()"); const std::string& filename = amrex::Concatenate(prefix, iteration[0], file_min_digits); - amrex::Print() << Utils::TextMsg::Info("Writing Sensei file " + filename); + if (verbose > 0) { + amrex::Print() << Utils::TextMsg::Info("Writing Sensei file " + filename); + } amrex::Vector *mf_ptr = const_cast*>(&mf); diff --git a/Source/Diagnostics/FullDiagnostics.cpp b/Source/Diagnostics/FullDiagnostics.cpp index 7a8f376cd21..946178fd1b5 100644 --- a/Source/Diagnostics/FullDiagnostics.cpp +++ b/Source/Diagnostics/FullDiagnostics.cpp @@ -261,7 +261,8 @@ FullDiagnostics::Flush ( int i_buffer, bool /* force_flush */ ) m_varnames, m_sum_mf_output.at(i_buffer), m_geom_output.at(i_buffer), warpx.getistep(), warpx.gett_new(0), m_output_species.at(i_buffer), nlev_output, m_file_prefix, - m_file_min_digits, m_plot_raw_fields, m_plot_raw_fields_guards); + m_file_min_digits, m_plot_raw_fields, m_plot_raw_fields_guards, + m_verbose); // Reset the values in the dynamic start time-averaged diagnostics after flush if (m_time_average_mode == TimeAverageType::Dynamic) { @@ -281,7 +282,8 @@ FullDiagnostics::Flush ( int i_buffer, bool /* force_flush */ ) m_varnames, m_mf_output.at(i_buffer), m_geom_output.at(i_buffer), warpx.getistep(), warpx.gett_new(0), m_output_species.at(i_buffer), nlev_output, m_file_prefix, - m_file_min_digits, m_plot_raw_fields, m_plot_raw_fields_guards); + m_file_min_digits, m_plot_raw_fields, m_plot_raw_fields_guards, + m_verbose); } FlushRaw(); @@ -340,7 +342,7 @@ FullDiagnostics::DoComputeAndPack (int step, 
bool force_flush) } } // Print information on when time-averaging is active - if (in_averaging_period) { + if ((m_verbose > 1) && in_averaging_period) { if (step == m_average_start_step) { amrex::Print() << Utils::TextMsg::Info( "Begin time averaging for " + m_diag_name + " and output at step " From f68415ae71dedbdd0758e5afbc66f355ae30eddb Mon Sep 17 00:00:00 2001 From: Olga Shapoval <30510597+oshapoval@users.noreply.github.com> Date: Wed, 11 Dec 2024 15:57:53 -0800 Subject: [PATCH 117/278] Added abort message if 1D PSATD is used (#5500) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added an abort message for cases where 1D PSATD is used, as it’s not implemented for this geometry. --- Source/WarpX.cpp | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 176a6f63ddf..965235e1078 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -1086,7 +1086,10 @@ WarpX::ReadParameters () WARPX_ALWAYS_ASSERT_WITH_MESSAGE( electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD, "algo.maxwell_solver = psatd is not supported because WarpX was built without spectral solvers"); #endif - +#if defined(WARPX_DIM_1D_Z) && defined(WARPX_USE_FFT) + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD, + "algo.maxwell_solver = psatd is not available for 1D geometry"); +#endif #ifdef WARPX_DIM_RZ WARPX_ALWAYS_ASSERT_WITH_MESSAGE(Geom(0).isPeriodic(0) == 0, "The problem must not be periodic in the radial direction"); From 5bc47901c512406493ed9eaf56f9be0a81feec62 Mon Sep 17 00:00:00 2001 From: Olga Shapoval <30510597+oshapoval@users.noreply.github.com> Date: Thu, 12 Dec 2024 13:51:21 -0800 Subject: [PATCH 118/278] Added AnalyticFluxDistribution class (#5422) Added an AnalyticFluxDistribution class with a parsed `flux_expression`. Depends on https://github.com/picmi-standard/picmi/pull/121 --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: David Grote --- Docs/requirements.txt | 2 +- Docs/source/usage/python.rst | 4 + Python/pywarpx/picmi.py | 93 +++++++++++++++---- Python/setup.py | 2 +- .../karolina-it4i/install_dependencies.sh | 2 +- requirements.txt | 2 +- 6 files changed, 84 insertions(+), 21 deletions(-) diff --git a/Docs/requirements.txt b/Docs/requirements.txt index 7581638551e..14fafe406fb 100644 --- a/Docs/requirements.txt +++ b/Docs/requirements.txt @@ -13,7 +13,7 @@ openpmd-viewer # for checksumAPI # PICMI API docs # note: keep in sync with version in ../requirements.txt -picmistandard==0.31.0 +picmistandard==0.33.0 # for development against an unreleased PICMI version, use: # picmistandard @ git+https://github.com/picmi-standard/picmi.git#subdirectory=PICMI_Python diff --git a/Docs/source/usage/python.rst b/Docs/source/usage/python.rst index 8b40684feb9..1af884b40e7 100644 --- a/Docs/source/usage/python.rst +++ b/Docs/source/usage/python.rst @@ -146,6 +146,10 @@ Particle distributions can be used for to initialize particles in a particle spe .. autoclass:: pywarpx.picmi.AnalyticDistribution +.. autoclass:: pywarpx.picmi.UniformFluxDistribution + +.. autoclass:: pywarpx.picmi.AnalyticFluxDistribution + .. autoclass:: pywarpx.picmi.ParticleListDistribution Particle layouts determine how to microscopically place macro particles in a grid cell. 
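For orientation, a minimal usage sketch of the new class in a PICMI run script follows (this snippet is not part of the patch: the ``flux`` expression argument and the ``warpx_inject_from_embedded_boundary`` option come from this change, while the plane parameters and all numeric values are illustrative assumptions mirrored from the existing ``UniformFluxDistribution`` interface):

```python
from pywarpx import picmi

# Hedged sketch: a space-dependent injection flux parsed by WarpX.
# The string passed as `flux` is forwarded to the WarpX input parameter
# `<species>.flux_function(x,y,z,t)` (see the picmi.py diff below).
flux_dist = picmi.AnalyticFluxDistribution(
    flux="1.0e22*exp(-(x*x+y*y)/1.0e-6)",  # example values, made up for illustration
    flux_normal_axis="z",       # assumed plane parameters; per the diff below,
    surface_flux_position=0.0,  # they are skipped when injecting from the
    flux_direction=+1,          # embedded boundary
    warpx_inject_from_embedded_boundary=True,  # new WarpX-specific option
)

electrons = picmi.Species(
    particle_type="electron",
    name="electrons",
    initial_distribution=flux_dist,
)
```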
diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index de3b26955b7..d464c44726f 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -688,14 +688,34 @@ def setup_parse_momentum_functions( ) -class UniformFluxDistribution( - picmistandard.PICMI_UniformFluxDistribution, DensityDistributionBase +class UniformDistribution( + picmistandard.PICMI_UniformDistribution, DensityDistributionBase ): + def distribution_initialize_inputs( + self, species_number, layout, species, density_scale, source_name + ): + self.set_mangle_dict() + self.set_species_attributes(species, layout, source_name) + + # --- Only constant density is supported by this class + species.add_new_group_attr(source_name, "profile", "constant") + species.add_new_group_attr(source_name, "density", self.density) + if density_scale is not None: + species.add_new_group_attr(source_name, "density", density_scale) + + +class FluxDistributionBase(object): + """This is a base class for both uniform and analytic flux distributions.""" + def init(self, kw): self.inject_from_embedded_boundary = kw.pop( "warpx_inject_from_embedded_boundary", False ) + def initialize_flux_profile_func(self, species, density_scale, source_name): + """Initialize the flux profile and flux function.""" + pass + def distribution_initialize_inputs( self, species_number, layout, species, density_scale, source_name ): @@ -703,10 +723,7 @@ def distribution_initialize_inputs( self.set_mangle_dict() self.set_species_attributes(species, layout, source_name) - species.add_new_group_attr(source_name, "flux_profile", "constant") - species.add_new_group_attr(source_name, "flux", self.flux) - if density_scale is not None: - species.add_new_group_attr(source_name, "flux", density_scale) + self.initialize_flux_profile_func(species, density_scale, source_name) if not self.inject_from_embedded_boundary: species.add_new_group_attr( @@ -737,20 +754,62 @@ def distribution_initialize_inputs( ) -class UniformDistribution( - picmistandard.PICMI_UniformDistribution, DensityDistributionBase +class AnalyticFluxDistribution( + picmistandard.PICMI_AnalyticFluxDistribution, + FluxDistributionBase, + DensityDistributionBase, ): - def distribution_initialize_inputs( - self, species_number, layout, species, density_scale, source_name - ): - self.set_mangle_dict() - self.set_species_attributes(species, layout, source_name) + """ + Parameters + ---------- - # --- Only constant density is supported by this class - species.add_new_group_attr(source_name, "profile", "constant") - species.add_new_group_attr(source_name, "density", self.density) + warpx_inject_from_embedded_boundary: bool + When true, the flux is injected from the embedded boundaries instead + of a plane. 
+ """ + + def init(self, kw): + FluxDistributionBase.init(self, kw) + + def initialize_flux_profile_func(self, species, density_scale, source_name): + species.add_new_group_attr(source_name, "flux_profile", "parse_flux_function") if density_scale is not None: - species.add_new_group_attr(source_name, "density", density_scale) + species.add_new_group_attr(source_name, "flux", density_scale) + expression = pywarpx.my_constants.mangle_expression(self.flux, self.mangle_dict) + if density_scale is None: + species.add_new_group_attr( + source_name, "flux_function(x,y,z,t)", expression + ) + else: + species.add_new_group_attr( + source_name, + "flux_function(x,y,z,t)", + "{}*({})".format(density_scale, expression), + ) + + +class UniformFluxDistribution( + picmistandard.PICMI_UniformFluxDistribution, + FluxDistributionBase, + DensityDistributionBase, +): + """ + Parameters + ---------- + + warpx_inject_from_embedded_boundary: bool + When true, the flux is injected from the embedded boundaries instead + of a plane. + """ + + def init(self, kw): + FluxDistributionBase.init(self, kw) + + def initialize_flux_profile_func(self, species, density_scale, source_name): + species.add_new_group_attr(source_name, "flux_profile", "constant") + species.add_new_group_attr(source_name, "flux", self.flux) + if density_scale is not None: + species.add_new_group_attr(source_name, "flux", density_scale) class AnalyticDistribution( diff --git a/Python/setup.py b/Python/setup.py index 8080b62acf4..fa38e14e7ce 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -70,7 +70,7 @@ package_dir={"pywarpx": "pywarpx"}, description="""Wrapper of WarpX""", package_data=package_data, - install_requires=["numpy", "picmistandard==0.31.0", "periodictable"], + install_requires=["numpy", "picmistandard==0.33.0", "periodictable"], python_requires=">=3.8", zip_safe=False, ) diff --git a/Tools/machines/karolina-it4i/install_dependencies.sh b/Tools/machines/karolina-it4i/install_dependencies.sh index 9cc4f1ee144..33d8462b55f 100755 --- a/Tools/machines/karolina-it4i/install_dependencies.sh +++ b/Tools/machines/karolina-it4i/install_dependencies.sh @@ -53,7 +53,7 @@ python -m pip install --user --upgrade matplotlib #python -m pip install --user --upgrade yt # install or update WarpX dependencies -python -m pip install --user --upgrade picmistandard==0.31.0 +python -m pip install --user --upgrade picmistandard==0.33.0 python -m pip install --user --upgrade lasy # optional: for optimas (based on libEnsemble & ax->botorch->gpytorch->pytorch) diff --git a/requirements.txt b/requirements.txt index 2c8b749abe0..e44273328de 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ periodictable~=1.5 # PICMI # note: don't forget to update the version in Docs/requirements.txt, too -picmistandard==0.31.0 +picmistandard==0.33.0 # for development against an unreleased PICMI version, use: #picmistandard @ git+https://github.com/picmi-standard/picmi.git#subdirectory=PICMI_Python From 7299895094cba9bd2170f7b52f985526255212ec Mon Sep 17 00:00:00 2001 From: David Grote Date: Fri, 13 Dec 2024 02:57:03 -0800 Subject: [PATCH 119/278] Replace quartz with dane (#5507) The Quartz system at LLNL was removed and replaced by Dane. This PR updates the scripts and install instructions for the new machine. 
--- Docs/source/install/hpc.rst | 2 +- .../install/hpc/{quartz.rst => dane.rst} | 78 +++++++++---------- .../quartz.sbatch => dane-llnl/dane.sbatch} | 11 ++- .../dane_warpx.profile.example} | 30 +++---- .../install_dependencies.sh | 46 +++++------ 5 files changed, 82 insertions(+), 85 deletions(-) rename Docs/source/install/hpc/{quartz.rst => dane.rst} (52%) rename Tools/machines/{quartz-llnl/quartz.sbatch => dane-llnl/dane.sbatch} (78%) rename Tools/machines/{quartz-llnl/quartz_warpx.profile.example => dane-llnl/dane_warpx.profile.example} (62%) rename Tools/machines/{quartz-llnl => dane-llnl}/install_dependencies.sh (58%) diff --git a/Docs/source/install/hpc.rst b/Docs/source/install/hpc.rst index 35884050a59..61e60359e59 100644 --- a/Docs/source/install/hpc.rst +++ b/Docs/source/install/hpc.rst @@ -50,7 +50,7 @@ This section documents quick-start guides for a selection of supercomputers that hpc/perlmutter hpc/pitzer hpc/polaris - hpc/quartz + hpc/dane hpc/summit hpc/taurus hpc/tioga diff --git a/Docs/source/install/hpc/quartz.rst b/Docs/source/install/hpc/dane.rst similarity index 52% rename from Docs/source/install/hpc/quartz.rst rename to Docs/source/install/hpc/dane.rst index a49327e8613..e9af32130f5 100644 --- a/Docs/source/install/hpc/quartz.rst +++ b/Docs/source/install/hpc/dane.rst @@ -1,9 +1,9 @@ -.. _building-quartz: +.. _building-dane: -Quartz (LLNL) +Dane (LLNL) ============= -The `Quartz Intel CPU cluster `_ is located at LLNL. +The `Dane Intel CPU cluster `_ is located at LLNL. Introduction @@ -11,9 +11,7 @@ Introduction If you are new to this system, **please see the following resources**: -* `LLNL user account `__ (login required) -* `Quartz user guide `_ -* Batch system: `Slurm `_ +* `LLNL user account `__ (`documentation `__, login required) * `Production directories `_: @@ -21,7 +19,7 @@ If you are new to this system, **please see the following resources**: * Note that the ``$HOME`` directory and the ``/usr/workspace/$(whoami)`` space are NFS mounted and *not* suitable for production quality data generation. -.. _building-quartz-preparation: +.. _building-dane-preparation: Preparation ----------- @@ -32,23 +30,23 @@ Use the following commands to download the WarpX source code: git clone https://github.com/ECP-WarpX/WarpX.git $HOME/src/warpx -We use system software modules, add environment hints and further dependencies via the file ``$HOME/quartz_warpx.profile``. +We use system software modules, add environment hints and further dependencies via the file ``$HOME/dane_warpx.profile``. Create it now: .. code-block:: bash - cp $HOME/src/warpx/Tools/machines/quartz-llnl/quartz_warpx.profile.example $HOME/quartz_warpx.profile + cp $HOME/src/warpx/Tools/machines/dane-llnl/dane_warpx.profile.example $HOME/dane_warpx.profile .. dropdown:: Script Details :color: light :icon: info :animate: fade-in-slide-down - .. literalinclude:: ../../../../Tools/machines/quartz-llnl/quartz_warpx.profile.example + .. literalinclude:: ../../../../Tools/machines/dane-llnl/dane_warpx.profile.example :language: bash Edit the 2nd line of this script, which sets the ``export proj=""`` variable. -For example, if you are member of the project ``tps``, then run ``vi $HOME/quartz_warpx.profile``. +For example, if you are member of the project ``tps``, then run ``vi $HOME/dane_warpx.profile``. Enter the edit mode by typing ``i`` and edit line 2 to read: .. code-block:: bash @@ -59,29 +57,29 @@ Exit the ``vi`` editor with ``Esc`` and then type ``:wq`` (write & quit). .. 
important:: - Now, and as the first step on future logins to Quartz, activate these environment settings: + Now, and as the first step on future logins to Dane, activate these environment settings: .. code-block:: bash - source $HOME/quartz_warpx.profile + source $HOME/dane_warpx.profile -Finally, since Quartz does not yet provide software modules for some of our dependencies, install them once: +Finally, since Dane does not yet provide software modules for some of our dependencies, install them once: .. code-block:: bash - bash $HOME/src/warpx/Tools/machines/quartz-llnl/install_dependencies.sh - source /usr/workspace/${USER}/quartz/venvs/warpx-quartz/bin/activate + bash $HOME/src/warpx/Tools/machines/dane-llnl/install_dependencies.sh + source /usr/workspace/${USER}/dane/venvs/warpx-dane/bin/activate .. dropdown:: Script Details :color: light :icon: info :animate: fade-in-slide-down - .. literalinclude:: ../../../../Tools/machines/quartz-llnl/install_dependencies.sh + .. literalinclude:: ../../../../Tools/machines/dane-llnl/install_dependencies.sh :language: bash -.. _building-quartz-compilation: +.. _building-dane-compilation: Compilation ----------- @@ -91,27 +89,27 @@ Use the following :ref:`cmake commands ` to compile the applicat .. code-block:: bash cd $HOME/src/warpx - rm -rf build_quartz + rm -rf build_dane - cmake -S . -B build_quartz -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" - cmake --build build_quartz -j 6 + cmake -S . -B build_dane -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" + cmake --build build_dane -j 6 -The WarpX application executables are now in ``$HOME/src/warpx/build_quartz/bin/``. +The WarpX application executables are now in ``$HOME/src/warpx/build_dane/bin/``. Additionally, the following commands will install WarpX as a Python module: .. code-block:: bash - rm -rf build_quartz_py + rm -rf build_dane_py - cmake -S . -B build_quartz_py -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" - cmake --build build_quartz_py -j 6 --target pip_install + cmake -S . -B build_dane_py -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_APP=OFF -DWarpX_PYTHON=ON -DWarpX_DIMS="1;2;RZ;3" + cmake --build build_dane_py -j 6 --target pip_install -Now, you can :ref:`submit Quartz compute jobs ` for WarpX :ref:`Python (PICMI) scripts ` (:ref:`example scripts `). -Or, you can use the WarpX executables to submit Quartz jobs (:ref:`example inputs `). -For executables, you can reference their location in your :ref:`job script ` or copy them to a location in ``$PROJWORK/$proj/``. +Now, you can :ref:`submit Dane compute jobs ` for WarpX :ref:`Python (PICMI) scripts ` (:ref:`example scripts `). +Or, you can use the WarpX executables to submit Dane jobs (:ref:`example inputs `). +For executables, you can reference their location in your :ref:`job script ` or copy them to a location in ``$PROJWORK/$proj/``. -.. _building-quartz-update: +.. _building-dane-update: Update WarpX & Dependencies --------------------------- @@ -135,34 +133,34 @@ If you already installed WarpX in the past and want to update it, start by getti And, if needed, -- :ref:`update the quartz_warpx.profile file `, +- :ref:`update the dane_warpx.profile file `, - log out and into the system, activate the now updated environment profile as usual, -- :ref:`execute the dependency install scripts `. +- :ref:`execute the dependency install scripts `. -As a last step, clean the build directory ``rm -rf $HOME/src/warpx/build_quartz`` and rebuild WarpX. 
+As a last step, clean the build directory ``rm -rf $HOME/src/warpx/build_dane`` and rebuild WarpX. -.. _running-cpp-quartz: +.. _running-cpp-dane: Running ------- -.. _running-cpp-quartz-CPUs: +.. _running-cpp-dane-CPUs: -Intel Xeon E5-2695 v4 CPUs +Intel Sapphire Rapids CPUs ^^^^^^^^^^^^^^^^^^^^^^^^^^ -The batch script below can be used to run a WarpX simulation on 2 nodes on the supercomputer Quartz at LLNL. +The batch script below can be used to run a WarpX simulation on 2 nodes on the supercomputer Dane at LLNL. Replace descriptions between chevrons ``<>`` by relevant values, for instance ```` could be ``plasma_mirror_inputs``. -.. literalinclude:: ../../../../Tools/machines/quartz-llnl/quartz.sbatch +.. literalinclude:: ../../../../Tools/machines/dane-llnl/dane.sbatch :language: bash - :caption: You can copy this file from ``Tools/machines/quartz-llnl/quartz.sbatch``. + :caption: You can copy this file from ``Tools/machines/dane-llnl/dane.sbatch``. -To run a simulation, copy the lines above to a file ``quartz.sbatch`` and run +To run a simulation, copy the lines above to a file ``dane.sbatch`` and run .. code-block:: bash - sbatch quartz.sbatch + sbatch dane.sbatch to submit the job. diff --git a/Tools/machines/quartz-llnl/quartz.sbatch b/Tools/machines/dane-llnl/dane.sbatch similarity index 78% rename from Tools/machines/quartz-llnl/quartz.sbatch rename to Tools/machines/dane-llnl/dane.sbatch index 4c1a82ff8e9..b2a114b3f1b 100644 --- a/Tools/machines/quartz-llnl/quartz.sbatch +++ b/Tools/machines/dane-llnl/dane.sbatch @@ -15,15 +15,14 @@ # one MPI rank per half-socket (see below) #SBATCH --tasks-per-node=2 # request all logical (virtual) cores per half-socket -#SBATCH --cpus-per-task=18 +#SBATCH --cpus-per-task=112 -# each Quartz node has 1 socket of Intel Xeon E5-2695 v4 -# each Xeon CPU is divided into 2 bus rings that each have direct L3 access +# each Dane node has 2 sockets of Intel Sapphire Rapids with 56 cores each export WARPX_NMPI_PER_NODE=2 -# each MPI rank per half-socket has 9 physical cores -# or 18 logical (virtual) cores +# each MPI rank per half-socket has 56 physical cores +# or 112 logical (virtual) cores # over-subscribing each physical core with 2x # hyperthreading led to a slight (3.5%) speedup on Cori's Intel Xeon E5-2698 v3, # so we do the same here @@ -33,7 +32,7 @@ export WARPX_NMPI_PER_NODE=2 # for N>9, also equally over close-by logical cores export OMP_PROC_BIND=spread export OMP_PLACES=threads -export OMP_NUM_THREADS=18 +export OMP_NUM_THREADS=112 EXE="" # e.g. 
./warpx diff --git a/Tools/machines/quartz-llnl/quartz_warpx.profile.example b/Tools/machines/dane-llnl/dane_warpx.profile.example similarity index 62% rename from Tools/machines/quartz-llnl/quartz_warpx.profile.example rename to Tools/machines/dane-llnl/dane_warpx.profile.example index f296a0738ff..1d272979bd1 100644 --- a/Tools/machines/quartz-llnl/quartz_warpx.profile.example +++ b/Tools/machines/dane-llnl/dane_warpx.profile.example @@ -2,7 +2,7 @@ #export proj="" # edit this and comment in # required dependencies -module load cmake/3.23.1 +module load cmake/3.26.3 module load clang/14.0.6-magic module load mvapich2/2.3.7 @@ -15,38 +15,38 @@ module load boost/1.80.0 # optional: for openPMD support module load hdf5-parallel/1.14.0 -SW_DIR="/usr/workspace/${USER}/quartz" -export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.1:$CMAKE_PREFIX_PATH +SW_DIR="/usr/workspace/${USER}/dane" +export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.6:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.8.3:$CMAKE_PREFIX_PATH export PATH=${SW_DIR}/adios2-2.8.3/bin:${PATH} # optional: for PSATD in RZ geometry support -export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.05.31:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.05.31:$CMAKE_PREFIX_PATH -export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH +export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.10.26:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.10.26:$CMAKE_PREFIX_PATH +export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.10.26/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.10.26/lib64:$LD_LIBRARY_PATH # optional: for Python bindings -module load python/3.9.12 +module load python/3.12.2 -if [ -d "${SW_DIR}/venvs/warpx-quartz" ] +if [ -d "${SW_DIR}/venvs/warpx-dane" ] then - source ${SW_DIR}/venvs/warpx-quartz/bin/activate + source ${SW_DIR}/venvs/warpx-dane/bin/activate fi # optional: an alias to request an interactive node for two hours -alias getNode="srun --time=0:30:00 --nodes=1 --ntasks-per-node=2 --cpus-per-task=18 -p pdebug --pty bash" +alias getNode="srun --time=0:30:00 --nodes=1 --ntasks-per-node=2 --cpus-per-task=56 -p pdebug --pty bash" # an alias to run a command on a batch node for up to 30min # usage: runNode -alias runNode="srun --time=0:30:00 --nodes=1 --ntasks-per-node=2 --cpus-per-task=18 -p pdebug" +alias runNode="srun --time=0:30:00 --nodes=1 --ntasks-per-node=2 --cpus-per-task=56 -p pdebug" # fix system defaults: do not escape $ with a \ on tab completion shopt -s direxpand -# optimize CPU microarchitecture for Intel Xeon E5-2695 v4 +# optimize CPU microarchitecture for Intel Sapphire Rapids # note: the cc/CC/ftn wrappers below add those -export CXXFLAGS="-march=broadwell" -export CFLAGS="-march=broadwell" +export CXXFLAGS="-march=sapphirerapids" +export CFLAGS="-march=sapphirerapids" # compiler environment hints export CC=$(which clang) diff --git a/Tools/machines/quartz-llnl/install_dependencies.sh b/Tools/machines/dane-llnl/install_dependencies.sh similarity index 58% rename from Tools/machines/quartz-llnl/install_dependencies.sh rename to Tools/machines/dane-llnl/install_dependencies.sh index cfb01769384..0415d7fa8cc 100755 --- a/Tools/machines/quartz-llnl/install_dependencies.sh +++ b/Tools/machines/dane-llnl/install_dependencies.sh @@ -1,10 +1,10 @@ #!/bin/bash # -# Copyright 2023 The WarpX Community +# Copyright 2024 The WarpX Community # # This file is part of WarpX. 
# -# Author: Axel Huebl +# Author: Axel Huebl, David Grote # License: BSD-3-Clause-LBNL # Exit on first error encountered ############################################# @@ -14,13 +14,13 @@ set -eu -o pipefail # Check: ###################################################################### # -# Was quartz_warpx.profile sourced and configured correctly? -if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in your quartz_warpx.profile file! Please edit its line 2 to continue!"; exit 1; fi +# Was dane_warpx.profile sourced and configured correctly? +if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in your dane_warpx.profile file! Please edit its line 2 to continue!"; exit 1; fi # Remove old dependencies ##################################################### # -SW_DIR="/usr/workspace/${USER}/quartz" +SW_DIR="/usr/workspace/${USER}/dane" rm -rf ${SW_DIR} mkdir -p ${SW_DIR} @@ -41,13 +41,13 @@ if [ -d ${HOME}/src/c-blosc ] then cd ${HOME}/src/c-blosc git fetch --prune - git checkout v1.21.1 + git checkout v1.21.6 cd - else - git clone -b v1.21.1 https://github.com/Blosc/c-blosc.git ${HOME}/src/c-blosc + git clone -b v1.21.6 https://github.com/Blosc/c-blosc.git ${HOME}/src/c-blosc fi -cmake -S ${HOME}/src/c-blosc -B ${build_dir}/c-blosc-quartz-build -DBUILD_TESTS=OFF -DBUILD_BENCHMARKS=OFF -DDEACTIVATE_AVX2=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/c-blosc-1.21.1 -cmake --build ${build_dir}/c-blosc-quartz-build --target install --parallel 6 +cmake -S ${HOME}/src/c-blosc -B ${build_dir}/c-blosc-dane-build -DBUILD_TESTS=OFF -DBUILD_BENCHMARKS=OFF -DDEACTIVATE_AVX2=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/c-blosc-1.21.6 +cmake --build ${build_dir}/c-blosc-dane-build --target install --parallel 6 # ADIOS2 if [ -d ${HOME}/src/adios2 ] @@ -59,44 +59,44 @@ then else git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git ${HOME}/src/adios2 fi -cmake -S ${HOME}/src/adios2 -B ${build_dir}/adios2-quartz-build -DBUILD_TESTING=OFF -DADIOS2_BUILD_EXAMPLES=OFF -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_SST=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3 -cmake --build ${build_dir}/adios2-quartz-build --target install -j 6 +cmake -S ${HOME}/src/adios2 -B ${build_dir}/adios2-dane-build -DBUILD_TESTING=OFF -DADIOS2_BUILD_EXAMPLES=OFF -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_SST=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3 +cmake --build ${build_dir}/adios2-dane-build --target install -j 6 # BLAS++ (for PSATD+RZ) if [ -d ${HOME}/src/blaspp ] then cd ${HOME}/src/blaspp git fetch --prune - git checkout v2024.05.31 + git checkout v2024.10.26 cd - else - git clone -b v2024.05.31 https://github.com/icl-utk-edu/blaspp.git ${HOME}/src/blaspp + git clone -b v2024.10.26 https://github.com/icl-utk-edu/blaspp.git ${HOME}/src/blaspp fi -cmake -S ${HOME}/src/blaspp -B ${build_dir}/blaspp-quartz-build -Duse_openmp=ON -Duse_cmake_find_blas=ON -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=${SW_DIR}/blaspp-2024.05.31 -cmake --build ${build_dir}/blaspp-quartz-build --target install --parallel 6 +cmake -S ${HOME}/src/blaspp -B ${build_dir}/blaspp-dane-build -Duse_openmp=ON -Duse_cmake_find_blas=ON -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=${SW_DIR}/blaspp-2024.10.26 +cmake --build ${build_dir}/blaspp-dane-build --target install --parallel 6 # LAPACK++ (for PSATD+RZ) if [ -d ${HOME}/src/lapackpp ] then cd ${HOME}/src/lapackpp git fetch --prune - git 
checkout v2024.05.31 + git checkout v2024.10.26 cd - else - git clone -b v2024.05.31 https://github.com/icl-utk-edu/lapackpp.git ${HOME}/src/lapackpp + git clone -b v2024.10.26 https://github.com/icl-utk-edu/lapackpp.git ${HOME}/src/lapackpp fi -CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S ${HOME}/src/lapackpp -B ${build_dir}/lapackpp-quartz-build -Duse_cmake_find_lapack=ON -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=${SW_DIR}/lapackpp-2024.05.31 -cmake --build ${build_dir}/lapackpp-quartz-build --target install --parallel 6 +CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S ${HOME}/src/lapackpp -B ${build_dir}/lapackpp-dane-build -Duse_cmake_find_lapack=ON -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=${SW_DIR}/lapackpp-2024.10.26 +cmake --build ${build_dir}/lapackpp-dane-build --target install --parallel 6 # Python ###################################################################### # python3 -m pip install --upgrade --user virtualenv -rm -rf ${SW_DIR}/venvs/warpx-quartz -python3 -m venv ${SW_DIR}/venvs/warpx-quartz -source ${SW_DIR}/venvs/warpx-quartz/bin/activate +rm -rf ${SW_DIR}/venvs/warpx-dane +python3 -m venv ${SW_DIR}/venvs/warpx-dane +source ${SW_DIR}/venvs/warpx-dane/bin/activate python3 -m pip install --upgrade pip -python3 -m pip cache purge +#python3 -m pip cache purge python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel From 2343bf58c1a37091c59fd3091e697bbebd99cd17 Mon Sep 17 00:00:00 2001 From: David Grote Date: Mon, 16 Dec 2024 16:17:08 -0800 Subject: [PATCH 120/278] Update Dane install to put sources in the workspace directory (#5509) This avoids issues with running out of disk space in the home directory (which is not very large). --- Docs/source/install/hpc/dane.rst | 17 ++++---- .../dane-llnl/dane_warpx.profile.example | 14 +++---- .../dane-llnl/install_dependencies.sh | 40 +++++++++---------- 3 files changed, 36 insertions(+), 35 deletions(-) diff --git a/Docs/source/install/hpc/dane.rst b/Docs/source/install/hpc/dane.rst index e9af32130f5..2e0efc99391 100644 --- a/Docs/source/install/hpc/dane.rst +++ b/Docs/source/install/hpc/dane.rst @@ -24,18 +24,19 @@ If you are new to this system, **please see the following resources**: Preparation ----------- -Use the following commands to download the WarpX source code: +Use the following commands to download the WarpX source code. +Note that these commands and the shell scripts all assume the bash shell. .. code-block:: bash - git clone https://github.com/ECP-WarpX/WarpX.git $HOME/src/warpx + git clone https://github.com/ECP-WarpX/WarpX.git /usr/workspace/${USER}/dane/src/warpx We use system software modules, add environment hints and further dependencies via the file ``$HOME/dane_warpx.profile``. Create it now: .. code-block:: bash - cp $HOME/src/warpx/Tools/machines/dane-llnl/dane_warpx.profile.example $HOME/dane_warpx.profile + cp /usr/workspace/${USER}/dane/src/warpx/Tools/machines/dane-llnl/dane_warpx.profile.example $HOME/dane_warpx.profile .. dropdown:: Script Details :color: light @@ -67,7 +68,7 @@ Finally, since Dane does not yet provide software modules for some of our depend .. code-block:: bash - bash $HOME/src/warpx/Tools/machines/dane-llnl/install_dependencies.sh + bash /usr/workspace/${USER}/dane/src/warpx/Tools/machines/dane-llnl/install_dependencies.sh source /usr/workspace/${USER}/dane/venvs/warpx-dane/bin/activate .. 
dropdown:: Script Details @@ -88,13 +89,13 @@ Use the following :ref:`cmake commands ` to compile the applicat .. code-block:: bash - cd $HOME/src/warpx + cd /usr/workspace/${USER}/dane/src/warpx rm -rf build_dane cmake -S . -B build_dane -DWarpX_FFT=ON -DWarpX_QED_TABLE_GEN=ON -DWarpX_DIMS="1;2;RZ;3" cmake --build build_dane -j 6 -The WarpX application executables are now in ``$HOME/src/warpx/build_dane/bin/``. +The WarpX application executables are now in ``/usr/workspace/${USER}/dane/src/warpx/build_dane/bin/``. Additionally, the following commands will install WarpX as a Python module: .. code-block:: bash @@ -118,7 +119,7 @@ If you already installed WarpX in the past and want to update it, start by getti .. code-block:: bash - cd $HOME/src/warpx + cd /usr/workspace/${USER}/dane/src/warpx # read the output of this command - does it look ok? git status @@ -137,7 +138,7 @@ And, if needed, - log out and into the system, activate the now updated environment profile as usual, - :ref:`execute the dependency install scripts `. -As a last step, clean the build directory ``rm -rf $HOME/src/warpx/build_dane`` and rebuild WarpX. +As a last step, clean the build directory ``rm -rf /usr/workspace/${USER}/dane/src/warpx/build_dane`` and rebuild WarpX. .. _running-cpp-dane: diff --git a/Tools/machines/dane-llnl/dane_warpx.profile.example b/Tools/machines/dane-llnl/dane_warpx.profile.example index 1d272979bd1..dcb895509cc 100644 --- a/Tools/machines/dane-llnl/dane_warpx.profile.example +++ b/Tools/machines/dane-llnl/dane_warpx.profile.example @@ -16,15 +16,15 @@ module load boost/1.80.0 module load hdf5-parallel/1.14.0 SW_DIR="/usr/workspace/${USER}/dane" -export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.6:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.8.3:$CMAKE_PREFIX_PATH -export PATH=${SW_DIR}/adios2-2.8.3/bin:${PATH} +export CMAKE_PREFIX_PATH=${SW_DIR}/install/c-blosc-1.21.6:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=${SW_DIR}/install/adios2-2.8.3:$CMAKE_PREFIX_PATH +export PATH=${SW_DIR}/install/adios2-2.8.3/bin:${PATH} # optional: for PSATD in RZ geometry support -export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.10.26:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.10.26:$CMAKE_PREFIX_PATH -export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.10.26/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.10.26/lib64:$LD_LIBRARY_PATH +export CMAKE_PREFIX_PATH=${SW_DIR}/install/blaspp-2024.10.26:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=${SW_DIR}/install/lapackpp-2024.10.26:$CMAKE_PREFIX_PATH +export LD_LIBRARY_PATH=${SW_DIR}/install/blaspp-2024.10.26/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=${SW_DIR}/install/lapackpp-2024.10.26/lib64:$LD_LIBRARY_PATH # optional: for Python bindings module load python/3.12.2 diff --git a/Tools/machines/dane-llnl/install_dependencies.sh b/Tools/machines/dane-llnl/install_dependencies.sh index 0415d7fa8cc..06bee0cead8 100755 --- a/Tools/machines/dane-llnl/install_dependencies.sh +++ b/Tools/machines/dane-llnl/install_dependencies.sh @@ -18,11 +18,11 @@ set -eu -o pipefail if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in your dane_warpx.profile file! 
Please edit its line 2 to continue!"; exit 1; fi -# Remove old dependencies ##################################################### +# The src directory should have already been created when cloning WarpX ####### # SW_DIR="/usr/workspace/${USER}/dane" -rm -rf ${SW_DIR} -mkdir -p ${SW_DIR} +rm -rf ${SW_DIR}/install +mkdir -p ${SW_DIR}/install # remove common user mistakes in python, located in .local instead of a venv python3 -m pip uninstall -qq -y pywarpx @@ -37,55 +37,55 @@ python3 -m pip uninstall -qqq -y mpi4py 2>/dev/null || true build_dir=$(mktemp -d) # c-blosc (I/O compression) -if [ -d ${HOME}/src/c-blosc ] +if [ -d ${SW_DIR}/src/c-blosc ] then - cd ${HOME}/src/c-blosc + cd ${SW_DIR}/src/c-blosc git fetch --prune git checkout v1.21.6 cd - else - git clone -b v1.21.6 https://github.com/Blosc/c-blosc.git ${HOME}/src/c-blosc + git clone -b v1.21.6 https://github.com/Blosc/c-blosc.git ${SW_DIR}/src/c-blosc fi -cmake -S ${HOME}/src/c-blosc -B ${build_dir}/c-blosc-dane-build -DBUILD_TESTS=OFF -DBUILD_BENCHMARKS=OFF -DDEACTIVATE_AVX2=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/c-blosc-1.21.6 +cmake -S ${SW_DIR}/src/c-blosc -B ${build_dir}/c-blosc-dane-build -DBUILD_TESTS=OFF -DBUILD_BENCHMARKS=OFF -DDEACTIVATE_AVX2=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/install/c-blosc-1.21.6 cmake --build ${build_dir}/c-blosc-dane-build --target install --parallel 6 # ADIOS2 -if [ -d ${HOME}/src/adios2 ] +if [ -d ${SW_DIR}/src/adios2 ] then - cd ${HOME}/src/adios2 + cd ${SW_DIR}/src/adios2 git fetch --prune git checkout v2.8.3 cd - else - git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git ${HOME}/src/adios2 + git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git ${SW_DIR}/src/adios2 fi -cmake -S ${HOME}/src/adios2 -B ${build_dir}/adios2-dane-build -DBUILD_TESTING=OFF -DADIOS2_BUILD_EXAMPLES=OFF -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_SST=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3 +cmake -S ${SW_DIR}/src/adios2 -B ${build_dir}/adios2-dane-build -DBUILD_TESTING=OFF -DADIOS2_BUILD_EXAMPLES=OFF -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_SST=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/install/adios2-2.8.3 cmake --build ${build_dir}/adios2-dane-build --target install -j 6 # BLAS++ (for PSATD+RZ) -if [ -d ${HOME}/src/blaspp ] +if [ -d ${SW_DIR}/src/blaspp ] then - cd ${HOME}/src/blaspp + cd ${SW_DIR}/src/blaspp git fetch --prune git checkout v2024.10.26 cd - else - git clone -b v2024.10.26 https://github.com/icl-utk-edu/blaspp.git ${HOME}/src/blaspp + git clone -b v2024.10.26 https://github.com/icl-utk-edu/blaspp.git ${SW_DIR}/src/blaspp fi -cmake -S ${HOME}/src/blaspp -B ${build_dir}/blaspp-dane-build -Duse_openmp=ON -Duse_cmake_find_blas=ON -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=${SW_DIR}/blaspp-2024.10.26 +cmake -S ${SW_DIR}/src/blaspp -B ${build_dir}/blaspp-dane-build -Duse_openmp=ON -Duse_cmake_find_blas=ON -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=${SW_DIR}/install/blaspp-2024.10.26 cmake --build ${build_dir}/blaspp-dane-build --target install --parallel 6 # LAPACK++ (for PSATD+RZ) -if [ -d ${HOME}/src/lapackpp ] +if [ -d ${SW_DIR}/src/lapackpp ] then - cd ${HOME}/src/lapackpp + cd ${SW_DIR}/src/lapackpp git fetch --prune git checkout v2024.10.26 cd - else - git clone -b v2024.10.26 https://github.com/icl-utk-edu/lapackpp.git ${HOME}/src/lapackpp + git clone -b v2024.10.26 https://github.com/icl-utk-edu/lapackpp.git ${SW_DIR}/src/lapackpp fi 
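 # hint (a sketch of the rationale, not a new flag): the CXXFLAGS="-DLAPACK_FORTRAN_ADD_"
 # passed below tells LAPACK++ which Fortran name-mangling convention to assume
 # (a trailing underscore, e.g. dgemm -> dgemm_); this presumably matches the system LAPACK on Dane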
-CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S ${HOME}/src/lapackpp -B ${build_dir}/lapackpp-dane-build -Duse_cmake_find_lapack=ON -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=${SW_DIR}/lapackpp-2024.10.26 +CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S ${SW_DIR}/src/lapackpp -B ${build_dir}/lapackpp-dane-build -Duse_cmake_find_lapack=ON -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=${SW_DIR}/install/lapackpp-2024.10.26 cmake --build ${build_dir}/lapackpp-dane-build --target install --parallel 6 @@ -111,7 +111,7 @@ python3 -m pip install --upgrade matplotlib python3 -m pip install --upgrade yt # install or update WarpX dependencies such as picmistandard -python3 -m pip install --upgrade -r ${HOME}/src/warpx/requirements.txt +python3 -m pip install --upgrade -r ${SW_DIR}/src/warpx/requirements.txt # ML dependencies python3 -m pip install --upgrade torch From 2a4cde239e84daeedf6481db444969a44c3b5f65 Mon Sep 17 00:00:00 2001 From: Arianna Formenti Date: Mon, 16 Dec 2024 16:26:30 -0800 Subject: [PATCH 121/278] Add quasi-3D Integrated Green Functions solver (#5089) This PR implements a quasi-3D Poisson solver based on the Integrated Green's functions. It solves the 2D Poisson equation on the `(x,y)` plane for every slice `z`. It is useful for beam-beam simulations. See PR #4648 for the full 3D solver. - [x] AMReX FFT - works with https://github.com/AMReX-Codes/amrex/pull/4255 - [x] CI test - [x] documentation :hocho: :fish: --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- Docs/source/usage/parameters.rst | 8 ++ .../open_bc_poisson_solver/CMakeLists.txt | 12 +++ .../Tests/open_bc_poisson_solver/analysis.py | 1 - ...puts_test_3d_open_bc_poisson_solver_sliced | 3 + ...test_3d_open_bc_poisson_solver_sliced.json | 21 ++++ .../ElectrostaticSolver.H | 7 +- .../ElectrostaticSolver.cpp | 12 ++- .../ElectrostaticSolvers/LabFrameExplicitES.H | 0 .../LabFrameExplicitES.cpp | 2 +- .../RelativisticExplicitES.H | 0 .../RelativisticExplicitES.cpp | 4 +- .../fields/IntegratedGreenFunctionSolver.H | 96 +++++++++++++------ .../fields/IntegratedGreenFunctionSolver.cpp | 33 +++++-- Source/ablastr/fields/PoissonSolver.H | 11 ++- 14 files changed, 162 insertions(+), 48 deletions(-) create mode 100644 Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver_sliced create mode 100644 Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver_sliced.json mode change 100644 => 100755 Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H mode change 100644 => 100755 Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp mode change 100644 => 100755 Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H mode change 100644 => 100755 Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp mode change 100644 => 100755 Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H mode change 100644 => 100755 Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp mode change 100644 => 100755 Source/ablastr/fields/IntegratedGreenFunctionSolver.H mode change 100644 => 100755 Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp mode change 100644 => 100755 Source/ablastr/fields/PoissonSolver.H diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 3787acbd639..310a7986abf 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -290,6 
+290,14 @@ Overall simulation parameters In electromagnetic mode, this solver can be used to initialize the species' self fields (``<species>.initialize_self_fields=1``) provided that the field BCs are PML (``boundary.field_lo,hi = PML``). + * ``warpx.use_2d_slices_fft_solver`` (`bool`, default: 0): Select the type of Integrated Green Function solver. + If 0, solve the Poisson equation in full 3D geometry. + If 1, solve the Poisson equation in a quasi-3D geometry, neglecting the :math:`z` derivatives in the Laplacian of the Poisson equation. + In practice, in this case, the code performs many 2D Poisson solves on all :math:`(x,y)` slices, each slice at a given :math:`z`. + This is often a good approximation for ultra-relativistic beams propagating along the :math:`z` direction, with the relativistic solver. + As a consequence, this solver does not need to do an FFT along the :math:`z` direction, + and instead uses only transverse FFTs (along :math:`x` and :math:`y`) at each :math:`z` position (or :math:`z` "slice"). + * ``warpx.self_fields_required_precision`` (`float`, default: 1.e-11) The relative precision with which the electrostatic space-charge fields should be calculated. More specifically, the space-charge fields are diff --git a/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt b/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt index c5ec4583da1..95a8d23687e 100644 --- a/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt +++ b/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt @@ -12,3 +12,15 @@ if(WarpX_FFT) OFF # dependency ) endif() + +if(WarpX_FFT) + add_warpx_test( + test_3d_open_bc_poisson_solver_sliced # name + 3 # dims + 2 # nprocs + inputs_test_3d_open_bc_poisson_solver_sliced # inputs + analysis.py # analysis + diags/diag1000001 # output + OFF # dependency + ) +endif() diff --git a/Examples/Tests/open_bc_poisson_solver/analysis.py b/Examples/Tests/open_bc_poisson_solver/analysis.py index 8ffd9ef52e2..25b55503cff 100755 --- a/Examples/Tests/open_bc_poisson_solver/analysis.py +++ b/Examples/Tests/open_bc_poisson_solver/analysis.py @@ -61,7 +61,6 @@ def evaluate_E(x, y, z): assert np.allclose(Ex_warpx, Ex_theory, rtol=0.032, atol=0) assert np.allclose(Ey_warpx, Ey_theory, rtol=0.029, atol=0) - # compare checksums evaluate_checksum( test_name=os.path.split(os.getcwd())[1], diff --git a/Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver_sliced b/Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver_sliced new file mode 100644 index 00000000000..e2639c59e74 --- /dev/null +++ b/Examples/Tests/open_bc_poisson_solver/inputs_test_3d_open_bc_poisson_solver_sliced @@ -0,0 +1,3 @@ +FILE = inputs_test_3d_open_bc_poisson_solver + +warpx.use_2d_slices_fft_solver = 1 diff --git a/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver_sliced.json b/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver_sliced.json new file mode 100644 index 00000000000..fd4a9afbc29 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_open_bc_poisson_solver_sliced.json @@ -0,0 +1,21 @@ +{ + "lev=0": { + "Bx": 100915933.44551668, + "By": 157610622.18551716, + "Bz": 2.598515299403035e-15, + "Ex": 4.725065270620093e+16, + "Ey": 3.0253948989229424e+16, + "Ez": 2787743.3330717986, + "rho": 10994013582437.193 + }, + "electron": { + "particle_momentum_x": 5.701277606056779e-19, + "particle_momentum_y": 3.650451663675671e-19, + "particle_momentum_z": 1.145432768297242e-10, + "particle_position_x": 17.314086912497864,
"particle_position_y": 0.25836912671877954, + "particle_position_z": 10066.329600000008, + "particle_weight": 19969036501.910976 + } +} + diff --git a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H old mode 100644 new mode 100755 index e58af394a7a..f57cfff6080 --- a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H +++ b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H @@ -92,7 +92,8 @@ public: amrex::Real required_precision, amrex::Real absolute_tolerance, int max_iters, - int verbosity + int verbosity, + bool is_igf_2d_slices ) const; /** @@ -153,6 +154,10 @@ public: * 2 : convergence progress at every MLMG iteration */ int self_fields_verbosity = 2; + + /** Parameters for FFT Poisson solver aka IGF */ + // 0: full 3D, 1: many 2D z-slices (quasi-3D) + bool is_igf_2d_slices = false; }; #endif // WARPX_ELECTROSTATICSOLVER_H_ diff --git a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp old mode 100644 new mode 100755 index 0b1dca675be..429e007b4d0 --- a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp +++ b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.cpp @@ -38,7 +38,12 @@ void ElectrostaticSolver::ReadParameters () { pp_warpx, "self_fields_absolute_tolerance", self_fields_absolute_tolerance); utils::parser::queryWithParser( pp_warpx, "self_fields_max_iters", self_fields_max_iters); - pp_warpx.query("self_fields_verbosity", self_fields_verbosity); + utils::parser::queryWithParser( + pp_warpx, "self_fields_verbosity", self_fields_verbosity); + + // FFT solver flags + utils::parser::queryWithParser( + pp_warpx, "use_2d_slices_fft_solver", is_igf_2d_slices); } void @@ -121,7 +126,9 @@ ElectrostaticSolver::computePhi ( Real const required_precision, Real absolute_tolerance, int const max_iters, - int const verbosity) const + int const verbosity, + bool const is_igf_2d +) const { using ablastr::fields::Direction; @@ -202,6 +209,7 @@ ElectrostaticSolver::computePhi ( warpx.boxArray(), WarpX::grid_type, is_solver_igf_on_lev0, + is_igf_2d, EB::enabled(), WarpX::do_single_precision_comms, warpx.refRatio(), diff --git a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.H old mode 100644 new mode 100755 diff --git a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp old mode 100644 new mode 100755 index e973ae66975..643efefb2f3 --- a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp +++ b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp @@ -75,7 +75,7 @@ void LabFrameExplicitES::ComputeSpaceChargeField ( // Use the AMREX MLMG or the FFT (IGF) solver otherwise computePhi(rho_fp, phi_fp, beta, self_fields_required_precision, self_fields_absolute_tolerance, self_fields_max_iters, - self_fields_verbosity); + self_fields_verbosity, is_igf_2d_slices); #endif } diff --git a/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.H old mode 100644 new mode 100755 diff --git a/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp old mode 100644 new mode 100755 index 69647da1702..0b1bcecd1e5 --- a/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp +++ 
b/Source/FieldSolver/ElectrostaticSolvers/RelativisticExplicitES.cpp @@ -130,7 +130,7 @@ void RelativisticExplicitES::AddSpaceChargeField ( computePhi( amrex::GetVecOfPtrs(rho), amrex::GetVecOfPtrs(phi), beta, pc.self_fields_required_precision, pc.self_fields_absolute_tolerance, pc.self_fields_max_iters, - pc.self_fields_verbosity ); + pc.self_fields_verbosity, is_igf_2d_slices); // Compute the corresponding electric and magnetic field, from the potential phi computeE( Efield_fp, amrex::GetVecOfPtrs(phi), beta ); @@ -168,7 +168,7 @@ void RelativisticExplicitES::AddBoundaryField (ablastr::fields::MultiLevelVector computePhi( amrex::GetVecOfPtrs(rho), amrex::GetVecOfPtrs(phi), beta, self_fields_required_precision, self_fields_absolute_tolerance, self_fields_max_iters, - self_fields_verbosity ); + self_fields_verbosity, is_igf_2d_slices); // Compute the corresponding electric field, from the potential phi. computeE( Efield_fp, amrex::GetVecOfPtrs(phi), beta ); diff --git a/Source/ablastr/fields/IntegratedGreenFunctionSolver.H b/Source/ablastr/fields/IntegratedGreenFunctionSolver.H old mode 100644 new mode 100755 index 28885e167a3..9492cff885e --- a/Source/ablastr/fields/IntegratedGreenFunctionSolver.H +++ b/Source/ablastr/fields/IntegratedGreenFunctionSolver.H @@ -15,13 +15,14 @@ #include #include - #include #include namespace ablastr::fields { + using namespace amrex::literals; + /** @brief Implements equation 2 in https://doi.org/10.1103/PhysRevSTAB.10.129901 * with some modification to symmetrize the function. @@ -30,54 +31,92 @@ namespace ablastr::fields * @param[in] y y-coordinate of given location * @param[in] z z-coordinate of given location * - * @return the integrated Green function G + * @return the integrated Green function G in 3D */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE amrex::Real - IntegratedPotential (amrex::Real x, amrex::Real y, amrex::Real z) + IntegratedPotential3D (amrex::Real x, amrex::Real y, amrex::Real z) { - using namespace amrex::literals; - amrex::Real const r = std::sqrt( x*x + y*y + z*z ); - amrex::Real const G = - - 0.5_rt * z*z * std::atan( x*y/(z*r) ) - - 0.5_rt * y*y * std::atan( x*z/(y*r) ) - - 0.5_rt * x*x * std::atan( y*z/(x*r) ) - + y*z*std::asinh( x/std::sqrt(y*y + z*z) ) - + x*z*std::asinh( y/std::sqrt(x*x + z*z) ) - + x*y*std::asinh( z/std::sqrt(x*x + y*y) ); + amrex::Real const G = - 0.5_rt * z*z * std::atan( x*y/(z*r) ) + - 0.5_rt * y*y * std::atan( x*z/(y*r) ) + - 0.5_rt * x*x * std::atan( y*z/(x*r) ) + + y*z*std::asinh( x/std::sqrt(y*y + z*z) ) + + x*z*std::asinh( y/std::sqrt(x*x + z*z) ) + + x*y*std::asinh( z/std::sqrt(x*x + y*y) ); + return G; + } + + + /** @brief Implements equation 58 in https://doi.org/10.1016/j.jcp.2004.01.008 + * + * @param[in] x x-coordinate of given location + * @param[in] y y-coordinate of given location + * + * @return the integrated Green function G in 2D + */ + AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE + amrex::Real + IntegratedPotential2D (amrex::Real x, amrex::Real y) + { + amrex::Real const G = 3_rt*x*y + - x*x * std::atan(y/x) + - y*y * std::atan(x/y) + - x*y * std::log(x*x + y*y); return G; } + /** @brief add * * @param[in] x x-coordinate of given location * @param[in] y y-coordinate of given location * @param[in] z z-coordinate of given location + * @param[in] dx cell size along x + * @param[in] dy cell size along y + * @param[in] dz cell size along z * - * @return the sum of integrated Green function G + * @return the sum of integrated Green function G in 3D */ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE 
amrex::Real - SumOfIntegratedPotential (amrex::Real x, amrex::Real y, amrex::Real z, amrex::Real dx, amrex::Real dy, amrex::Real dz) + SumOfIntegratedPotential3D (amrex::Real x, amrex::Real y, amrex::Real z, amrex::Real dx, amrex::Real dy, amrex::Real dz) { - using namespace amrex::literals; - + return 1._rt/(4._rt*ablastr::constant::math::pi*ablastr::constant::SI::ep0) * ( + IntegratedPotential3D( x+0.5_rt*dx, y+0.5_rt*dy, z+0.5_rt*dz ) + - IntegratedPotential3D( x-0.5_rt*dx, y+0.5_rt*dy, z+0.5_rt*dz ) + - IntegratedPotential3D( x+0.5_rt*dx, y-0.5_rt*dy, z+0.5_rt*dz ) + + IntegratedPotential3D( x-0.5_rt*dx, y-0.5_rt*dy, z+0.5_rt*dz ) + - IntegratedPotential3D( x+0.5_rt*dx, y+0.5_rt*dy, z-0.5_rt*dz ) + + IntegratedPotential3D( x-0.5_rt*dx, y+0.5_rt*dy, z-0.5_rt*dz ) + + IntegratedPotential3D( x+0.5_rt*dx, y-0.5_rt*dy, z-0.5_rt*dz ) + - IntegratedPotential3D( x-0.5_rt*dx, y-0.5_rt*dy, z-0.5_rt*dz ) + ); + } - amrex::Real const G_value = 1._rt/(4._rt*ablastr::constant::math::pi*ablastr::constant::SI::ep0) * ( - IntegratedPotential( x+0.5_rt*dx, y+0.5_rt*dy, z+0.5_rt*dz ) - - IntegratedPotential( x-0.5_rt*dx, y+0.5_rt*dy, z+0.5_rt*dz ) - - IntegratedPotential( x+0.5_rt*dx, y-0.5_rt*dy, z+0.5_rt*dz ) - + IntegratedPotential( x-0.5_rt*dx, y-0.5_rt*dy, z+0.5_rt*dz ) - - IntegratedPotential( x+0.5_rt*dx, y+0.5_rt*dy, z-0.5_rt*dz ) - + IntegratedPotential( x-0.5_rt*dx, y+0.5_rt*dy, z-0.5_rt*dz ) - + IntegratedPotential( x+0.5_rt*dx, y-0.5_rt*dy, z-0.5_rt*dz ) - - IntegratedPotential( x-0.5_rt*dx, y-0.5_rt*dy, z-0.5_rt*dz ) - ); - return G_value; + /** @brief add + * + * @param[in] x x-coordinate of given location + * @param[in] y y-coordinate of given location + * @param[in] dx cell size along x + * @param[in] dy cell size along y + * + * @return the sum of integrated Green function G in 2D + */ + AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE + amrex::Real + SumOfIntegratedPotential2D (amrex::Real x, amrex::Real y, amrex::Real dx, amrex::Real dy) + { + return 1._rt/(4._rt*ablastr::constant::math::pi*ablastr::constant::SI::ep0) * ( + IntegratedPotential2D( x+0.5_rt*dx, y+0.5_rt*dy ) + - IntegratedPotential2D( x+0.5_rt*dx, y-0.5_rt*dy ) + - IntegratedPotential2D( x-0.5_rt*dx, y+0.5_rt*dy ) + + IntegratedPotential2D( x-0.5_rt*dx, y-0.5_rt*dy ) + ); } + /** @brief Compute the electrostatic potential using the Integrated Green Function method * as in http://dx.doi.org/10.1103/PhysRevSTAB.9.044204 * @@ -90,7 +129,8 @@ namespace ablastr::fields computePhiIGF (amrex::MultiFab const & rho, amrex::MultiFab & phi, std::array const & cell_size, - amrex::BoxArray const & ba); + amrex::BoxArray const & ba, + bool is_igf_2d_slices); } // namespace ablastr::fields diff --git a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp old mode 100644 new mode 100755 index b142978c8be..998bb179f5b --- a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp +++ b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp @@ -34,7 +34,8 @@ void computePhiIGF ( amrex::MultiFab const & rho, amrex::MultiFab & phi, std::array const & cell_size, - amrex::BoxArray const & ba) + amrex::BoxArray const & ba, + bool const is_igf_2d_slices) { using namespace amrex::literals; @@ -45,9 +46,6 @@ computePhiIGF ( amrex::MultiFab const & rho, domain.surroundingNodes(); // get nodal points, since `phi` and `rho` are nodal domain.grow( phi.nGrowVect() ); // include guard cells - // Do we grow the domain in the z-direction in the 2D mode? 
- bool const do_2d_fft = false; - int nprocs = amrex::ParallelDescriptor::NProcs(); { amrex::ParmParse pp("ablastr"); @@ -61,7 +59,7 @@ computePhiIGF ( amrex::MultiFab const & rho, } if (!obc_solver || obc_solver->Domain() != domain) { amrex::FFT::Info info{}; - if (do_2d_fft) { info.setBatchMode(true); } + if (is_igf_2d_slices) { info.setBatchMode(true); } // do 2D FFTs info.setNumProcs(nprocs); obc_solver = std::make_unique>(domain, info); } @@ -71,7 +69,9 @@ computePhiIGF ( amrex::MultiFab const & rho, amrex::Real const dy = cell_size[1]; amrex::Real const dz = cell_size[2]; - obc_solver->setGreensFunction( + if (!is_igf_2d_slices){ + // 2D sliced solver + obc_solver->setGreensFunction( [=] AMREX_GPU_DEVICE (int i, int j, int k) -> amrex::Real { int const i0 = i - lo[0]; @@ -80,9 +80,26 @@ computePhiIGF ( amrex::MultiFab const & rho, amrex::Real const x = i0*dx; amrex::Real const y = j0*dy; amrex::Real const z = k0*dz; - return SumOfIntegratedPotential(x, y, z, dx, dy, dz); + + return SumOfIntegratedPotential3D(x, y, z, dx, dy, dz); + }); + }else{ + // fully 3D solver + obc_solver->setGreensFunction( + [=] AMREX_GPU_DEVICE (int i, int j, int k) -> amrex::Real + { + int const i0 = i - lo[0]; + int const j0 = j - lo[1]; + amrex::Real const x = i0*dx; + amrex::Real const y = j0*dy; + amrex::ignore_unused(k); + + return SumOfIntegratedPotential2D(x, y, dx, dy); }); + } + obc_solver->solve(phi, rho); -} +} // computePhiIGF + } // namespace ablastr::fields diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H old mode 100644 new mode 100755 index aa9288fe950..1cc7d39e9b0 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -206,6 +206,7 @@ computePhi ( amrex::Vector const& grids, utils::enums::GridType grid_type, bool is_solver_igf_on_lev0, + [[maybe_unused]] bool const is_igf_2d, bool eb_enabled = false, bool do_single_precision_comms = false, std::optional > rel_ref_ratio = std::nullopt, @@ -233,13 +234,13 @@ computePhi ( #endif #if !defined(ABLASTR_USE_FFT) - ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, - "Must compile with FFT support to use the IGF solver!"); + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, + "Must compile with FFT support to use the IGF solver!"); #endif #if !defined(WARPX_DIM_3D) - ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, - "The FFT Poisson solver is currently only implemented for 3D!"); + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE( !is_solver_igf_on_lev0, + "The FFT Poisson solver is currently only implemented for 3D!"); #endif // Set the value of beta @@ -270,7 +271,7 @@ computePhi ( if ( max_norm_b == 0 ) { phi[lev]->setVal(0); } else { - computePhiIGF( *rho[lev], *phi[lev], dx_scaled, grids[lev] ); + computePhiIGF( *rho[lev], *phi[lev], dx_scaled, grids[lev], is_igf_2d); } continue; } From 22a65510dc7fdc5fdf0c5948380599d43ef60f80 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 17 Dec 2024 00:56:26 +0000 Subject: [PATCH 122/278] [pre-commit.ci] pre-commit autoupdate (#5513) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.2 → v0.8.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.2...v0.8.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git 
a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fffc41ce264..7c396c95b1d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.2 + rev: v0.8.3 hooks: # Run the linter - id: ruff From 6b2ca8b2a3d3a3c430ec67b6920aa5eb1c388f1d Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 16 Dec 2024 18:13:23 -0800 Subject: [PATCH 123/278] AMReX/pyAMReX/PICSAR: weekly update (#5512) - Weekly update to latest AMReX: ```console ./Tools/Release/updateAMReX.py ``` - Weekly update to latest pyAMReX: ```console ./Tools/Release/updatepyAMReX.py ``` - Weekly update to latest PICSAR (no changes since 24.09): ```console ./Tools/Release/updatePICSAR.py ``` --- .github/workflows/cuda.yml | 2 +- .github/workflows/dependencies/hip.sh | 1 + .github/workflows/dependencies/nvcc11-3.sh | 3 ++- .github/workflows/dependencies/nvcc11-8.sh | 3 ++- cmake/dependencies/AMReX.cmake | 2 +- cmake/dependencies/pyAMReX.cmake | 2 +- 6 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index a7cd884039b..e4967bea790 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -126,7 +126,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 96db0a665ff1e6bbe638490fd02d3aafb9188f6b && cd - + cd ../amrex && git checkout --detach b3f67385e62f387b548389222840486c0fffca57 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/.github/workflows/dependencies/hip.sh b/.github/workflows/dependencies/hip.sh index 1154bb05e58..bf15c2f7101 100755 --- a/.github/workflows/dependencies/hip.sh +++ b/.github/workflows/dependencies/hip.sh @@ -53,6 +53,7 @@ sudo apt-get install -y --no-install-recommends \ rocm-dev \ rocfft-dev \ rocprim-dev \ + rocsparse-dev \ rocrand-dev \ hiprand-dev diff --git a/.github/workflows/dependencies/nvcc11-3.sh b/.github/workflows/dependencies/nvcc11-3.sh index 92e2717e425..050b58b5947 100755 --- a/.github/workflows/dependencies/nvcc11-3.sh +++ b/.github/workflows/dependencies/nvcc11-3.sh @@ -41,7 +41,8 @@ sudo apt-get install -y \ cuda-nvml-dev-11-3 \ cuda-nvtx-11-3 \ libcufft-dev-11-3 \ - libcurand-dev-11-3 + libcurand-dev-11-3 \ + libcusparse-dev-11-3 sudo ln -s cuda-11.3 /usr/local/cuda # if we run out of temporary storage in CI: diff --git a/.github/workflows/dependencies/nvcc11-8.sh b/.github/workflows/dependencies/nvcc11-8.sh index 6089360392b..608f6c7a817 100755 --- a/.github/workflows/dependencies/nvcc11-8.sh +++ b/.github/workflows/dependencies/nvcc11-8.sh @@ -41,7 +41,8 @@ sudo apt-get install -y \ cuda-nvml-dev-11-8 \ cuda-nvtx-11-8 \ libcufft-dev-11-8 \ - libcurand-dev-11-8 + libcurand-dev-11-8 \ + libcusparse-dev-11-8 sudo ln -s cuda-11.8 /usr/local/cuda # if we run out of temporary storage in CI: diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 3733b729004..0066a3103cd 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -294,7 +294,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "96db0a665ff1e6bbe638490fd02d3aafb9188f6b" 
+set(WarpX_amrex_branch "b3f67385e62f387b548389222840486c0fffca57" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index 6f0e07bf79e..93c4cc63e5a 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "24.12" +set(WarpX_pyamrex_branch "cba1ca5098fd4edc83b2ae630c0391140fac55f4" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") From 2ea2dd813684765256bf5c28793ddd51381b3e49 Mon Sep 17 00:00:00 2001 From: David Grote Date: Mon, 16 Dec 2024 18:20:42 -0800 Subject: [PATCH 124/278] Fix FieldEnergy reduced diagnostic (#5498) Fix FieldEnergy reduced diagnostic to account for partial cell volumes on the domain boundaries. For fields that are nodal on the boundary, the integral is only over the fraction of the cell centered about the field value within the domain. This PR also tidies up the code for the diagnostic. --- Source/Diagnostics/ReducedDiags/FieldEnergy.H | 5 +- .../Diagnostics/ReducedDiags/FieldEnergy.cpp | 143 +++++++++--------- 2 files changed, 76 insertions(+), 72 deletions(-) diff --git a/Source/Diagnostics/ReducedDiags/FieldEnergy.H b/Source/Diagnostics/ReducedDiags/FieldEnergy.H index 40de174526e..fe17f15f071 100644 --- a/Source/Diagnostics/ReducedDiags/FieldEnergy.H +++ b/Source/Diagnostics/ReducedDiags/FieldEnergy.H @@ -40,13 +40,14 @@ public: void ComputeDiags(int step) final; /** - * \brief Calculate the integral of the field squared in RZ + * \brief Calculate the integral of the field squared, taking into + * account the fraction of the cell volume within the domain. 
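+ * For fields that are nodal along a given direction, the boundary nodes in
+ * that direction are weighted with half of a cell width (hence a quarter of
+ * the cell volume on edges where two nodal directions meet).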
* * \param field The MultiFab to be integrated * \param lev The refinement level * \return The integral */ - amrex::Real ComputeNorm2RZ(const amrex::MultiFab& field, int lev); + amrex::Real ComputeNorm2(const amrex::MultiFab& field, int lev); }; diff --git a/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp b/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp index 1a984368b4e..d16319c37e8 100644 --- a/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp +++ b/Source/Diagnostics/ReducedDiags/FieldEnergy.cpp @@ -30,7 +30,7 @@ #include #include -using namespace amrex; +using namespace amrex::literals; using warpx::fields::FieldType; // constructor @@ -40,7 +40,7 @@ FieldEnergy::FieldEnergy (const std::string& rd_name) // read number of levels int nLevel = 0; - const ParmParse pp_amr("amr"); + amrex::ParmParse const pp_amr("amr"); pp_amr.query("max_level", nLevel); nLevel += 1; @@ -48,7 +48,7 @@ FieldEnergy::FieldEnergy (const std::string& rd_name) // resize data array m_data.resize(noutputs*nLevel, 0.0_rt); - if (ParallelDescriptor::IOProcessor()) + if (amrex::ParallelDescriptor::IOProcessor()) { if ( m_write_header ) { @@ -84,10 +84,10 @@ void FieldEnergy::ComputeDiags (int step) if (!m_intervals.contains(step+1)) { return; } // get a reference to WarpX instance - auto & warpx = WarpX::GetInstance(); + auto const & warpx = WarpX::GetInstance(); // get number of level - const auto nLevel = warpx.finestLevel() + 1; + int const nLevel = warpx.finestLevel() + 1; using ablastr::fields::Direction; @@ -95,42 +95,29 @@ void FieldEnergy::ComputeDiags (int step) for (int lev = 0; lev < nLevel; ++lev) { // get MultiFab data at lev - const MultiFab & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); - const MultiFab & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); - const MultiFab & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); - const MultiFab & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); - const MultiFab & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); - const MultiFab & Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); + amrex::MultiFab const & Ex = *warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, lev); + amrex::MultiFab const & Ey = *warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, lev); + amrex::MultiFab const & Ez = *warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, lev); + amrex::MultiFab const & Bx = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, lev); + amrex::MultiFab const & By = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, lev); + amrex::MultiFab const & Bz = *warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, lev); // get cell volume - const std::array &dx = WarpX::CellSize(lev); - const amrex::Real dV = dx[0]*dx[1]*dx[2]; - -#if defined(WARPX_DIM_RZ) - amrex::Real const tmpEx = ComputeNorm2RZ(Ex, lev); - amrex::Real const tmpEy = ComputeNorm2RZ(Ey, lev); - amrex::Real const tmpEz = ComputeNorm2RZ(Ez, lev); - amrex::Real const Es = tmpEx + tmpEy + tmpEz; - - amrex::Real const tmpBx = ComputeNorm2RZ(Bx, lev); - amrex::Real const tmpBy = ComputeNorm2RZ(By, lev); - amrex::Real const tmpBz = ComputeNorm2RZ(Bz, lev); - amrex::Real const Bs = tmpBx + tmpBy + tmpBz; -#else - Geometry const & geom = warpx.Geom(lev); + std::array const &dx = WarpX::CellSize(lev); + amrex::Real const dV = dx[0]*dx[1]*dx[2]; // compute E squared - Real const tmpEx = Ex.norm2(0,geom.periodicity()); - Real const tmpEy = Ey.norm2(0,geom.periodicity()); - Real 
const tmpEz = Ez.norm2(0,geom.periodicity()); - Real const Es = tmpEx*tmpEx + tmpEy*tmpEy + tmpEz*tmpEz; + amrex::Real const tmpEx = ComputeNorm2(Ex, lev); + amrex::Real const tmpEy = ComputeNorm2(Ey, lev); + amrex::Real const tmpEz = ComputeNorm2(Ez, lev); // compute B squared - Real const tmpBx = Bx.norm2(0,geom.periodicity()); - Real const tmpBy = By.norm2(0,geom.periodicity()); - Real const tmpBz = Bz.norm2(0,geom.periodicity()); - Real const Bs = tmpBx*tmpBx + tmpBy*tmpBy + tmpBz*tmpBz; -#endif + amrex::Real const tmpBx = ComputeNorm2(Bx, lev); + amrex::Real const tmpBy = ComputeNorm2(By, lev); + amrex::Real const tmpBz = ComputeNorm2(Bz, lev); + + amrex::Real const Es = tmpEx + tmpEy + tmpEz; + amrex::Real const Bs = tmpBx + tmpBy + tmpBz; constexpr int noutputs = 3; // total energy, E-field energy and B-field energy constexpr int index_total = 0; @@ -156,15 +143,13 @@ void FieldEnergy::ComputeDiags (int step) } // end void FieldEnergy::ComputeDiags -// Function that computes the sum of the field squared in RZ +// Function that computes the sum of the field squared. +// This takes into account the fraction of the cell volumes within the domain +// and the cell volumes in cylindrical coordinates. amrex::Real -FieldEnergy::ComputeNorm2RZ(const amrex::MultiFab& field, const int lev) +FieldEnergy::ComputeNorm2(amrex::MultiFab const& field, [[maybe_unused]]int lev) { - // get a reference to WarpX instance - auto & warpx = WarpX::GetInstance(); - - Geometry const & geom = warpx.Geom(lev); - const amrex::Real dr = geom.CellSize(0); + amrex::IntVect const is_nodal = field.ixType().toIntVect(); amrex::ReduceOps reduce_ops; amrex::ReduceData reduce_data(reduce_ops); @@ -178,45 +163,63 @@ FieldEnergy::ComputeNorm2RZ(const amrex::MultiFab& field, const int lev) amrex::Array4 const& field_arr = field.array(mfi); - const amrex::Box tilebox = mfi.tilebox(); - amrex::Box tb = convert(tilebox, field.ixType().toIntVect()); + amrex::Box const tilebox = mfi.tilebox(); + amrex::Box const tb = convert(tilebox, is_nodal); + amrex::IntVect const tb_lo = tb.smallEnd(); + amrex::IntVect const tb_hi = tb.bigEnd(); +#if defined(WARPX_DIM_RZ) // Lower corner of tile box physical domain - const amrex::XDim3 xyzmin = WarpX::LowerCorner(tilebox, lev, 0._rt); - const Dim3 lo = lbound(tilebox); - const Dim3 hi = ubound(tilebox); - const Real rmin = xyzmin.x + (tb.ixType().nodeCentered(0) ? 0._rt : 0.5_rt*dr); - const int irmin = lo.x; - const int irmax = hi.x; + auto const & warpx = WarpX::GetInstance(); + amrex::Geometry const & geom = warpx.Geom(lev); + amrex::Real const dr = geom.CellSize(0); + amrex::XDim3 const xyzmin = WarpX::LowerCorner(tilebox, lev, 0._rt); + amrex::Real const rmin = xyzmin.x + (is_nodal[0] ? 0._rt : 0.5_rt*dr); +#endif - int const ncomp = field.nComp(); + // On the boundaries, if the grid is nodal, use half of the volume. + // This applies to all boundary conditions, and to the overlap of + // boxes within the domain. + // Previously, the code used MultiFab::norm2, but that does not do + // the half-volume scaling for the domain boundaries when not periodic. 
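+ // As a 1D sketch of the weighting: for nodal values f_0..f_N the reduction
+ // below yields 0.5*f_0^2 + f_1^2 + ... + f_{N-1}^2 + 0.5*f_N^2 (the cell
+ // volume dV is applied by the caller), i.e. trapezoid-rule weights for f^2.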
+ + auto volume_factor = [=] AMREX_GPU_DEVICE(int i, int j, int k, int n) noexcept { + amrex::ignore_unused(i,j,k,n); +#if defined WARPX_DIM_RZ + amrex::Real const r = rmin + (i - tb_lo[0])*dr; + amrex::Real v_factor = 2._rt*r; + if (i == tb_lo[0] && is_nodal[0]) { v_factor = r + dr/4._rt; } + if (i == tb_hi[0] && is_nodal[0]) { v_factor = r - dr/4._rt; } + if (j == tb_lo[1] && is_nodal[1]) { v_factor *= 0.5_rt; } + if (j == tb_hi[1] && is_nodal[1]) { v_factor *= 0.5_rt; } + amrex::Real const theta_integral = (n == 0 ? 1._rt : 0.5_rt); + return MathConst::pi*v_factor*theta_integral; +#else + amrex::Real v_factor = 1._rt; + AMREX_D_TERM( + if (i == tb_lo[0] && is_nodal[0]) { v_factor *= 0.5_rt; }, + if (j == tb_lo[1] && is_nodal[1]) { v_factor *= 0.5_rt; }, + if (k == tb_lo[2] && is_nodal[2]) { v_factor *= 0.5_rt; }) + AMREX_D_TERM( + if (i == tb_hi[0] && is_nodal[0]) { v_factor *= 0.5_rt; }, + if (j == tb_hi[1] && is_nodal[1]) { v_factor *= 0.5_rt; }, + if (k == tb_hi[2] && is_nodal[2]) { v_factor *= 0.5_rt; }) + return v_factor; +#endif + }; - for (int idir=0 ; idir < AMREX_SPACEDIM ; idir++) { - if (WarpX::field_boundary_hi[idir] == FieldBoundaryType::Periodic) { - // For periodic boundaries, do not include the data in the nodes - // on the upper edge of the domain - tb.enclosedCells(idir); - } - } + int const ncomp = field.nComp(); reduce_ops.eval(tb, ncomp, reduce_data, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) -> ReduceTuple { - const amrex::Real r = rmin + (i - irmin)*dr; - amrex::Real volume_factor = r; - if (r == 0._rt) { - volume_factor = dr/8._rt; - } else if (rmin == 0._rt && i == irmax) { - volume_factor = r/2._rt - dr/8._rt; - } - const amrex::Real theta_integral = (n == 0 ? 2._rt : 1._rt); - return theta_integral*field_arr(i,j,k,n)*field_arr(i,j,k,n)*volume_factor; + return field_arr(i,j,k,n)*field_arr(i,j,k,n)*volume_factor(i,j,k,n); }); } - const amrex::Real field_sum = amrex::get<0>(reduce_data.value()); - const amrex::Real result = MathConst::pi*field_sum; + amrex::Real result = amrex::get<0>(reduce_data.value()); + amrex::ParallelDescriptor::ReduceRealSum(result); + return result; } -// end Real FieldEnergy::ComputeNorm2RZ From 2cdcb77668bba4a08b36445f403d77080edeb9c3 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Tue, 17 Dec 2024 09:35:06 -0800 Subject: [PATCH 125/278] DSMC: make the check of the number of processes more robust (#5515) The previous code was brittle because it was not using `AMREX_ALWAYS_ASSERT_WITH_MESSAGE` (which indeed cannot be used inside a GPU kernel). In practice, a user could specify e.g. 10 scattering processes and not get an error. The new code checks the number of processes before calling the GPU kernel, by using `AMREX_ALWAYS_ASSERT_WITH_MESSAGE`. --------- Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- .../BinaryCollision/DSMC/CollisionFilterFunc.H | 12 ++++++------ .../Collision/BinaryCollision/DSMC/DSMCFunc.H | 6 +++++- 2 files changed, 11 insertions(+), 7 deletions(-) diff --git a/Source/Particles/Collision/BinaryCollision/DSMC/CollisionFilterFunc.H b/Source/Particles/Collision/BinaryCollision/DSMC/CollisionFilterFunc.H index 46b228b049e..c5bd2e1cec6 100644 --- a/Source/Particles/Collision/BinaryCollision/DSMC/CollisionFilterFunc.H +++ b/Source/Particles/Collision/BinaryCollision/DSMC/CollisionFilterFunc.H @@ -34,7 +34,7 @@ * @param[in] scattering processes an array of scattering processes included for consideration. * @param[in] engine the random engine.
*/ -template +template AMREX_GPU_HOST_DEVICE AMREX_INLINE void CollisionPairFilter (const amrex::ParticleReal u1x, const amrex::ParticleReal u1y, const amrex::ParticleReal u1z, const amrex::ParticleReal u2x, @@ -65,11 +65,11 @@ void CollisionPairFilter (const amrex::ParticleReal u1x, const amrex::ParticleRe // Evaluate the cross-section for each scattering process to determine // the total collision probability. - AMREX_ASSERT_WITH_MESSAGE( - (process_count < 4), "Too many scattering processes in DSMC routine." - ); - int coll_type[4] = {0, 0, 0, 0}; - amrex::ParticleReal sigma_sums[4] = {0._prt, 0._prt, 0._prt, 0._prt}; + + // The size of the arrays below is a compile-time constant (template parameter) + // for performance reasons: it avoids dynamic memory allocation on the GPU. + int coll_type[max_process_count] = {0}; + amrex::ParticleReal sigma_sums[max_process_count] = {0._prt}; for (int ii = 0; ii < process_count; ii++) { auto const& scattering_process = scattering_processes[ii]; coll_type[ii] = int(scattering_process.m_type); diff --git a/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.H b/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.H index a692d2cbb9e..5a3c925e9bd 100644 --- a/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.H +++ b/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.H @@ -163,7 +163,11 @@ public: u1y[I1[i1]] = u1xbuf*std::sin(theta) + u1y[I1[i1]]*std::cos(theta); #endif - CollisionPairFilter( + const int max_process_count = 4; // Pre-defined value, for performance reasons + AMREX_ALWAYS_ASSERT_WITH_MESSAGE( + (m_process_count < max_process_count), "Too many scattering processes in DSMC routine (hardcoded to only allow 4). Update the max_process_count value in source code to allow more scattering processes." + ); + CollisionPairFilter( u1x[ I1[i1] ], u1y[ I1[i1] ], u1z[ I1[i1] ], u2x[ I2[i2] ], u2y[ I2[i2] ], u2z[ I2[i2] ], m1, m2, w1[ I1[i1] ], w2[ I2[i2] ], From b4eebb9a4ff39a91523ac0a92ba87fdadc0e55c6 Mon Sep 17 00:00:00 2001 From: Arianna Formenti Date: Wed, 18 Dec 2024 14:23:42 -0800 Subject: [PATCH 126/278] Follow-up: quasi-3D IGF solver (#5516) This is a minor follow-up to PR #5089. It fixes two comments and adds the documentation entry about `ablastr.nprocs_igf_fft`. --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- Docs/source/usage/parameters.rst | 9 ++++++++- Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp | 4 ++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 310a7986abf..31f0e06ab5b 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -290,7 +290,7 @@ Overall simulation parameters In electromagnetic mode, this solver can be used to initialize the species' self fields (``<species>.initialize_self_fields=1``) provided that the field BCs are PML (``boundary.field_lo,hi = PML``). - * ``warpx.use_2d_slices_fft_solver`` (`bool`, default: 0): Select the type of Integrated Green Function solver. + * ``warpx.use_2d_slices_fft_solver`` (`bool`) optional (default: 0): Select the type of Integrated Green Function solver. If 0, solve the Poisson equation in full 3D geometry. If 1, solve the Poisson equation in a quasi-3D geometry, neglecting the :math:`z` derivatives in the Laplacian of the Poisson equation. In practice, in this case, the code performs many 2D Poisson solves on all :math:`(x,y)` slices, each slice at a given :math:`z`.
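A minimal inputs-file sketch combining the options documented in these hunks (assuming ``warpx.poisson_solver = fft``, i.e. the FFT-based IGF solver described earlier in this file, is selected; the rank count below is purely illustrative):

```
warpx.poisson_solver = fft           # assumed: selects the FFT-based (IGF) Poisson solver
warpx.use_2d_slices_fft_solver = 1   # quasi-3D: 2D (x,y) solves at every z slice
ablastr.nprocs_igf_fft = 2           # illustrative MPI-rank count for the FFTs
```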
@@ -298,6 +298,13 @@ Overall simulation parameters As a consequence, this solver does not need to do an FFT along the :math:`z` direction, and instead uses only transverse FFTs (along :math:`x` and :math:`y`) at each :math:`z` position (or :math:`z` "slice"). + * ``ablastr.nprocs_igf_fft`` (`int`) optional (default: number of MPI ranks): Number of MPI ranks used to parallelize the FFT solver. + This can be less than or equal to the number of MPI ranks that are used to run the overall simulation. + It can be useful if the auxiliary simulation boxes fit within a single process, so as to avoid extra communications. + The auxiliary boxes are extended boxes in real and spectral space that are used to perform the necessary FFTs. + The extended simulation box size in real space is :math:`2n_x-1, 2n_y-1, 2n_z-1` with the 3D solver, :math:`2n_x-1, 2n_y-1, n_z` with the 2D solver. + The extended simulation box size in spectral space is :math:`n_x, 2n_y-1, 2n_z-1` with the 3D solver, :math:`n_x, 2n_y-1, n_z` with the 2D solver. + * ``warpx.self_fields_required_precision`` (`float`, default: 1.e-11) The relative precision with which the electrostatic space-charge fields should be calculated. More specifically, the space-charge fields are diff --git a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp index 998bb179f5b..74f9b308acd 100755 --- a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp +++ b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp @@ -70,7 +70,7 @@ computePhiIGF ( amrex::MultiFab const & rho, amrex::Real const dz = cell_size[2]; if (!is_igf_2d_slices){ - // 2D sliced solver + // fully 3D solver obc_solver->setGreensFunction( [=] AMREX_GPU_DEVICE (int i, int j, int k) -> amrex::Real { @@ -84,7 +84,7 @@ computePhiIGF ( amrex::MultiFab const & rho, return SumOfIntegratedPotential3D(x, y, z, dx, dy, dz); }); }else{ - // fully 3D solver + // 2D sliced solver obc_solver->setGreensFunction( [=] AMREX_GPU_DEVICE (int i, int j, int k) -> amrex::Real { From d79fe71ae810364b02017ef70c82c70f667c8e19 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Wed, 18 Dec 2024 16:30:14 -0800 Subject: [PATCH 127/278] CTest: split checksum analysis from test analysis, expose arguments (#5456) Prototype of implementation to see if this can achieve goals such as: 1. run test and analysis locally (possibly without CTest) without worrying about checksums 2. avoid duplicate code for default regression analysis with custom parameters (e.g., tolerance, output format) 3. minimize work needed to implement checksum regression analysis for new tests This PR replaces #5447, see https://github.com/ECP-WarpX/WarpX/pull/5447#issuecomment-2471636621. ## Old usage 1. Add this to the test analysis script: ```python import os import sys ... ... sys.path.insert(1, "../../../../warpx/Regression/Checksum/") from checksumAPI import evaluate_checksum ... ... # compare checksums evaluate_checksum( test_name=os.path.split(os.getcwd())[1], output_file=sys.argv[1], ) ``` 2. Add this to the CMakeLists.txt file: ```cmake add_warpx_test( test_1d_laser_acceleration_fluid_boosted # name 1 # dims 2 # nprocs inputs_test_1d_laser_acceleration_fluid_boosted # inputs analysis_1d_fluid_boosted.py # analysis diags/diag1000001 # output OFF # dependency ) ``` ## New usage 1.
Create a soft link to the default regression analysis script from the test directory (we already do this when there isn't a custom analysis script, see comment in the follow-up section below): ```console ln -s ../../analysis_default_regression.py analysis_default_regression.py ``` 2. Add this to the CMakeLists.txt file: ```cmake add_warpx_test( test_1d_laser_acceleration_fluid_boosted # name 1 # dims 2 # nprocs inputs_test_1d_laser_acceleration_fluid_boosted # inputs "analysis_1d_fluid_boosted.py diags/diag1000001" # analysis "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) ``` ## Notes - The updated default regression analysis script has the following usage: ```console usage: analysis_default_regression.py [-h] [--path PATH] [--rtol RTOL] [--skip-fields] [--skip-particles] options: -h, --help show this help message and exit --path PATH path to output file(s) --rtol RTOL relative tolerance to compare checksums --skip-fields skip fields when comparing checksums --skip-particles skip particles when comparing checksums ``` - The checksum files that changed (as opposed to the ones that were added from scratch) changed because the corresponding analysis scripts were not performing any checksum analysis. The files had been added but they were not used by the test analysis, hence they were outdated. ## To-do - [x] Add missing checksum files or update existing checksum files that were not used - [x] Update documentation ## Follow-up - Improve documentation even more: - do we need all user-facing features of `checksumAPI` or are some obsolete? - can we merge documentation for testing and checksums into one section? - Check/fix custom tolerances (`git grep "# checksum" Examples/ | grep "rtol"`) - Add logic to reset tolerances based on environment variables (e.g., to run all tests in single-precision) - Update custom test analysis scripts so that they do not need to take the output path as argument (would be difficult to maintain as a convention, though, as these scripts are up to each PR author) - Try (again) to make it work with direct path rather than soft link (did not work when we tried in the past) --- Docs/source/developers/testing.rst | 38 +++-- Examples/CMakeLists.txt | 73 +++++++-- .../beam_beam_collision/CMakeLists.txt | 4 +- .../analysis_default_openpmd_regression.py | 1 - .../analysis_default_regression.py | 1 + .../capacitive_discharge/CMakeLists.txt | 24 +-- .../capacitive_discharge/analysis_1d.py | 12 -- .../capacitive_discharge/analysis_2d.py | 21 --- .../capacitive_discharge/analysis_dsmc.py | 12 -- .../free_electron_laser/CMakeLists.txt | 4 +- .../analysis_default_regression.py | 1 + .../free_electron_laser/analysis_fel.py | 11 -- .../laser_acceleration/CMakeLists.txt | 52 +++---- .../analysis_1d_fluid_boosted.py | 10 -- .../laser_acceleration/analysis_openpmd_rz.py | 11 -- .../analysis_refined_injection.py | 10 -- .../laser_ion/CMakeLists.txt | 8 +- .../laser_ion/analysis_default_regression.py | 1 + .../laser_ion/analysis_test_laser_ion.py | 11 -- .../plasma_acceleration/CMakeLists.txt | 32 ++-- .../plasma_mirror/CMakeLists.txt | 4 +- .../spacecraft_charging/CMakeLists.txt | 4 +- .../spacecraft_charging/analysis.py | 10 -- .../analysis_default_regression.py | 1 + .../CMakeLists.txt | 4 +- .../thomson_parabola_spectrometer/analysis.py | 2 + .../analysis_default_openpmd_regression.py | 1 - .../analysis_default_regression.py | 1 + .../uniform_plasma/CMakeLists.txt | 12 +- .../Tests/accelerator_lattice/CMakeLists.txt | 12 +- 
.../Tests/accelerator_lattice/analysis.py | 9 -- .../analysis_default_regression.py | 1 + Examples/Tests/boosted_diags/CMakeLists.txt | 4 +- Examples/Tests/boosted_diags/analysis.py | 10 -- .../analysis_default_regression.py | 1 + Examples/Tests/boundaries/CMakeLists.txt | 4 +- Examples/Tests/boundaries/analysis.py | 9 -- .../boundaries/analysis_default_regression.py | 1 + Examples/Tests/btd_rz/CMakeLists.txt | 4 +- Examples/Tests/btd_rz/analysis.py | 14 -- .../btd_rz/analysis_default_regression.py | 1 + .../collider_relevant_diags/CMakeLists.txt | 4 +- .../Tests/collider_relevant_diags/analysis.py | 10 -- .../analysis_default_regression.py | 1 + Examples/Tests/collision/CMakeLists.txt | 24 +-- .../Tests/collision/analysis_collision_1d.py | 10 -- .../Tests/collision/analysis_collision_2d.py | 9 -- .../Tests/collision/analysis_collision_3d.py | 10 -- .../analysis_collision_3d_isotropization.py | 10 -- .../Tests/collision/analysis_collision_rz.py | 11 -- .../collision/analysis_default_regression.py | 1 + Examples/Tests/diff_lumi_diag/CMakeLists.txt | 8 +- Examples/Tests/diff_lumi_diag/analysis.py | 11 -- .../analysis_default_regression.py | 1 + Examples/Tests/divb_cleaning/CMakeLists.txt | 4 +- Examples/Tests/divb_cleaning/analysis.py | 10 -- .../analysis_default_regression.py | 1 + Examples/Tests/dive_cleaning/CMakeLists.txt | 8 +- .../analysis_default_regression.py | 1 + .../CMakeLists.txt | 4 +- .../analysis.py | 12 -- .../analysis_default_regression.py | 1 + .../electrostatic_dirichlet_bc/CMakeLists.txt | 8 +- .../electrostatic_dirichlet_bc/analysis.py | 14 -- .../analysis_default_regression.py | 1 + .../Tests/electrostatic_sphere/CMakeLists.txt | 24 +-- .../analysis_default_regression.py | 1 + .../analysis_electrostatic_sphere.py | 9 -- .../electrostatic_sphere_eb/CMakeLists.txt | 20 +-- .../Tests/electrostatic_sphere_eb/analysis.py | 11 -- .../electrostatic_sphere_eb/analysis_rz.py | 11 -- .../electrostatic_sphere_eb/analysis_rz_mr.py | 11 -- .../embedded_boundary_cube/CMakeLists.txt | 12 +- .../analysis_default_regression.py | 1 + .../embedded_boundary_cube/analysis_fields.py | 9 -- .../analysis_fields_2d.py | 10 -- .../CMakeLists.txt | 4 +- .../analysis_default_regression.py | 1 + .../analysis_fields.py | 11 -- .../CMakeLists.txt | 4 +- .../embedded_boundary_python_api/analysis.py | 19 --- .../analysis_default_regression.py | 1 + .../CMakeLists.txt | 8 +- .../analysis_default_regression.py | 1 + .../analysis_fields_2d.py | 10 -- .../analysis_fields_3d.py | 10 -- Examples/Tests/embedded_circle/CMakeLists.txt | 4 +- Examples/Tests/embedded_circle/analysis.py | 14 -- .../analysis_default_regression.py | 1 + .../CMakeLists.txt | 4 +- .../analysis.py | 12 -- .../analysis_default_regression.py | 1 + .../Tests/field_ionization/CMakeLists.txt | 12 +- Examples/Tests/field_ionization/analysis.py | 9 -- .../analysis_default_regression.py | 1 + Examples/Tests/field_probe/CMakeLists.txt | 4 +- Examples/Tests/field_probe/analysis.py | 12 -- .../analysis_default_regression.py | 1 + Examples/Tests/flux_injection/CMakeLists.txt | 20 +-- .../analysis_default_regression.py | 1 + .../analysis_flux_injection_3d.py | 10 -- .../analysis_flux_injection_from_eb.py | 8 - .../analysis_flux_injection_rz.py | 10 -- Examples/Tests/gaussian_beam/CMakeLists.txt | 8 +- Examples/Tests/gaussian_beam/analysis.py | 14 +- Examples/Tests/implicit/CMakeLists.txt | 24 +-- Examples/Tests/implicit/analysis_1d.py | 10 -- Examples/Tests/implicit/analysis_2d_psatd.py | 11 -- .../implicit/analysis_default_regression.py | 1 + 
.../Tests/implicit/analysis_vandb_jfnk_2d.py | 13 -- .../Tests/initial_distribution/CMakeLists.txt | 4 +- .../Tests/initial_distribution/analysis.py | 12 -- .../analysis_default_regression.py | 1 + .../initial_plasma_profile/CMakeLists.txt | 4 +- .../Tests/initial_plasma_profile/analysis.py | 21 --- .../analysis_default_regression.py | 1 + Examples/Tests/ion_stopping/CMakeLists.txt | 4 +- Examples/Tests/ion_stopping/analysis.py | 10 -- .../analysis_default_regression.py | 1 + Examples/Tests/langmuir/CMakeLists.txt | 144 +++++++++--------- Examples/Tests/langmuir/analysis_1d.py | 9 -- Examples/Tests/langmuir/analysis_2d.py | 9 -- Examples/Tests/langmuir/analysis_3d.py | 9 -- Examples/Tests/langmuir/analysis_rz.py | 9 -- Examples/Tests/langmuir_fluids/CMakeLists.txt | 16 +- Examples/Tests/langmuir_fluids/analysis_1d.py | 10 -- Examples/Tests/langmuir_fluids/analysis_2d.py | 10 -- Examples/Tests/langmuir_fluids/analysis_3d.py | 10 -- .../analysis_default_regression.py | 1 + Examples/Tests/langmuir_fluids/analysis_rz.py | 13 -- Examples/Tests/larmor/CMakeLists.txt | 4 +- Examples/Tests/laser_injection/CMakeLists.txt | 20 +-- Examples/Tests/laser_injection/analysis_1d.py | 10 -- Examples/Tests/laser_injection/analysis_2d.py | 10 -- Examples/Tests/laser_injection/analysis_3d.py | 12 -- .../analysis_default_regression.py | 1 + .../laser_injection_from_file/CMakeLists.txt | 42 ++--- .../laser_injection_from_file/analysis_1d.py | 10 -- .../analysis_1d_boost.py | 10 -- .../laser_injection_from_file/analysis_2d.py | 10 -- .../analysis_2d_binary.py | 10 -- .../laser_injection_from_file/analysis_3d.py | 10 -- .../analysis_default_regression.py | 1 + .../analysis_from_RZ_file.py | 10 -- .../laser_injection_from_file/analysis_rz.py | 10 -- Examples/Tests/laser_on_fine/CMakeLists.txt | 4 +- .../Tests/load_external_field/CMakeLists.txt | 24 +-- .../Tests/load_external_field/analysis_3d.py | 10 -- .../analysis_default_regression.py | 1 + .../Tests/load_external_field/analysis_rz.py | 10 -- .../Tests/magnetostatic_eb/CMakeLists.txt | 12 +- .../Tests/magnetostatic_eb/analysis_rz.py | 14 -- .../Tests/maxwell_hybrid_qed/CMakeLists.txt | 4 +- .../analysis_default_regression.py | 1 + .../Tests/nci_fdtd_stability/CMakeLists.txt | 8 +- .../analysis_default_regression.py | 1 + .../nci_fdtd_stability/analysis_ncicorr.py | 10 -- .../Tests/nci_psatd_stability/CMakeLists.txt | 68 ++++----- .../nci_psatd_stability/analysis_galilean.py | 10 -- .../nci_psatd_stability/analysis_multiJ.py | 9 -- .../Tests/nodal_electrostatic/CMakeLists.txt | 4 +- .../Tests/nodal_electrostatic/analysis.py | 12 -- .../analysis_default_regression.py | 1 + Examples/Tests/nuclear_fusion/CMakeLists.txt | 24 +-- .../analysis_default_regression.py | 1 + ...sis_deuterium_deuterium_3d_intraspecies.py | 12 -- .../analysis_proton_boron_fusion.py | 12 +- .../analysis_two_product_fusion.py | 12 +- .../Tests/ohm_solver_em_modes/CMakeLists.txt | 8 +- .../Tests/ohm_solver_em_modes/analysis.py | 13 -- .../analysis_default_regression.py | 1 + .../Tests/ohm_solver_em_modes/analysis_rz.py | 14 -- .../CMakeLists.txt | 4 +- .../ohm_solver_ion_Landau_damping/analysis.py | 13 -- .../analysis_default_regression.py | 1 + .../CMakeLists.txt | 4 +- .../analysis.py | 13 -- .../analysis_default_regression.py | 1 + .../CMakeLists.txt | 4 +- .../analysis.py | 13 -- .../analysis_default_regression.py | 1 + .../open_bc_poisson_solver/CMakeLists.txt | 8 +- .../Tests/open_bc_poisson_solver/analysis.py | 11 -- .../analysis_default_regression.py | 1 + .../CMakeLists.txt | 4 +- 
.../particle_boundary_interaction/analysis.py | 10 -- .../analysis_default_regression.py | 1 + .../particle_boundary_process/CMakeLists.txt | 8 +- .../analysis_absorption.py | 10 -- .../particle_boundary_scrape/CMakeLists.txt | 8 +- .../analysis_default_regression.py | 1 + .../Tests/particle_data_python/CMakeLists.txt | 12 +- .../Tests/particle_data_python/analysis.py | 14 -- .../particle_fields_diags/CMakeLists.txt | 10 +- .../analysis_default_regression.py | 1 + .../analysis_particle_diags_impl.py | 13 -- Examples/Tests/particle_pusher/CMakeLists.txt | 4 +- Examples/Tests/particle_pusher/analysis.py | 10 -- .../analysis_default_regression.py | 1 + .../particle_thermal_boundary/CMakeLists.txt | 4 +- .../particle_thermal_boundary/analysis.py | 12 -- .../analysis_default_regression.py | 1 + .../Tests/particles_in_pml/CMakeLists.txt | 16 +- .../analysis_default_regression.py | 1 + .../analysis_particles_in_pml.py | 9 -- .../pass_mpi_communicator/CMakeLists.txt | 4 +- .../analysis_default_regression.py | 1 + Examples/Tests/pec/CMakeLists.txt | 16 +- Examples/Tests/pec/analysis_pec.py | 10 -- Examples/Tests/pec/analysis_pec_mr.py | 10 -- Examples/Tests/photon_pusher/CMakeLists.txt | 4 +- Examples/Tests/photon_pusher/analysis.py | 10 -- .../analysis_default_regression.py | 1 + Examples/Tests/plasma_lens/CMakeLists.txt | 20 +-- Examples/Tests/plasma_lens/analysis.py | 12 -- .../analysis_default_regression.py | 1 + Examples/Tests/pml/CMakeLists.txt | 32 ++-- Examples/Tests/pml/analysis_pml_ckc.py | 9 -- Examples/Tests/pml/analysis_pml_psatd.py | 8 - Examples/Tests/pml/analysis_pml_psatd_rz.py | 9 -- Examples/Tests/pml/analysis_pml_yee.py | 9 -- .../Tests/point_of_contact_eb/CMakeLists.txt | 8 +- .../Tests/point_of_contact_eb/analysis.py | 12 -- .../analysis_default_regression.py | 1 + .../projection_divb_cleaner/CMakeLists.txt | 12 +- .../Tests/projection_divb_cleaner/analysis.py | 10 -- Examples/Tests/python_wrappers/CMakeLists.txt | 4 +- Examples/Tests/qed/CMakeLists.txt | 40 ++--- .../Tests/qed/analysis_breit_wheeler_opmd.py | 11 -- .../Tests/qed/analysis_breit_wheeler_yt.py | 12 +- .../Tests/qed/analysis_default_regression.py | 1 + Examples/Tests/qed/analysis_quantum_sync.py | 12 +- Examples/Tests/qed/analysis_schwinger.py | 9 -- .../Tests/radiation_reaction/CMakeLists.txt | 4 +- Examples/Tests/radiation_reaction/analysis.py | 10 -- .../analysis_default_regression.py | 1 + Examples/Tests/reduced_diags/CMakeLists.txt | 20 +-- .../analysis_default_regression.py | 1 + .../analysis_reduced_diags_impl.py | 11 -- ...alysis_reduced_diags_load_balance_costs.py | 13 -- .../analysis_reduced_diags_single.py | 16 -- .../CMakeLists.txt | 4 +- .../analysis.py | 10 -- .../analysis_default_regression.py | 1 + .../Tests/repelling_particles/CMakeLists.txt | 4 +- .../Tests/repelling_particles/analysis.py | 10 -- .../analysis_default_regression.py | 1 + Examples/Tests/resampling/CMakeLists.txt | 12 +- Examples/Tests/resampling/analysis.py | 10 -- Examples/Tests/restart/CMakeLists.txt | 36 ++--- Examples/Tests/restart/analysis_restart.py | 21 --- Examples/Tests/restart_eb/CMakeLists.txt | 10 +- Examples/Tests/rigid_injection/CMakeLists.txt | 8 +- .../analysis_default_regression.py | 1 + .../analysis_rigid_injection_btd.py | 10 -- .../analysis_rigid_injection_lab.py | 9 -- Examples/Tests/scraping/CMakeLists.txt | 8 +- .../scraping/analysis_default_regression.py | 1 + Examples/Tests/scraping/analysis_rz.py | 11 -- Examples/Tests/silver_mueller/CMakeLists.txt | 16 +- Examples/Tests/silver_mueller/analysis.py | 9 -- 
.../analysis_default_regression.py | 1 + Examples/Tests/single_particle/CMakeLists.txt | 4 +- Examples/Tests/single_particle/analysis.py | 9 -- .../analysis_default_regression.py | 1 + .../CMakeLists.txt | 8 +- .../space_charge_initialization/analysis.py | 10 -- .../analysis_default_regression.py | 1 + Examples/Tests/subcycling/CMakeLists.txt | 4 +- Examples/Tests/vay_deposition/CMakeLists.txt | 8 +- Examples/Tests/vay_deposition/analysis.py | 10 -- .../analysis_default_regression.py | 1 + .../analysis_default_openpmd_regression.py | 26 ---- Examples/analysis_default_regression.py | 87 +++++++++-- Examples/analysis_default_restart.py | 16 +- .../test_2d_collision_xz_picmi.json | 29 ++++ .../test_2d_dirichlet_bc_picmi.json | 5 + .../test_2d_dive_cleaning.json | 30 ++-- .../test_2d_maxwell_hybrid_qed_solver.json | 8 +- .../test_2d_particle_attr_access_picmi.json | 14 ++ ..._2d_particle_attr_access_unique_picmi.json | 14 ++ .../test_2d_qed_breit_wheeler_opmd.json | 134 ++++++++++++++++ .../test_3d_dive_cleaning.json | 38 ++--- .../test_3d_particle_scrape.json | 10 ++ .../test_3d_particle_scrape_picmi.json | 10 ++ .../test_3d_plasma_lens_picmi.json | 21 +++ .../test_3d_qed_breit_wheeler_opmd.json | 134 ++++++++++++++++ ...diags_load_balance_costs_timers_picmi.json | 22 +++ .../benchmarks_json/test_rz_scraping.json | 9 ++ .../test_rz_scraping_filter.json | 17 +++ 290 files changed, 1259 insertions(+), 1983 deletions(-) delete mode 120000 Examples/Physics_applications/beam_beam_collision/analysis_default_openpmd_regression.py create mode 120000 Examples/Physics_applications/beam_beam_collision/analysis_default_regression.py delete mode 100755 Examples/Physics_applications/capacitive_discharge/analysis_2d.py create mode 120000 Examples/Physics_applications/free_electron_laser/analysis_default_regression.py create mode 120000 Examples/Physics_applications/laser_ion/analysis_default_regression.py create mode 120000 Examples/Physics_applications/spacecraft_charging/analysis_default_regression.py mode change 100644 => 100755 Examples/Physics_applications/thomson_parabola_spectrometer/analysis.py delete mode 120000 Examples/Physics_applications/thomson_parabola_spectrometer/analysis_default_openpmd_regression.py create mode 120000 Examples/Physics_applications/thomson_parabola_spectrometer/analysis_default_regression.py create mode 120000 Examples/Tests/accelerator_lattice/analysis_default_regression.py create mode 120000 Examples/Tests/boosted_diags/analysis_default_regression.py create mode 120000 Examples/Tests/boundaries/analysis_default_regression.py create mode 120000 Examples/Tests/btd_rz/analysis_default_regression.py create mode 120000 Examples/Tests/collider_relevant_diags/analysis_default_regression.py create mode 120000 Examples/Tests/collision/analysis_default_regression.py create mode 120000 Examples/Tests/diff_lumi_diag/analysis_default_regression.py create mode 120000 Examples/Tests/divb_cleaning/analysis_default_regression.py create mode 120000 Examples/Tests/dive_cleaning/analysis_default_regression.py create mode 120000 Examples/Tests/effective_potential_electrostatic/analysis_default_regression.py create mode 120000 Examples/Tests/electrostatic_dirichlet_bc/analysis_default_regression.py create mode 120000 Examples/Tests/electrostatic_sphere/analysis_default_regression.py create mode 120000 Examples/Tests/embedded_boundary_cube/analysis_default_regression.py create mode 120000 Examples/Tests/embedded_boundary_diffraction/analysis_default_regression.py delete mode 100755 
Examples/Tests/embedded_boundary_python_api/analysis.py create mode 120000 Examples/Tests/embedded_boundary_python_api/analysis_default_regression.py create mode 120000 Examples/Tests/embedded_boundary_rotated_cube/analysis_default_regression.py delete mode 100755 Examples/Tests/embedded_circle/analysis.py create mode 120000 Examples/Tests/embedded_circle/analysis_default_regression.py create mode 120000 Examples/Tests/energy_conserving_thermal_plasma/analysis_default_regression.py create mode 120000 Examples/Tests/field_ionization/analysis_default_regression.py create mode 120000 Examples/Tests/field_probe/analysis_default_regression.py create mode 120000 Examples/Tests/flux_injection/analysis_default_regression.py create mode 120000 Examples/Tests/implicit/analysis_default_regression.py create mode 120000 Examples/Tests/initial_distribution/analysis_default_regression.py delete mode 100755 Examples/Tests/initial_plasma_profile/analysis.py create mode 120000 Examples/Tests/initial_plasma_profile/analysis_default_regression.py create mode 120000 Examples/Tests/ion_stopping/analysis_default_regression.py create mode 120000 Examples/Tests/langmuir_fluids/analysis_default_regression.py create mode 120000 Examples/Tests/laser_injection/analysis_default_regression.py create mode 120000 Examples/Tests/laser_injection_from_file/analysis_default_regression.py create mode 120000 Examples/Tests/load_external_field/analysis_default_regression.py delete mode 100755 Examples/Tests/magnetostatic_eb/analysis_rz.py create mode 120000 Examples/Tests/maxwell_hybrid_qed/analysis_default_regression.py create mode 120000 Examples/Tests/nci_fdtd_stability/analysis_default_regression.py create mode 120000 Examples/Tests/nodal_electrostatic/analysis_default_regression.py create mode 120000 Examples/Tests/nuclear_fusion/analysis_default_regression.py create mode 120000 Examples/Tests/ohm_solver_em_modes/analysis_default_regression.py create mode 120000 Examples/Tests/ohm_solver_ion_Landau_damping/analysis_default_regression.py create mode 120000 Examples/Tests/ohm_solver_ion_beam_instability/analysis_default_regression.py create mode 120000 Examples/Tests/ohm_solver_magnetic_reconnection/analysis_default_regression.py create mode 120000 Examples/Tests/open_bc_poisson_solver/analysis_default_regression.py create mode 120000 Examples/Tests/particle_boundary_interaction/analysis_default_regression.py create mode 120000 Examples/Tests/particle_boundary_scrape/analysis_default_regression.py delete mode 100755 Examples/Tests/particle_data_python/analysis.py create mode 120000 Examples/Tests/particle_fields_diags/analysis_default_regression.py create mode 120000 Examples/Tests/particle_pusher/analysis_default_regression.py create mode 120000 Examples/Tests/particle_thermal_boundary/analysis_default_regression.py create mode 120000 Examples/Tests/particles_in_pml/analysis_default_regression.py create mode 120000 Examples/Tests/pass_mpi_communicator/analysis_default_regression.py create mode 120000 Examples/Tests/photon_pusher/analysis_default_regression.py create mode 120000 Examples/Tests/plasma_lens/analysis_default_regression.py create mode 120000 Examples/Tests/point_of_contact_eb/analysis_default_regression.py create mode 120000 Examples/Tests/qed/analysis_default_regression.py create mode 120000 Examples/Tests/radiation_reaction/analysis_default_regression.py create mode 120000 Examples/Tests/reduced_diags/analysis_default_regression.py delete mode 100755 Examples/Tests/reduced_diags/analysis_reduced_diags_single.py 
create mode 120000 Examples/Tests/relativistic_space_charge_initialization/analysis_default_regression.py create mode 120000 Examples/Tests/repelling_particles/analysis_default_regression.py delete mode 100755 Examples/Tests/restart/analysis_restart.py create mode 120000 Examples/Tests/rigid_injection/analysis_default_regression.py create mode 120000 Examples/Tests/scraping/analysis_default_regression.py create mode 120000 Examples/Tests/silver_mueller/analysis_default_regression.py create mode 120000 Examples/Tests/single_particle/analysis_default_regression.py create mode 120000 Examples/Tests/space_charge_initialization/analysis_default_regression.py create mode 120000 Examples/Tests/vay_deposition/analysis_default_regression.py delete mode 100755 Examples/analysis_default_openpmd_regression.py create mode 100644 Regression/Checksum/benchmarks_json/test_2d_collision_xz_picmi.json create mode 100644 Regression/Checksum/benchmarks_json/test_2d_dirichlet_bc_picmi.json create mode 100644 Regression/Checksum/benchmarks_json/test_2d_particle_attr_access_picmi.json create mode 100644 Regression/Checksum/benchmarks_json/test_2d_particle_attr_access_unique_picmi.json create mode 100644 Regression/Checksum/benchmarks_json/test_2d_qed_breit_wheeler_opmd.json create mode 100644 Regression/Checksum/benchmarks_json/test_3d_particle_scrape.json create mode 100644 Regression/Checksum/benchmarks_json/test_3d_particle_scrape_picmi.json create mode 100644 Regression/Checksum/benchmarks_json/test_3d_plasma_lens_picmi.json create mode 100644 Regression/Checksum/benchmarks_json/test_3d_qed_breit_wheeler_opmd.json create mode 100644 Regression/Checksum/benchmarks_json/test_3d_reduced_diags_load_balance_costs_timers_picmi.json create mode 100644 Regression/Checksum/benchmarks_json/test_rz_scraping_filter.json diff --git a/Docs/source/developers/testing.rst b/Docs/source/developers/testing.rst index 111e3e7d7cb..57194b54642 100644 --- a/Docs/source/developers/testing.rst +++ b/Docs/source/developers/testing.rst @@ -140,8 +140,8 @@ A new test can be added by adding a corresponding entry in ``CMakeLists.txt`` as 1 # dims 2 # nprocs inputs_test_1d_laser_acceleration # inputs - analysis.py # analysis - diags/diag1000100 # output (plotfile) + "analysis.py diags/diag1000100" # analysis + "analysis_default_regression.py --path diags/diag1000100" # checksum OFF # dependency ) @@ -154,8 +154,8 @@ A new test can be added by adding a corresponding entry in ``CMakeLists.txt`` as 2 # dims 2 # nprocs inputs_test_2d_laser_acceleration_picmi.py # inputs - analysis.py # analysis - diags/diag1000100 # output (plotfile) + "analysis.py diags/diag1000100" # analysis + "analysis_default_regression.py --path diags/diag1000100" # checksum OFF # dependency ) @@ -168,14 +168,14 @@ A new test can be added by adding a corresponding entry in ``CMakeLists.txt`` as 3 # dims 2 # nprocs inputs_test_3d_laser_acceleration_restart # inputs - analysis_default_restart.py # analysis - diags/diag1000100 # output (plotfile) + "analysis_default_restart.py diags/diag1000100" # analysis + "analysis_default_regression.py --path diags/diag1000100" # checksum test_3d_laser_acceleration # dependency ) Note that the restart has an explicit dependency, namely it can run only provided that the original test, from which the restart checkpoint files will be read, runs first. -* A more complex example. 
Add the **PICMI test** ``test_rz_laser_acceleration_picmi``, with custom command-line arguments ``--test`` and ``dir``, and openPMD time series output: +* A more complex example. Add the **PICMI test** ``test_rz_laser_acceleration_picmi``, with custom command-line arguments ``--test`` and ``--dir``, openPMD time series output, and custom command-line arguments for the checksum comparison: .. code-block:: cmake @@ -184,11 +184,13 @@ A new test can be added by adding a corresponding entry in ``CMakeLists.txt`` as RZ # dims 2 # nprocs "inputs_test_rz_laser_acceleration_picmi.py --test --dir 1" # inputs - analysis.py # analysis - diags/diag1/ # output (openPMD time series) + "analysis.py diags/diag1/" # analysis + "analysis_default_regression.py --path diags/diag1/ --skip-particles --rtol 1e-7" # checksum OFF # dependency ) +The ``analysis`` and ``checksum`` commands passed as arguments to ``add_warpx_test`` can be set to ``OFF`` if the intention is to skip the respective analysis for a given test. + If you need a new Python package dependency for testing, please add it in `Regression/requirements.txt `__. Sometimes two or more tests share a large number of input parameters. @@ -196,6 +198,24 @@ The shared input parameters can be collected in a "base" input file that can be If the new test is added in a new directory that did not exist before, please add the name of that directory with the command ``add_subdirectory`` in `Physics_applications/CMakeLists.txt `__ or `Tests/CMakeLists.txt `__, depending on where the new test directory is located. +If not already present, the default regression analysis script ``analysis_default_regression.py`` in the examples above must be linked from `Examples/analysis_default_regression.py `__, by executing the following command once from the test directory: + + .. code-block:: bash + + ln -s ../../analysis_default_regression.py analysis_default_regression.py + +Here is the help message of the default regression analysis script, including usage and the list of available options and arguments: + + ..
code-block:: bash + + usage: analysis_default_regression.py [-h] [--path PATH] [--rtol RTOL] [--skip-fields] [--skip-particles] + options: + -h, --help show this help message and exit + --path PATH path to output file(s) + --rtol RTOL relative tolerance to compare checksums + --skip-fields skip fields when comparing checksums + --skip-particles skip particles when comparing checksums + Naming conventions for automated tests -------------------------------------- diff --git a/Examples/CMakeLists.txt b/Examples/CMakeLists.txt index 728c2142932..c4303aaee0b 100644 --- a/Examples/CMakeLists.txt +++ b/Examples/CMakeLists.txt @@ -21,8 +21,8 @@ endif() # dims: 1,2,RZ,3 # nprocs: 1 or 2 (maybe refactor later on to just depend on WarpX_MPI) # inputs: inputs file or PICMI script, WarpX_MPI decides w/ or w/o MPI -# analysis: analysis script, always run without MPI -# output: output file(s) to analyze +# analysis: custom test analysis command, always run without MPI +# checksum: default regression analysis command (checksum benchmark) # dependency: name of base test that must run first # function(add_warpx_test @@ -31,7 +31,7 @@ function(add_warpx_test nprocs inputs analysis - output + checksum dependency ) # cannot run MPI tests w/o MPI build @@ -72,14 +72,25 @@ function(add_warpx_test separate_arguments(ANALYSIS_LIST UNIX_COMMAND "${analysis}") list(GET ANALYSIS_LIST 0 ANALYSIS_FILE) cmake_path(SET ANALYSIS_FILE "${CMAKE_CURRENT_SOURCE_DIR}/${ANALYSIS_FILE}") - # TODO Enable lines below to handle command-line arguments - #list(LENGTH ANALYSIS_LIST ANALYSIS_LIST_LENGTH) - #if(ANALYSIS_LIST_LENGTH GREATER 1) - # list(SUBLIST ANALYSIS_LIST 1 -1 ANALYSIS_ARGS) - # list(JOIN ANALYSIS_ARGS " " ANALYSIS_ARGS) - #else() - # set(ANALYSIS_ARGS "") - #endif() + list(LENGTH ANALYSIS_LIST ANALYSIS_LIST_LENGTH) + if(ANALYSIS_LIST_LENGTH GREATER 1) + list(SUBLIST ANALYSIS_LIST 1 -1 ANALYSIS_ARGS) + list(JOIN ANALYSIS_ARGS " " ANALYSIS_ARGS) + else() + set(ANALYSIS_ARGS "") + endif() + + # get checksum script and optional command-line arguments + separate_arguments(CHECKSUM_LIST UNIX_COMMAND "${checksum}") + list(GET CHECKSUM_LIST 0 CHECKSUM_FILE) + cmake_path(SET CHECKSUM_FILE "${CMAKE_CURRENT_SOURCE_DIR}/${CHECKSUM_FILE}") + list(LENGTH CHECKSUM_LIST CHECKSUM_LIST_LENGTH) + if(CHECKSUM_LIST_LENGTH GREATER 1) + list(SUBLIST CHECKSUM_LIST 1 -1 CHECKSUM_ARGS) + list(JOIN CHECKSUM_ARGS " " CHECKSUM_ARGS) + else() + set(CHECKSUM_ARGS "") + endif() # Python test? 
set(python OFF) @@ -175,11 +186,14 @@ function(add_warpx_test # test analysis if(analysis) + # for argparse, do not pass command-line arguments as one quoted string + separate_arguments(ANALYSIS_ARGS UNIX_COMMAND "${ANALYSIS_ARGS}") add_test( NAME ${name}.analysis COMMAND - ${THIS_Python_SCRIPT_EXE} ${ANALYSIS_FILE} - ${output} + ${THIS_Python_SCRIPT_EXE} + ${ANALYSIS_FILE} + ${ANALYSIS_ARGS} WORKING_DIRECTORY ${THIS_WORKING_DIR} ) # test analysis depends on test run @@ -187,13 +201,37 @@ # FIXME Use helper function to handle Windows exceptions set(PYTHONPATH "$ENV{PYTHONPATH}:${CMAKE_PYTHON_OUTPUT_DIRECTORY}") # add paths for custom Python modules - set(PYTHONPATH "${PYTHONPATH}:${WarpX_SOURCE_DIR}/Regression/Checksum") set(PYTHONPATH "${PYTHONPATH}:${WarpX_SOURCE_DIR}/Regression/PostProcessingUtils") set(PYTHONPATH "${PYTHONPATH}:${WarpX_SOURCE_DIR}/Tools/Parser") set(PYTHONPATH "${PYTHONPATH}:${WarpX_SOURCE_DIR}/Tools/PostProcessing") set_property(TEST ${name}.analysis APPEND PROPERTY ENVIRONMENT "PYTHONPATH=${PYTHONPATH}") endif() + # checksum analysis + if(checksum) + # for argparse, do not pass command-line arguments as one quoted string + separate_arguments(CHECKSUM_ARGS UNIX_COMMAND "${CHECKSUM_ARGS}") + add_test( + NAME ${name}.checksum + COMMAND + ${THIS_Python_SCRIPT_EXE} + ${CHECKSUM_FILE} + ${CHECKSUM_ARGS} + WORKING_DIRECTORY ${THIS_WORKING_DIR} + ) + # checksum analysis depends on test run + set_property(TEST ${name}.checksum APPEND PROPERTY DEPENDS "${name}.run") + if(analysis) + # checksum analysis depends on test analysis + set_property(TEST ${name}.checksum APPEND PROPERTY DEPENDS "${name}.analysis") + endif() + # FIXME Use helper function to handle Windows exceptions + set(PYTHONPATH "$ENV{PYTHONPATH}:${CMAKE_PYTHON_OUTPUT_DIRECTORY}") + # add paths for custom Python modules + set(PYTHONPATH "${PYTHONPATH}:${WarpX_SOURCE_DIR}/Regression/Checksum") + set_property(TEST ${name}.checksum APPEND PROPERTY ENVIRONMENT "PYTHONPATH=${PYTHONPATH}") + endif() + # CI: remove test directory after run if(WarpX_TEST_CLEANUP) add_test( @@ -206,6 +244,10 @@ # test cleanup depends on test analysis set_property(TEST ${name}.cleanup APPEND PROPERTY DEPENDS "${name}.analysis") endif() + if(checksum) + # test cleanup depends on checksum analysis + set_property(TEST ${name}.cleanup APPEND PROPERTY DEPENDS "${name}.checksum") + endif() endif() # Do we depend on another test?
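For reference, the ``${name}.checksum`` test wired up above just runs a Python script with the given command-line arguments. The actual ``Examples/analysis_default_regression.py`` is not reproduced in this patch, so the following is only a rough sketch consistent with the help message quoted earlier; the ``evaluate_checksum`` call mirrors the ones removed from the individual analysis scripts below, while the ``do_fields``/``do_particles`` keywords and the ``rtol`` default are assumptions.

```python
# Rough sketch of a default regression (checksum) script, not the actual file.
import argparse
import os

# importable because the CMake code above adds Regression/Checksum to PYTHONPATH
from checksumAPI import evaluate_checksum

parser = argparse.ArgumentParser()
parser.add_argument("--path", help="path to output file(s)")
parser.add_argument("--rtol", type=float, default=1e-9,  # assumed default
                    help="relative tolerance to compare checksums")
parser.add_argument("--skip-fields", action="store_true",
                    help="skip fields when comparing checksums")
parser.add_argument("--skip-particles", action="store_true",
                    help="skip particles when comparing checksums")
args = parser.parse_args()

evaluate_checksum(
    test_name=os.path.split(os.getcwd())[1],  # tests run in a directory named after the test
    output_file=args.path,
    rtol=args.rtol,
    do_fields=not args.skip_fields,        # assumed keyword, see note above
    do_particles=not args.skip_particles,  # assumed keyword, see note above
)
```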
@@ -215,6 +257,9 @@ function(add_warpx_test if(analysis) set_property(TEST ${name}.run APPEND PROPERTY DEPENDS "${dependency}.analysis") endif() + if(checksum) + set_property(TEST ${name}.run APPEND PROPERTY DEPENDS "${dependency}.checksum") + endif() if(WarpX_TEST_CLEANUP) # do not clean up dependency test before current test is completed set_property(TEST ${dependency}.cleanup APPEND PROPERTY DEPENDS "${name}.cleanup") diff --git a/Examples/Physics_applications/beam_beam_collision/CMakeLists.txt b/Examples/Physics_applications/beam_beam_collision/CMakeLists.txt index 09e96f04d7f..fbdb6dd221f 100644 --- a/Examples/Physics_applications/beam_beam_collision/CMakeLists.txt +++ b/Examples/Physics_applications/beam_beam_collision/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_beam_beam_collision # inputs - analysis_default_openpmd_regression.py # analysis - diags/diag1/ # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1/" # checksum OFF # dependency ) label_warpx_test(test_3d_beam_beam_collision slow) diff --git a/Examples/Physics_applications/beam_beam_collision/analysis_default_openpmd_regression.py b/Examples/Physics_applications/beam_beam_collision/analysis_default_openpmd_regression.py deleted file mode 120000 index 73e5ec47001..00000000000 --- a/Examples/Physics_applications/beam_beam_collision/analysis_default_openpmd_regression.py +++ /dev/null @@ -1 +0,0 @@ -../../analysis_default_openpmd_regression.py \ No newline at end of file diff --git a/Examples/Physics_applications/beam_beam_collision/analysis_default_regression.py b/Examples/Physics_applications/beam_beam_collision/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Physics_applications/beam_beam_collision/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Physics_applications/capacitive_discharge/CMakeLists.txt b/Examples/Physics_applications/capacitive_discharge/CMakeLists.txt index 5af1d0a0664..5403e374849 100644 --- a/Examples/Physics_applications/capacitive_discharge/CMakeLists.txt +++ b/Examples/Physics_applications/capacitive_discharge/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 1 # dims 2 # nprocs "inputs_base_1d_picmi.py --test --pythonsolver" # inputs - analysis_1d.py # analysis - diags/diag1000050 # output + "analysis_1d.py" # analysis + "analysis_default_regression.py --path diags/diag1000050" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 1 # dims 2 # nprocs "inputs_base_1d_picmi.py --test --dsmc" # inputs - analysis_dsmc.py # analysis - diags/diag1000050 # output + "analysis_dsmc.py" # analysis + "analysis_default_regression.py --path diags/diag1000050" # checksum OFF # dependency ) @@ -26,19 +26,19 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_background_mcc # inputs - analysis_default_regression.py # analysis - diags/diag1000050 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000050" # checksum OFF # dependency ) -# FIXME: can we make this a single precision for now? +# FIXME: can we make this single precision for now? 
#add_warpx_test( # test_2d_background_mcc_dp_psp # name # 2 # dims # 2 # nprocs -## inputs_test_2d_background_mcc_dp_psp # inputs -# analysis_default_regression.py # analysis -# diags/diag1000050 # output +# inputs_test_2d_background_mcc_dp_psp # inputs +# OFF # analysis +# "analysis_default_regression.py --path diags/diag1000050" # checksum # OFF # dependency #) @@ -47,7 +47,7 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_background_mcc_picmi.py # inputs - analysis_2d.py # analysis - diags/diag1000050 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000050 --rtol 5e-3" # checksum OFF # dependency ) diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_1d.py b/Examples/Physics_applications/capacitive_discharge/analysis_1d.py index e9043e5dc01..82d98c38210 100755 --- a/Examples/Physics_applications/capacitive_discharge/analysis_1d.py +++ b/Examples/Physics_applications/capacitive_discharge/analysis_1d.py @@ -2,14 +2,8 @@ # Copyright 2022 Modern Electron, David Grote -import os -import sys - import numpy as np -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # fmt: off ref_density = np.array([ 1.27989677e+14, 2.23601330e+14, 2.55400265e+14, 2.55664972e+14, @@ -51,9 +45,3 @@ density_data = np.load("ion_density_case_1.npy") print(repr(density_data)) assert np.allclose(density_data, ref_density) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_2d.py b/Examples/Physics_applications/capacitive_discharge/analysis_2d.py deleted file mode 100755 index d4845ffb718..00000000000 --- a/Examples/Physics_applications/capacitive_discharge/analysis_2d.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2021 Modern Electron - -# This script checks that the inputs_test_2d_background_mcc_picmi.py run more-or-less matches the -# results from the non-PICMI run. The PICMI run is using an external Poisson -# solver that directly solves the Poisson equation using matrix inversion -# rather than the iterative approach from the MLMG solver. 
- -import os -import sys - -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - rtol=5e-3, -) diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py b/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py index 1458924b35c..cdaa6bed58f 100755 --- a/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py +++ b/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py @@ -2,14 +2,8 @@ # 2023 TAE Technologies -import os -import sys - import numpy as np -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # fmt: off ref_density = np.array([ 1.27942709e+14, 2.23579371e+14, 2.55384387e+14, 2.55660663e+14, @@ -51,9 +45,3 @@ density_data = np.load("ion_density_case_1.npy") print(repr(density_data)) assert np.allclose(density_data, ref_density) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Physics_applications/free_electron_laser/CMakeLists.txt b/Examples/Physics_applications/free_electron_laser/CMakeLists.txt index f5bc8d857d2..168f06c9859 100644 --- a/Examples/Physics_applications/free_electron_laser/CMakeLists.txt +++ b/Examples/Physics_applications/free_electron_laser/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 1 # dims 2 # nprocs inputs_test_1d_fel # inputs - analysis_fel.py # analysis - diags/diag_labframe # output + "analysis_fel.py diags/diag_labframe" # analysis + "analysis_default_regression.py --path diags/diag_labframe" # checksum OFF # dependency ) diff --git a/Examples/Physics_applications/free_electron_laser/analysis_default_regression.py b/Examples/Physics_applications/free_electron_laser/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Physics_applications/free_electron_laser/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Physics_applications/free_electron_laser/analysis_fel.py b/Examples/Physics_applications/free_electron_laser/analysis_fel.py index 3ab80d195c0..b96ddd47147 100755 --- a/Examples/Physics_applications/free_electron_laser/analysis_fel.py +++ b/Examples/Physics_applications/free_electron_laser/analysis_fel.py @@ -17,16 +17,12 @@ lab-frame diagnostics and boosted-frame diagnostics. 
""" -import os import sys import numpy as np from openpmd_viewer import OpenPMDTimeSeries from scipy.constants import c, e, m_e -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # Physical parameters of the test gamma_bunch = 100.6 Bu = 0.5 @@ -136,10 +132,3 @@ def extract_peak_E_boost(iteration): lambda_radiation_lab = lambda_radiation_boost / (2 * gamma_boost) lambda_expected = lambda_u / (2 * gamma_boost**2) assert abs(lambda_radiation_lab - lambda_expected) / lambda_expected < 0.01 - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - output_format="openpmd", -) diff --git a/Examples/Physics_applications/laser_acceleration/CMakeLists.txt b/Examples/Physics_applications/laser_acceleration/CMakeLists.txt index 46e97a53d54..28b0e30c2b4 100644 --- a/Examples/Physics_applications/laser_acceleration/CMakeLists.txt +++ b/Examples/Physics_applications/laser_acceleration/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 1 # dims 2 # nprocs inputs_test_1d_laser_acceleration # inputs - analysis_default_regression.py # analysis - diags/diag1000100 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000100" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 1 # dims 2 # nprocs inputs_test_1d_laser_acceleration_fluid_boosted # inputs - analysis_1d_fluid_boosted.py # analysis - diags/diag1000001 # output + "analysis_1d_fluid_boosted.py diags/diag1000001" # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) label_warpx_test(test_1d_laser_acceleration_fluid_boosted slow) @@ -27,8 +27,8 @@ add_warpx_test( 1 # dims 2 # nprocs inputs_test_1d_laser_acceleration_picmi.py # inputs - analysis_default_regression.py # analysis - diags/diag1000100 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000100" # checksum OFF # dependency ) @@ -37,8 +37,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_laser_acceleration_boosted # inputs - analysis_default_regression.py # analysis - diags/diag1000002 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000002" # checksum OFF # dependency ) @@ -47,8 +47,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_laser_acceleration_mr # inputs - analysis_default_regression.py # analysis - diags/diag1000200 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000200" # checksum OFF # dependency ) @@ -57,8 +57,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_laser_acceleration_mr_picmi.py # inputs - analysis_default_regression.py # analysis - diags/diag1000200 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000200" # checksum OFF # dependency ) @@ -67,8 +67,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_refined_injection # inputs - analysis_refined_injection.py # analysis - diags/diag1000200 # output + "analysis_refined_injection.py diags/diag1000200" # analysis + "analysis_default_regression.py --path diags/diag1000200" # checksum OFF # dependency ) @@ -77,8 +77,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_laser_acceleration # inputs - analysis_default_openpmd_regression.py # analysis - diags/diag1/ # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1/" # checksum OFF # dependency ) @@ -87,8 +87,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_laser_acceleration_picmi.py # inputs - 
analysis_default_regression.py # analysis - diags/diag1000100 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000100" # checksum OFF # dependency ) @@ -97,8 +97,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_laser_acceleration_single_precision_comms # inputs - analysis_default_openpmd_regression.py # analysis - diags/diag1/ # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1/" # checksum OFF # dependency ) @@ -107,8 +107,8 @@ add_warpx_test( RZ # dims 2 # nprocs inputs_test_rz_laser_acceleration # inputs - analysis_default_regression.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) @@ -117,8 +117,8 @@ add_warpx_test( RZ # dims 2 # nprocs inputs_test_rz_laser_acceleration_opmd # inputs - analysis_openpmd_rz.py # analysis - diags/diag1/ # output + "analysis_openpmd_rz.py diags/diag1/" # analysis + "analysis_default_regression.py --path diags/diag1/" # checksum OFF # dependency ) @@ -127,8 +127,8 @@ add_warpx_test( RZ # dims 2 # nprocs inputs_test_rz_laser_acceleration_picmi.py # inputs - analysis_default_regression.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) label_warpx_test(test_rz_laser_acceleration_picmi slow) diff --git a/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid_boosted.py b/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid_boosted.py index 03369d48adf..bd45f30edbb 100755 --- a/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid_boosted.py +++ b/Examples/Physics_applications/laser_acceleration/analysis_1d_fluid_boosted.py @@ -10,7 +10,6 @@ # This is a script that analyses the simulation results from # the script `inputs_1d`. This simulates a 1D WFA with Ponderomotive Envelope: # REF: (Equations 20-23) https://journals.aps.org/rmp/pdf/10.1103/RevModPhys.81.1229 -import os import sys import matplotlib @@ -24,9 +23,6 @@ import numpy as np from scipy.constants import c, e, epsilon_0, m_e -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # this will be the name of the plot file fn = sys.argv[1] @@ -187,9 +183,3 @@ def odefcn(phi, xi, kp, a0, c, tau, xi_0, lambda_laser): print("tolerance_rel: " + str(tolerance_rel)) assert error_rel < tolerance_rel - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py b/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py index 0e07ddf914c..f136ffeb1d4 100755 --- a/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py +++ b/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py @@ -1,14 +1,10 @@ #!/usr/bin/env python3 -import os import sys import numpy as np import openpmd_api as io -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - filename = sys.argv[1] series = io.Series(f"{filename}/openpmd_%T.h5", io.Access.read_only) @@ -67,10 +63,3 @@ assert ( (electron_meanz > 0) and (beam_meanz < 0) ), "problem with openPMD+RZ. Maybe openPMD+RZ mixed up the order of rho_ diagnostics?"
- -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - output_format="openpmd", -) diff --git a/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py b/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py index d481075c112..8df5e422ddb 100755 --- a/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py +++ b/Examples/Physics_applications/laser_acceleration/analysis_refined_injection.py @@ -9,16 +9,12 @@ # This script tests the "warpx.refine_plasma=1" option by comparing # the actual number of electrons at step 200 to the expected value -import os import sys import yt yt.funcs.mylog.setLevel(50) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # this will be the name of the plot file fn = sys.argv[1] @@ -58,9 +54,3 @@ rho_slice = rho[13:51, 475] # Test uniformity up to 0.5% relative variation assert rho_slice.std() < 0.005 * abs(rho_slice.mean()) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Physics_applications/laser_ion/CMakeLists.txt b/Examples/Physics_applications/laser_ion/CMakeLists.txt index 66d53165290..cc67bef685c 100644 --- a/Examples/Physics_applications/laser_ion/CMakeLists.txt +++ b/Examples/Physics_applications/laser_ion/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_laser_ion_acc # inputs - analysis_test_laser_ion.py # analysis - diags/diagInst/ # output + "analysis_test_laser_ion.py diags/diagInst/" # analysis + "analysis_default_regression.py --path diags/diagInst/" # checksum OFF # dependency ) @@ -16,7 +16,7 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_laser_ion_acc_picmi.py # inputs - analysis_test_laser_ion.py # analysis - diags/diagInst/ # output + "analysis_test_laser_ion.py diags/diagInst/" # analysis + "analysis_default_regression.py --path diags/diagInst/" # checksum OFF # dependency ) diff --git a/Examples/Physics_applications/laser_ion/analysis_default_regression.py b/Examples/Physics_applications/laser_ion/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Physics_applications/laser_ion/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Physics_applications/laser_ion/analysis_test_laser_ion.py b/Examples/Physics_applications/laser_ion/analysis_test_laser_ion.py index d2106d33803..360d5d48b5f 100755 --- a/Examples/Physics_applications/laser_ion/analysis_test_laser_ion.py +++ b/Examples/Physics_applications/laser_ion/analysis_test_laser_ion.py @@ -6,9 +6,6 @@ import numpy as np import openpmd_api as io -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - def load_field_from_iteration( series, iteration: int, field: str, coord: str = None @@ -67,14 +64,6 @@ def compare_time_avg_with_instantaneous_diags(dir_inst: str, dir_avg: str): if __name__ == "__main__": - # NOTE: works only in the example directory due to relative path import - # compare checksums - evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - output_format="openpmd", - ) - # TODO: implement intervals parser for PICMI that allows more complex output periods test_name = os.path.split(os.getcwd())[1] if "picmi" not in test_name: diff --git 
a/Examples/Physics_applications/plasma_acceleration/CMakeLists.txt b/Examples/Physics_applications/plasma_acceleration/CMakeLists.txt index 00a0f80b457..68e81e4b9e4 100644 --- a/Examples/Physics_applications/plasma_acceleration/CMakeLists.txt +++ b/Examples/Physics_applications/plasma_acceleration/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 1 # dims 2 # nprocs inputs_test_1d_plasma_acceleration_picmi.py # inputs - analysis_default_regression.py # analysis - diags/diag1001000 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1001000" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_plasma_acceleration_boosted # inputs - analysis_default_regression.py # analysis - diags/diag1000020 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000020" # checksum OFF # dependency ) @@ -26,8 +26,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_plasma_acceleration_mr # inputs - analysis_default_regression.py # analysis - diags/diag1000400 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000400" # checksum OFF # dependency ) @@ -36,8 +36,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_plasma_acceleration_mr_momentum_conserving # inputs - analysis_default_regression.py # analysis - diags/diag1000400 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000400" # checksum OFF # dependency ) @@ -46,8 +46,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_plasma_acceleration_boosted # inputs - analysis_default_regression.py # analysis - diags/diag1000005 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000005" # checksum OFF # dependency ) @@ -56,8 +56,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_plasma_acceleration_boosted_hybrid # inputs - analysis_default_regression.py # analysis - diags/diag1000025 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000025" # checksum OFF # dependency ) @@ -66,8 +66,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_plasma_acceleration_mr_picmi.py # inputs - analysis_default_regression.py # analysis - diags/diag1000002 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000002" # checksum OFF # dependency ) @@ -76,7 +76,7 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_plasma_acceleration_picmi.py # inputs - analysis_default_regression.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) diff --git a/Examples/Physics_applications/plasma_mirror/CMakeLists.txt b/Examples/Physics_applications/plasma_mirror/CMakeLists.txt index 073245f758a..0d183ebbf4c 100644 --- a/Examples/Physics_applications/plasma_mirror/CMakeLists.txt +++ b/Examples/Physics_applications/plasma_mirror/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_plasma_mirror # inputs - analysis_default_regression.py # analysis - diags/diag1000020 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000020" # checksum OFF # dependency ) diff --git a/Examples/Physics_applications/spacecraft_charging/CMakeLists.txt b/Examples/Physics_applications/spacecraft_charging/CMakeLists.txt index 95349e525cc..f48cba16496 100644 --- a/Examples/Physics_applications/spacecraft_charging/CMakeLists.txt +++ b/Examples/Physics_applications/spacecraft_charging/CMakeLists.txt @@ -7,8 +7,8 @@ add_warpx_test( RZ 
# dims 2 # nprocs inputs_test_rz_spacecraft_charging_picmi.py # inputs - analysis.py # analysis - diags/diag1/ # output + "analysis.py diags/diag1/" # analysis + "analysis_default_regression.py --path diags/diag1/" # checksum OFF # dependency ) endif() diff --git a/Examples/Physics_applications/spacecraft_charging/analysis.py b/Examples/Physics_applications/spacecraft_charging/analysis.py index 1795f5dfb6e..8e13657b62e 100755 --- a/Examples/Physics_applications/spacecraft_charging/analysis.py +++ b/Examples/Physics_applications/spacecraft_charging/analysis.py @@ -12,7 +12,6 @@ by the following exponential function: phi(t)=v0(1-exp(-t/tau)) """ -import os import sys import matplotlib.pyplot as plt @@ -22,8 +21,6 @@ from scipy.optimize import curve_fit yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum filename = sys.argv[1] ts = OpenPMDTimeSeries(filename) @@ -74,10 +71,3 @@ def func(x, v0, tau): assert (diff_v0 < tolerance_v0) and ( diff_tau < tolerance_tau ), "Test spacecraft_charging did not pass" - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - output_format="openpmd", -) diff --git a/Examples/Physics_applications/spacecraft_charging/analysis_default_regression.py b/Examples/Physics_applications/spacecraft_charging/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Physics_applications/spacecraft_charging/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Physics_applications/thomson_parabola_spectrometer/CMakeLists.txt b/Examples/Physics_applications/thomson_parabola_spectrometer/CMakeLists.txt index 93b5d338fec..4a285ca0872 100644 --- a/Examples/Physics_applications/thomson_parabola_spectrometer/CMakeLists.txt +++ b/Examples/Physics_applications/thomson_parabola_spectrometer/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 3 # dims 1 # nprocs inputs_test_3d_thomson_parabola_spectrometer # inputs - analysis_default_openpmd_regression.py # analysis - diags/diag1 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1" # checksum OFF # dependency ) diff --git a/Examples/Physics_applications/thomson_parabola_spectrometer/analysis.py b/Examples/Physics_applications/thomson_parabola_spectrometer/analysis.py old mode 100644 new mode 100755 index 3485ffc6712..6f61ed92c72 --- a/Examples/Physics_applications/thomson_parabola_spectrometer/analysis.py +++ b/Examples/Physics_applications/thomson_parabola_spectrometer/analysis.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np diff --git a/Examples/Physics_applications/thomson_parabola_spectrometer/analysis_default_openpmd_regression.py b/Examples/Physics_applications/thomson_parabola_spectrometer/analysis_default_openpmd_regression.py deleted file mode 120000 index 73e5ec47001..00000000000 --- a/Examples/Physics_applications/thomson_parabola_spectrometer/analysis_default_openpmd_regression.py +++ /dev/null @@ -1 +0,0 @@ -../../analysis_default_openpmd_regression.py \ No newline at end of file diff --git a/Examples/Physics_applications/thomson_parabola_spectrometer/analysis_default_regression.py b/Examples/Physics_applications/thomson_parabola_spectrometer/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ 
b/Examples/Physics_applications/thomson_parabola_spectrometer/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Physics_applications/uniform_plasma/CMakeLists.txt b/Examples/Physics_applications/uniform_plasma/CMakeLists.txt index 79dec989c1f..6d0f37ab726 100644 --- a/Examples/Physics_applications/uniform_plasma/CMakeLists.txt +++ b/Examples/Physics_applications/uniform_plasma/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_uniform_plasma # inputs - analysis_default_regression.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_uniform_plasma # inputs - analysis_default_regression.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) @@ -26,7 +26,7 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_uniform_plasma_restart # inputs - analysis_default_restart.py # analysis - diags/diag1000010 # output + "analysis_default_restart.py diags/diag1000010" # analysis + "analysis_default_regression.py --path diags/diag1000010 --rtol 1e-12" # checksum test_3d_uniform_plasma # dependency ) diff --git a/Examples/Tests/accelerator_lattice/CMakeLists.txt b/Examples/Tests/accelerator_lattice/CMakeLists.txt index f3a28d30d4a..accccde34d0 100644 --- a/Examples/Tests/accelerator_lattice/CMakeLists.txt +++ b/Examples/Tests/accelerator_lattice/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_hard_edged_quadrupoles # inputs - analysis.py # analysis - diags/diag1000050 # output + "analysis.py diags/diag1000050" # analysis + "analysis_default_regression.py --path diags/diag1000050" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_hard_edged_quadrupoles_boosted # inputs - analysis.py # analysis - diags/diag1000050 # output + "analysis.py diags/diag1000050" # analysis + "analysis_default_regression.py --path diags/diag1000050" # checksum OFF # dependency ) @@ -26,7 +26,7 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_hard_edged_quadrupoles_moving # inputs - analysis.py # analysis - diags/diag1000050 # output + "analysis.py diags/diag1000050" # analysis + "analysis_default_regression.py --path diags/diag1000050" # checksum OFF # dependency ) diff --git a/Examples/Tests/accelerator_lattice/analysis.py b/Examples/Tests/accelerator_lattice/analysis.py index b208d086d8c..f53d54cbe12 100755 --- a/Examples/Tests/accelerator_lattice/analysis.py +++ b/Examples/Tests/accelerator_lattice/analysis.py @@ -15,7 +15,6 @@ The motion is slow enough that relativistic effects are ignored. 
""" -import os import sys import numpy as np @@ -23,8 +22,6 @@ from scipy.constants import c, e, m_e yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum filename = sys.argv[1] ds = yt.load(filename) @@ -130,9 +127,3 @@ def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): assert abs(np.abs((ux - ux_sim) / ux)) < 0.002, Exception( "error in x particle velocity" ) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/accelerator_lattice/analysis_default_regression.py b/Examples/Tests/accelerator_lattice/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/accelerator_lattice/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/boosted_diags/CMakeLists.txt b/Examples/Tests/boosted_diags/CMakeLists.txt index 8deb7f2bee2..b749d7153ea 100644 --- a/Examples/Tests/boosted_diags/CMakeLists.txt +++ b/Examples/Tests/boosted_diags/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_laser_acceleration_btd # inputs - analysis.py # analysis - diags/diag1000003 # output + "analysis.py diags/diag1000003" # analysis + "analysis_default_regression.py --path diags/diag1000003" # checksum OFF # dependency ) diff --git a/Examples/Tests/boosted_diags/analysis.py b/Examples/Tests/boosted_diags/analysis.py index 0d4794a8894..3c26b343d78 100755 --- a/Examples/Tests/boosted_diags/analysis.py +++ b/Examples/Tests/boosted_diags/analysis.py @@ -16,7 +16,6 @@ between the full back-transformed diagnostic and the reduced diagnostic (i.e., x-z slice) . """ -import os import sys import numpy as np @@ -26,9 +25,6 @@ yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - filename = sys.argv[1] # Tolerances to check consistency between legacy BTD and new BTD @@ -55,9 +51,3 @@ ts = OpenPMDTimeSeries("./diags/diag2/") (w,) = ts.get_particle(["w"], species="beam", iteration=3) assert (400 < len(w)) & (len(w) < 600) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/boosted_diags/analysis_default_regression.py b/Examples/Tests/boosted_diags/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/boosted_diags/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/boundaries/CMakeLists.txt b/Examples/Tests/boundaries/CMakeLists.txt index fccd45e2ebf..00a53742cb9 100644 --- a/Examples/Tests/boundaries/CMakeLists.txt +++ b/Examples/Tests/boundaries/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_particle_boundaries # inputs - analysis.py # analysis - diags/diag1000008 # output + "analysis.py diags/diag1000008" # analysis + "analysis_default_regression.py --path diags/diag1000008" # checksum OFF # dependency ) diff --git a/Examples/Tests/boundaries/analysis.py b/Examples/Tests/boundaries/analysis.py index ce3251ea406..9630c07d0ab 100755 --- a/Examples/Tests/boundaries/analysis.py +++ b/Examples/Tests/boundaries/analysis.py @@ -14,7 +14,6 @@ and checks that they end up in the correct place (or are deleted). 
""" -import os import sys import numpy as np @@ -22,8 +21,6 @@ from scipy.constants import c, m_e yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum # The min and max size of the box along the three axis. dmin = -1.0 @@ -110,9 +107,3 @@ def do_periodic(x): assert np.all( np.abs((zz - zza) / zz) < 1.0e-15 ), "Periodic particle position not correct" - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/boundaries/analysis_default_regression.py b/Examples/Tests/boundaries/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/boundaries/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/btd_rz/CMakeLists.txt b/Examples/Tests/btd_rz/CMakeLists.txt index 6a85f653c65..3c4bfffb609 100644 --- a/Examples/Tests/btd_rz/CMakeLists.txt +++ b/Examples/Tests/btd_rz/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( RZ # dims 2 # nprocs inputs_test_rz_btd # inputs - analysis.py # analysis - diags/diag1000289 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000289" # checksum OFF # dependency ) diff --git a/Examples/Tests/btd_rz/analysis.py b/Examples/Tests/btd_rz/analysis.py index 87f74599105..c3f4f0243fa 100755 --- a/Examples/Tests/btd_rz/analysis.py +++ b/Examples/Tests/btd_rz/analysis.py @@ -8,17 +8,11 @@ # fields recorded by the backtransformed diagnostics have the right amplitude, # wavelength, and envelope (i.e. gaussian envelope with the right duration. -import os -import sys - import numpy as np from openpmd_viewer import OpenPMDTimeSeries from scipy.constants import c, e, m_e from scipy.optimize import curve_fit -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - def gaussian_laser(z, a0, z0_phase, z0_prop, ctau, lambda0): """ @@ -34,8 +28,6 @@ def fit_function(z, z0_phase): return gaussian_laser(z, a0, z0_phase, z0_b + Lprop_b, ctau0, lambda0) -plotfile = sys.argv[1] - # The values must be consistent with the values provided in the simulation input t_current = 80e-15 # Time of the snapshot1 z0_antenna = -1.0e-6 # position of laser @@ -57,9 +49,3 @@ def fit_function(z, z0_phase): ## Check that the a0 agrees within 5% of the predicted value assert np.allclose(Ex, Ex_fit, atol=0.18 * Ex.max()) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/btd_rz/analysis_default_regression.py b/Examples/Tests/btd_rz/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/btd_rz/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/collider_relevant_diags/CMakeLists.txt b/Examples/Tests/collider_relevant_diags/CMakeLists.txt index 338f66970bc..d7bd38a9475 100644 --- a/Examples/Tests/collider_relevant_diags/CMakeLists.txt +++ b/Examples/Tests/collider_relevant_diags/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_collider_diagnostics # inputs - analysis.py # analysis - diags/diag1000001 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) diff --git 
a/Examples/Tests/collider_relevant_diags/analysis.py b/Examples/Tests/collider_relevant_diags/analysis.py index 232bc47af21..17e63e69076 100755 --- a/Examples/Tests/collider_relevant_diags/analysis.py +++ b/Examples/Tests/collider_relevant_diags/analysis.py @@ -1,6 +1,5 @@ #!/usr/bin/env python3 -import os import sys import numpy as np @@ -8,9 +7,6 @@ import pandas as pd from scipy.constants import c, e, hbar, m_e -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - sys.path.append("../../../../warpx/Tools/Parser/") from input_file_parser import parse_input_file @@ -179,9 +175,3 @@ def dL_dt(): # dL/dt dL_dt_cr = df[[col for col in df.columns if "dL_dt" in col]].to_numpy() assert np.allclose(dL_dt_cr, dL_dt(), rtol=1e-8) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/collider_relevant_diags/analysis_default_regression.py b/Examples/Tests/collider_relevant_diags/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/collider_relevant_diags/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/collision/CMakeLists.txt b/Examples/Tests/collision/CMakeLists.txt index 36f8a1cb1d6..522dafbfbfb 100644 --- a/Examples/Tests/collision/CMakeLists.txt +++ b/Examples/Tests/collision/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 1 # dims 2 # nprocs inputs_test_1d_collision_z # inputs - analysis_collision_1d.py # analysis - diags/diag1000600 # output + "analysis_collision_1d.py diags/diag1000600" # analysis + "analysis_default_regression.py --path diags/diag1000600" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 2 # dims 1 # nprocs inputs_test_2d_collision_xz # inputs - analysis_collision_2d.py # analysis - diags/diag1000150 # output + "analysis_collision_2d.py diags/diag1000150" # analysis + "analysis_default_regression.py --path diags/diag1000150" # checksum OFF # dependency ) @@ -26,8 +26,8 @@ add_warpx_test( 2 # dims 1 # nprocs inputs_test_2d_collision_xz_picmi.py # inputs - analysis_collision_2d.py # analysis - diags/diag1000150 # output + "analysis_collision_2d.py diags/diag1000150" # analysis + "analysis_default_regression.py --path diags/diag1000150" # checksum OFF # dependency ) @@ -36,8 +36,8 @@ add_warpx_test( 3 # dims 1 # nprocs inputs_test_3d_collision_iso # inputs - analysis_collision_3d_isotropization.py # analysis - diags/diag1000100 # output + "analysis_collision_3d_isotropization.py diags/diag1000100" # analysis + "analysis_default_regression.py --path diags/diag1000100" # checksum OFF # dependency ) @@ -46,8 +46,8 @@ add_warpx_test( 3 # dims 1 # nprocs inputs_test_3d_collision_xyz # inputs - analysis_collision_3d.py # analysis - diags/diag1000150 # output + "analysis_collision_3d.py diags/diag1000150" # analysis + "analysis_default_regression.py --path diags/diag1000150" # checksum OFF # dependency ) @@ -56,7 +56,7 @@ add_warpx_test( RZ # dims 1 # nprocs inputs_test_rz_collision # inputs - analysis_collision_rz.py # analysis - diags/diag1000150 # output + "analysis_collision_rz.py diags/diag1000150" # analysis + "analysis_default_regression.py --path diags/diag1000150 --skip-particles" # checksum OFF # dependency ) diff --git a/Examples/Tests/collision/analysis_collision_1d.py b/Examples/Tests/collision/analysis_collision_1d.py index 97ddee0591d..d5cf8b1cebd 100755 --- 
a/Examples/Tests/collision/analysis_collision_1d.py +++ b/Examples/Tests/collision/analysis_collision_1d.py @@ -15,16 +15,12 @@ # Both populations belong to the same carbon12 ion species. # See test T1b from JCP 413 (2020) by D. Higginson, et al. # -import os import sys import numpy as np import yt from scipy.constants import e -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # this will be the name of the plot file last_fn = sys.argv[1] ds = yt.load(last_fn) @@ -123,9 +119,3 @@ print("TApar at 30ps error = ", error) print("tolerance = ", tolerance) assert error < tolerance - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/collision/analysis_collision_2d.py b/Examples/Tests/collision/analysis_collision_2d.py index 7ce3e4cdf2e..7e2746be752 100755 --- a/Examples/Tests/collision/analysis_collision_2d.py +++ b/Examples/Tests/collision/analysis_collision_2d.py @@ -32,9 +32,6 @@ import post_processing_utils import yt -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - test_name = os.path.split(os.getcwd())[1] tolerance = 0.001 @@ -120,9 +117,3 @@ post_processing_utils.check_random_filter( last_fn, random_filter_fn, random_fraction, dim, species_name ) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/collision/analysis_collision_3d.py b/Examples/Tests/collision/analysis_collision_3d.py index 59c625d3cb8..c160d020cdc 100755 --- a/Examples/Tests/collision/analysis_collision_3d.py +++ b/Examples/Tests/collision/analysis_collision_3d.py @@ -25,16 +25,12 @@ import glob import math -import os import sys import numpy import post_processing_utils import yt -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - tolerance = 0.001 ng = 512 @@ -110,9 +106,3 @@ post_processing_utils.check_random_filter( last_fn, random_filter_fn, random_fraction, dim, species_name ) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/collision/analysis_collision_3d_isotropization.py b/Examples/Tests/collision/analysis_collision_3d_isotropization.py index 2cfe7f9fffd..2656c5bac4d 100755 --- a/Examples/Tests/collision/analysis_collision_3d_isotropization.py +++ b/Examples/Tests/collision/analysis_collision_3d_isotropization.py @@ -11,16 +11,12 @@ # https://smileipic.github.io/tutorials/advanced_collisions.html # https://smileipic.github.io/Smilei/Understand/collisions.html#test-cases-for-collisions -import os import sys import numpy as np import scipy.constants as sc import yt -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - e = sc.e pi = sc.pi ep0 = sc.epsilon_0 @@ -63,9 +59,3 @@ print(f"error = {error}") print(f"tolerance = {tolerance}") assert error < tolerance - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/collision/analysis_collision_rz.py b/Examples/Tests/collision/analysis_collision_rz.py index 2df2f6500d2..b37887943f8 100755 --- a/Examples/Tests/collision/analysis_collision_rz.py +++ b/Examples/Tests/collision/analysis_collision_rz.py @@ -16,16 +16,12 @@ # tolerance: 1.0e-30 # Possible running time: ~ 1.0 s -import os import sys 
from glob import glob import numpy as np import yt -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - tolerance = 1.0e-15 last_fn = sys.argv[1] @@ -54,10 +50,3 @@ print("error = ", error) print("tolerance = ", tolerance) assert error < tolerance - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - do_particles=False, -) diff --git a/Examples/Tests/collision/analysis_default_regression.py b/Examples/Tests/collision/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/collision/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/diff_lumi_diag/CMakeLists.txt b/Examples/Tests/diff_lumi_diag/CMakeLists.txt index 481847a023d..f16449a976c 100644 --- a/Examples/Tests/diff_lumi_diag/CMakeLists.txt +++ b/Examples/Tests/diff_lumi_diag/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_diff_lumi_diag_leptons # inputs - analysis.py # analysis - diags/diag1000080 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000080 --rtol 1e-2" # checksum OFF # dependency ) @@ -16,7 +16,7 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_diff_lumi_diag_photons # inputs - analysis.py # analysis - diags/diag1000080 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000080 --rtol 1e-2" # checksum OFF # dependency ) diff --git a/Examples/Tests/diff_lumi_diag/analysis.py b/Examples/Tests/diff_lumi_diag/analysis.py index 41501b1915d..cadb21023ab 100755 --- a/Examples/Tests/diff_lumi_diag/analysis.py +++ b/Examples/Tests/diff_lumi_diag/analysis.py @@ -5,14 +5,10 @@ # In that case, the differential luminosity can be calculated analytically. 
import os -import sys import numpy as np from read_raw_data import read_reduced_diags_histogram -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # Extract the differential luminosity from the file _, _, E_bin, bin_data = read_reduced_diags_histogram( "./diags/reducedfiles/DifferentialLuminosity_beam1_beam2.txt" @@ -55,10 +51,3 @@ print("Relative error: ", error) print("Tolerance: ", tol) assert error < tol - -# compare checksums -evaluate_checksum( - test_name=test_name, - output_file=sys.argv[1], - rtol=1e-2, -) diff --git a/Examples/Tests/diff_lumi_diag/analysis_default_regression.py b/Examples/Tests/diff_lumi_diag/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/diff_lumi_diag/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/divb_cleaning/CMakeLists.txt b/Examples/Tests/divb_cleaning/CMakeLists.txt index d4aae31472e..d851a7ca322 100644 --- a/Examples/Tests/divb_cleaning/CMakeLists.txt +++ b/Examples/Tests/divb_cleaning/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_divb_cleaning # inputs - analysis.py # analysis - diags/diag1000400 # output + "analysis.py diags/diag1000400" # analysis + "analysis_default_regression.py --path diags/diag1000400" # checksum OFF # dependency ) diff --git a/Examples/Tests/divb_cleaning/analysis.py b/Examples/Tests/divb_cleaning/analysis.py index d72226a01cc..6fcd8f6f755 100755 --- a/Examples/Tests/divb_cleaning/analysis.py +++ b/Examples/Tests/divb_cleaning/analysis.py @@ -8,15 +8,11 @@ import sys -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import os - import numpy as np import yt yt.funcs.mylog.setLevel(50) -from checksumAPI import evaluate_checksum from scipy.constants import c # Name of the last plotfile @@ -51,9 +47,3 @@ tolerance = 1e-1 assert rel_error < tolerance - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/divb_cleaning/analysis_default_regression.py b/Examples/Tests/divb_cleaning/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/divb_cleaning/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/dive_cleaning/CMakeLists.txt b/Examples/Tests/dive_cleaning/CMakeLists.txt index c23c2aef539..c5fe87baad0 100644 --- a/Examples/Tests/dive_cleaning/CMakeLists.txt +++ b/Examples/Tests/dive_cleaning/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_dive_cleaning # inputs - analysis.py # analysis - diags/diag1000128 # output + "analysis.py diags/diag1000128" # analysis + "analysis_default_regression.py --path diags/diag1000128" # checksum OFF # dependency ) @@ -16,7 +16,7 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_dive_cleaning # inputs - analysis.py # analysis - diags/diag1000128 # output + "analysis.py diags/diag1000128" # analysis + "analysis_default_regression.py --path diags/diag1000128" # checksum OFF # dependency ) diff --git a/Examples/Tests/dive_cleaning/analysis_default_regression.py b/Examples/Tests/dive_cleaning/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/dive_cleaning/analysis_default_regression.py @@ -0,0 +1 @@ 
+../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/effective_potential_electrostatic/CMakeLists.txt b/Examples/Tests/effective_potential_electrostatic/CMakeLists.txt index a6545e8c5f3..528ee6d1e08 100644 --- a/Examples/Tests/effective_potential_electrostatic/CMakeLists.txt +++ b/Examples/Tests/effective_potential_electrostatic/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_effective_potential_electrostatic_picmi.py # inputs - analysis.py # analysis - diags/field_diag/ # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/field_diag/" # checksum OFF # dependency ) diff --git a/Examples/Tests/effective_potential_electrostatic/analysis.py b/Examples/Tests/effective_potential_electrostatic/analysis.py index 5aa9b045af0..b51cd129252 100755 --- a/Examples/Tests/effective_potential_electrostatic/analysis.py +++ b/Examples/Tests/effective_potential_electrostatic/analysis.py @@ -7,9 +7,6 @@ # --- with the analytically calculated density based on the input parameters # --- of the test simulation at each output timestep. -import os -import sys - import dill import matplotlib.pyplot as plt import numpy as np @@ -79,12 +76,3 @@ def get_radial_function(field, info): plt.grid() plt.legend() plt.show() - -if len(sys.argv) > 1: - sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - import checksumAPI - - filename = sys.argv[1] - - test_name = os.path.split(os.getcwd())[1] - checksumAPI.evaluate_checksum(test_name, filename, output_format="openpmd") diff --git a/Examples/Tests/effective_potential_electrostatic/analysis_default_regression.py b/Examples/Tests/effective_potential_electrostatic/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/effective_potential_electrostatic/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/electrostatic_dirichlet_bc/CMakeLists.txt b/Examples/Tests/electrostatic_dirichlet_bc/CMakeLists.txt index 1325d1a6bf5..039181096a8 100644 --- a/Examples/Tests/electrostatic_dirichlet_bc/CMakeLists.txt +++ b/Examples/Tests/electrostatic_dirichlet_bc/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_dirichlet_bc # inputs - analysis.py # analysis - diags/diag1000100 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000100" # checksum OFF # dependency ) @@ -16,7 +16,7 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_dirichlet_bc_picmi.py # inputs - analysis.py # analysis - diags/diag1000100 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000100" # checksum OFF # dependency ) diff --git a/Examples/Tests/electrostatic_dirichlet_bc/analysis.py b/Examples/Tests/electrostatic_dirichlet_bc/analysis.py index 1b8f6923c1c..82fe061c3a8 100755 --- a/Examples/Tests/electrostatic_dirichlet_bc/analysis.py +++ b/Examples/Tests/electrostatic_dirichlet_bc/analysis.py @@ -14,16 +14,10 @@ # Possible running time: ~ 19 s import glob -import os -import re -import sys import numpy as np import yt -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - files = sorted(glob.glob("diags/diag1*"))[1:] assert len(files) > 0 @@ -45,11 +39,3 @@ assert np.allclose(potentials_lo, expected_potentials_lo, rtol=0.1) assert np.allclose(potentials_hi, expected_potentials_hi, rtol=0.1) - -# compare 
checksums -test_name = os.path.split(os.getcwd())[1] -test_name = re.sub("_picmi", "", test_name) # same checksums for PICMI test -evaluate_checksum( - test_name=test_name, - output_file=sys.argv[1], -) diff --git a/Examples/Tests/electrostatic_dirichlet_bc/analysis_default_regression.py b/Examples/Tests/electrostatic_dirichlet_bc/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/electrostatic_dirichlet_bc/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/electrostatic_sphere/CMakeLists.txt b/Examples/Tests/electrostatic_sphere/CMakeLists.txt index 3d17c4462f8..fc69ac8ba6e 100644 --- a/Examples/Tests/electrostatic_sphere/CMakeLists.txt +++ b/Examples/Tests/electrostatic_sphere/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_electrostatic_sphere # inputs - analysis_electrostatic_sphere.py # analysis - diags/diag1000030 # output + "analysis_electrostatic_sphere.py diags/diag1000030" # analysis + "analysis_default_regression.py --path diags/diag1000030" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_electrostatic_sphere_lab_frame # inputs - analysis_electrostatic_sphere.py # analysis - diags/diag1000030 # output + "analysis_electrostatic_sphere.py diags/diag1000030" # analysis + "analysis_default_regression.py --path diags/diag1000030" # checksum OFF # dependency ) @@ -26,8 +26,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_electrostatic_sphere_lab_frame_mr_emass_10 # inputs - analysis_electrostatic_sphere.py # analysis - diags/diag1000002 # output + "analysis_electrostatic_sphere.py diags/diag1000002" # analysis + "analysis_default_regression.py --path diags/diag1000002" # checksum OFF # dependency ) @@ -36,8 +36,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_electrostatic_sphere_rel_nodal # inputs - analysis_electrostatic_sphere.py # analysis - diags/diag1000030 # output + "analysis_electrostatic_sphere.py diags/diag1000030" # analysis + "analysis_default_regression.py --path diags/diag1000030" # checksum OFF # dependency ) @@ -46,8 +46,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_electrostatic_sphere_adaptive # inputs - analysis_electrostatic_sphere.py # analysis - diags/diag1000054 # output + "analysis_electrostatic_sphere.py diags/diag1000054" # analysis + "analysis_default_regression.py --path diags/diag1000054" # checksum OFF # dependency ) @@ -56,7 +56,7 @@ add_warpx_test( RZ # dims 2 # nprocs inputs_test_rz_electrostatic_sphere # inputs - analysis_electrostatic_sphere.py # analysis - diags/diag1000030 # output + "analysis_electrostatic_sphere.py diags/diag1000030" # analysis + "analysis_default_regression.py --path diags/diag1000030" # checksum OFF # dependency ) diff --git a/Examples/Tests/electrostatic_sphere/analysis_default_regression.py b/Examples/Tests/electrostatic_sphere/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/electrostatic_sphere/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py b/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py index dd15a6492f1..2176dcbd7c4 100755 --- a/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py +++ 
b/Examples/Tests/electrostatic_sphere/analysis_electrostatic_sphere.py @@ -28,9 +28,6 @@ from scipy.constants import c from scipy.optimize import fsolve -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - yt.funcs.mylog.setLevel(0) # Open plotfile specified in command line @@ -192,9 +189,3 @@ def return_energies(iteration): assert abs((Ek_i + Ep_i) - (Ek_f + Ep_f)) < 0.003 * ( Ek_i + Ep_i ) # Check conservation of energy - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/electrostatic_sphere_eb/CMakeLists.txt b/Examples/Tests/electrostatic_sphere_eb/CMakeLists.txt index 7f7b1389119..0511212c4d5 100644 --- a/Examples/Tests/electrostatic_sphere_eb/CMakeLists.txt +++ b/Examples/Tests/electrostatic_sphere_eb/CMakeLists.txt @@ -7,8 +7,8 @@ if(WarpX_EB) 3 # dims 2 # nprocs inputs_test_3d_electrostatic_sphere_eb # inputs - analysis.py # analysis - diags/diag1000001 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) endif() @@ -19,8 +19,8 @@ if(WarpX_EB) 3 # dims 2 # nprocs inputs_test_3d_electrostatic_sphere_eb_mixed_bc # inputs - analysis_default_regression.py # analysis - diags/diag1000001 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) endif() @@ -31,8 +31,8 @@ if(WarpX_EB) 3 # dims 2 # nprocs inputs_test_3d_electrostatic_sphere_eb_picmi.py # inputs - analysis.py # analysis - diags/diag1000002 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000002" # checksum OFF # dependency ) endif() @@ -43,8 +43,8 @@ if(WarpX_EB) RZ # dims 2 # nprocs inputs_test_rz_electrostatic_sphere_eb # inputs - analysis_rz.py # analysis - diags/diag1000001 # output + "analysis_rz.py diags/diag1000001" # analysis + "analysis_default_regression.py --path diags/diag1000001 --skip-particles" # checksum OFF # dependency ) endif() @@ -55,8 +55,8 @@ if(WarpX_EB) RZ # dims 2 # nprocs inputs_test_rz_electrostatic_sphere_eb_mr # inputs - analysis_rz_mr.py # analysis - diags/diag1/ # output + "analysis_rz_mr.py diags/diag1/" # analysis + "analysis_default_regression.py --path diags/diag1/" # checksum OFF # dependency ) endif() diff --git a/Examples/Tests/electrostatic_sphere_eb/analysis.py b/Examples/Tests/electrostatic_sphere_eb/analysis.py index e12070119ac..114db2871ee 100755 --- a/Examples/Tests/electrostatic_sphere_eb/analysis.py +++ b/Examples/Tests/electrostatic_sphere_eb/analysis.py @@ -4,13 +4,8 @@ # using the same reference file as for the non-PICMI test since the two # tests are otherwise the same. 
-import os -import sys - -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") # Check reduced diagnostics for charge on EB import numpy as np -from checksumAPI import evaluate_checksum from scipy.constants import epsilon_0 # Theoretical charge on the embedded boundary, for sphere at potential phi_0 @@ -27,9 +22,3 @@ data_eighth = np.loadtxt("diags/reducedfiles/eb_charge_one_eighth.txt") q_sim_eighth = data_eighth[1, 2] assert abs((q_sim_eighth - q_th / 8) / (q_th / 8)) < 0.06 - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py b/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py index e3976c95e68..be9033e2b14 100755 --- a/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py +++ b/Examples/Tests/electrostatic_sphere_eb/analysis_rz.py @@ -16,16 +16,12 @@ # tolerance: 0.004 # Possible running time: < 1 s -import os import sys import numpy as np import yt from unyt import m -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - tolerance = 0.0041 fn = sys.argv[1] @@ -66,10 +62,3 @@ print("max error of Er = ", errmax_Er) print("tolerance = ", tolerance) assert errmax_phi < tolerance and errmax_Er < tolerance - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - do_particles=False, -) diff --git a/Examples/Tests/electrostatic_sphere_eb/analysis_rz_mr.py b/Examples/Tests/electrostatic_sphere_eb/analysis_rz_mr.py index 586b35fc7a4..55365bd4c76 100755 --- a/Examples/Tests/electrostatic_sphere_eb/analysis_rz_mr.py +++ b/Examples/Tests/electrostatic_sphere_eb/analysis_rz_mr.py @@ -12,15 +12,11 @@ # Thus the analytical solution has the form: # phi(r) = A+B*log(r), Er(r) = -B/r. 
-import os import sys import numpy as np from openpmd_viewer import OpenPMDTimeSeries -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - tolerance = 0.004 print(f"tolerance = {tolerance}") @@ -109,10 +105,3 @@ def get_error_per_lev(ts, level): nlevels = 0 if level_fields == [] else int(level_fields[-1][-1]) for level in range(nlevels + 1): get_error_per_lev(ts, level) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - output_format="openpmd", -) diff --git a/Examples/Tests/embedded_boundary_cube/CMakeLists.txt b/Examples/Tests/embedded_boundary_cube/CMakeLists.txt index 0044ed04ec8..ac509955088 100644 --- a/Examples/Tests/embedded_boundary_cube/CMakeLists.txt +++ b/Examples/Tests/embedded_boundary_cube/CMakeLists.txt @@ -7,8 +7,8 @@ if(WarpX_EB) 2 # dims 1 # nprocs inputs_test_2d_embedded_boundary_cube # inputs - analysis_fields_2d.py # analysis - diags/diag1000114 # output + "analysis_fields_2d.py diags/diag1000114" # analysis + "analysis_default_regression.py --path diags/diag1000114" # checksum OFF # dependency ) endif() @@ -19,8 +19,8 @@ if(WarpX_EB) 3 # dims 1 # nprocs inputs_test_3d_embedded_boundary_cube # inputs - analysis_fields.py # analysis - diags/diag1000208 # output + "analysis_fields.py diags/diag1000208" # analysis + "analysis_default_regression.py --path diags/diag1000208" # checksum OFF # dependency ) endif() @@ -31,8 +31,8 @@ if(WarpX_EB) 3 # dims 1 # nprocs inputs_test_3d_embedded_boundary_cube_macroscopic # inputs - analysis_fields.py # analysis - diags/diag1000208 # output + "analysis_fields.py diags/diag1000208" # analysis + "analysis_default_regression.py --path diags/diag1000208" # checksum OFF # dependency ) endif() diff --git a/Examples/Tests/embedded_boundary_cube/analysis_default_regression.py b/Examples/Tests/embedded_boundary_cube/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/embedded_boundary_cube/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/embedded_boundary_cube/analysis_fields.py b/Examples/Tests/embedded_boundary_cube/analysis_fields.py index 3202ccfaca2..4cb4a60f603 100755 --- a/Examples/Tests/embedded_boundary_cube/analysis_fields.py +++ b/Examples/Tests/embedded_boundary_cube/analysis_fields.py @@ -8,9 +8,6 @@ import yt from scipy.constants import c, mu_0, pi -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # This is a script that analyses the simulation results from # the script `inputs_3d`. This simulates a TMmnp mode in a PEC cubic resonator. 
# The magnetic field in the simulation is given (in theory) by: @@ -109,9 +106,3 @@ Bz_sim = data[("mesh", "Bz")].to_ndarray() rel_err_z = np.sqrt(np.sum(np.square(Bz_sim - Bz_th)) / np.sum(np.square(Bz_th))) assert rel_err_z < rel_tol_err - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py b/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py index 454d78169b7..bb35ad93cb8 100755 --- a/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py +++ b/Examples/Tests/embedded_boundary_cube/analysis_fields_2d.py @@ -1,15 +1,11 @@ #!/usr/bin/env python3 -import os import sys import numpy as np import yt from scipy.constants import c, mu_0, pi -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # This is a script that analyses the simulation results from # the script `inputs_3d`. This simulates a TMmnp mode in a PEC cubic resonator. # The magnetic field in the simulation is given (in theory) by: @@ -62,9 +58,3 @@ # Compute relative l^2 error on Ey Ey_sim = data["Ey"].to_ndarray() rel_err_y = np.sqrt(np.sum(np.square(Ey_sim / c - By_th)) / np.sum(np.square(By_th))) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/embedded_boundary_diffraction/CMakeLists.txt b/Examples/Tests/embedded_boundary_diffraction/CMakeLists.txt index 6297cf1fa5c..456e9f9b630 100644 --- a/Examples/Tests/embedded_boundary_diffraction/CMakeLists.txt +++ b/Examples/Tests/embedded_boundary_diffraction/CMakeLists.txt @@ -7,8 +7,8 @@ if(WarpX_EB) RZ # dims 2 # nprocs inputs_test_rz_embedded_boundary_diffraction # inputs - analysis_fields.py # analysis - diags/diag1/ # output + "analysis_fields.py diags/diag1/" # analysis + "analysis_default_regression.py --path diags/diag1/" # checksum OFF # dependency ) endif() diff --git a/Examples/Tests/embedded_boundary_diffraction/analysis_default_regression.py b/Examples/Tests/embedded_boundary_diffraction/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/embedded_boundary_diffraction/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py b/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py index 8f0b7818516..599bcea71f9 100755 --- a/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py +++ b/Examples/Tests/embedded_boundary_diffraction/analysis_fields.py @@ -7,16 +7,12 @@ theta_diffraction = 1.22 * lambda / d """ -import os import sys import numpy as np from openpmd_viewer import OpenPMDTimeSeries from scipy.ndimage import gaussian_filter1d -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - filename = sys.argv[1] ts = OpenPMDTimeSeries(filename) @@ -41,10 +37,3 @@ def r_first_minimum(iz): # Check that this corresponds to the prediction from the Airy pattern theta_diffraction = np.arcsin(1.22 * 0.1 / 0.4) / 2 assert np.all(abs(r[50:] - theta_diffraction * info.z[50:]) < 0.03) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - output_format="openpmd", -) diff --git a/Examples/Tests/embedded_boundary_python_api/CMakeLists.txt 
b/Examples/Tests/embedded_boundary_python_api/CMakeLists.txt index fe820c76f22..3e79e526218 100644 --- a/Examples/Tests/embedded_boundary_python_api/CMakeLists.txt +++ b/Examples/Tests/embedded_boundary_python_api/CMakeLists.txt @@ -7,8 +7,8 @@ if(WarpX_EB) 3 # dims 1 # nprocs inputs_test_3d_embedded_boundary_picmi.py # inputs - analysis.py # analysis - diags/diag1000002 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000002" # checksum OFF # dependency ) endif() diff --git a/Examples/Tests/embedded_boundary_python_api/analysis.py b/Examples/Tests/embedded_boundary_python_api/analysis.py deleted file mode 100755 index 7fda682f618..00000000000 --- a/Examples/Tests/embedded_boundary_python_api/analysis.py +++ /dev/null @@ -1,19 +0,0 @@ -#!/usr/bin/env python3 - -# This script just checks that the PICMI file executed successfully. -# If it did there will be a plotfile for the final step. - -import os -import sys - -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - -step = int(sys.argv[1][-5:]) -assert step == 2 - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/embedded_boundary_python_api/analysis_default_regression.py b/Examples/Tests/embedded_boundary_python_api/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/embedded_boundary_python_api/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/embedded_boundary_rotated_cube/CMakeLists.txt b/Examples/Tests/embedded_boundary_rotated_cube/CMakeLists.txt index fcfe97905d8..cb7fa405210 100644 --- a/Examples/Tests/embedded_boundary_rotated_cube/CMakeLists.txt +++ b/Examples/Tests/embedded_boundary_rotated_cube/CMakeLists.txt @@ -7,8 +7,8 @@ if(WarpX_EB) 2 # dims 1 # nprocs inputs_test_2d_embedded_boundary_rotated_cube # inputs - analysis_fields_2d.py # analysis - diags/diag1000068 # output + "analysis_fields_2d.py diags/diag1000068" # analysis + "analysis_default_regression.py --path diags/diag1000068" # checksum OFF # dependency ) endif() @@ -19,8 +19,8 @@ if(WarpX_EB) 3 # dims 1 # nprocs inputs_test_3d_embedded_boundary_rotated_cube # inputs - analysis_fields_3d.py # analysis - diags/diag1000111 # output + "analysis_fields_3d.py diags/diag1000111" # analysis + "analysis_default_regression.py --path diags/diag1000111" # checksum OFF # dependency ) endif() diff --git a/Examples/Tests/embedded_boundary_rotated_cube/analysis_default_regression.py b/Examples/Tests/embedded_boundary_rotated_cube/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/embedded_boundary_rotated_cube/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py index 451913fd54d..dbb74b174e7 100755 --- a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py +++ b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_2d.py @@ -1,15 +1,11 @@ #!/usr/bin/env python3 -import os import sys import numpy as np import yt from scipy.constants import c, mu_0, pi -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # This is 
a script that analyses the simulation results from # the script `inputs_3d`. This simulates a TMmnp mode in a PEC cubic resonator. # The magnetic field in the simulation is given (in theory) by: @@ -67,9 +63,3 @@ # Compute relative l^2 error on By rel_err_y = np.sqrt(np.sum(np.square(By_sim - By_th)) / np.sum(np.square(By_th))) assert rel_err_y < rel_tol_err - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_3d.py b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_3d.py index 838c9c82479..00d1ba2280f 100755 --- a/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_3d.py +++ b/Examples/Tests/embedded_boundary_rotated_cube/analysis_fields_3d.py @@ -7,16 +7,12 @@ # License: BSD-3-Clause-LBNL -import os import sys import numpy as np import yt from scipy.constants import c, mu_0, pi -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # This is a script that analyses the simulation results from # the script `inputs_3d`. This simulates a TMmnp mode in a PEC cubic resonator rotated by pi/8. # The magnetic field in the simulation is given (in theory) by: @@ -143,9 +139,3 @@ np.sum(np.square(Bz_sim[:, :, :, 0] - Bz_th)) / np.sum(np.square(Bz_th)) ) assert rel_err_z < rel_tol_err - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/embedded_circle/CMakeLists.txt b/Examples/Tests/embedded_circle/CMakeLists.txt index 4b9ee426569..1a0577da82e 100644 --- a/Examples/Tests/embedded_circle/CMakeLists.txt +++ b/Examples/Tests/embedded_circle/CMakeLists.txt @@ -7,8 +7,8 @@ if(WarpX_EB) 2 # dims 2 # nprocs inputs_test_2d_embedded_circle # inputs - analysis.py # analysis - diags/diag1000011 + OFF # analysis + "analysis_default_regression.py --path diags/diag1000011 --rtol 1e-2" # checksum OFF # dependency ) endif() diff --git a/Examples/Tests/embedded_circle/analysis.py b/Examples/Tests/embedded_circle/analysis.py deleted file mode 100755 index d1bb04fedb6..00000000000 --- a/Examples/Tests/embedded_circle/analysis.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys - -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - rtol=1e-2, -) diff --git a/Examples/Tests/embedded_circle/analysis_default_regression.py b/Examples/Tests/embedded_circle/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/embedded_circle/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/energy_conserving_thermal_plasma/CMakeLists.txt b/Examples/Tests/energy_conserving_thermal_plasma/CMakeLists.txt index c89d439b75e..a925cc537f4 100644 --- a/Examples/Tests/energy_conserving_thermal_plasma/CMakeLists.txt +++ b/Examples/Tests/energy_conserving_thermal_plasma/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_energy_conserving_thermal_plasma # inputs - analysis.py # analysis - diags/diag1000500 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000500" # checksum OFF # dependency ) diff --git 
a/Examples/Tests/energy_conserving_thermal_plasma/analysis.py b/Examples/Tests/energy_conserving_thermal_plasma/analysis.py index 0d29f85e7eb..5991d888e20 100755 --- a/Examples/Tests/energy_conserving_thermal_plasma/analysis.py +++ b/Examples/Tests/energy_conserving_thermal_plasma/analysis.py @@ -12,14 +12,8 @@ # than other gathering scheme. This test checks that the energy does not increase by # more than 0.3% over the duration of the simulation. -import os -import sys - import numpy as np -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # Get energy as a function of time, from reduced diagnostics EFdata = np.genfromtxt("./diags/reducedfiles/EF.txt") # Field energy EPdata = np.genfromtxt("./diags/reducedfiles/EP.txt") # Particle energy @@ -29,9 +23,3 @@ print(abs(E - E[0]) / E[0]) # Check that the energy is conserved to 0.3% assert np.all(abs(E - E[0]) / E[0] < 0.003) - -# compare checksums -evaluate_checksum( -    test_name=os.path.split(os.getcwd())[1], -    output_file=sys.argv[1], -) diff --git a/Examples/Tests/field_ionization/CMakeLists.txt b/Examples/Tests/field_ionization/CMakeLists.txt index 9154173ac5f..71e34dbc5fc 100644 --- a/Examples/Tests/field_ionization/CMakeLists.txt +++ b/Examples/Tests/field_ionization/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_ionization_boost # inputs - analysis.py # analysis - diags/diag1000420 # output + "analysis.py diags/diag1000420" # analysis + "analysis_default_regression.py --path diags/diag1000420" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_ionization_lab # inputs - analysis.py # analysis - diags/diag1001600 # output + "analysis.py diags/diag1001600" # analysis + "analysis_default_regression.py --path diags/diag1001600" # checksum OFF # dependency ) @@ -26,7 +26,7 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_ionization_picmi.py # inputs - analysis.py # analysis - diags/diag1001600 # output + "analysis.py diags/diag1001600" # analysis + "analysis_default_regression.py --path diags/diag1001600" # checksum OFF # dependency ) diff --git a/Examples/Tests/field_ionization/analysis.py b/Examples/Tests/field_ionization/analysis.py index a02c293601b..bafc47f2145 100755 --- a/Examples/Tests/field_ionization/analysis.py +++ b/Examples/Tests/field_ionization/analysis.py @@ -18,15 +18,12 @@ ions are N5+, in agreement with theory from Chen's article. """ -import os import sys import numpy as np import yt yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum # Open plotfile specified in command line
filename = sys.argv[1] @@ -106,9 +103,3 @@ print("particle_orig_z has reasonable values") except yt.utilities.exceptions.YTFieldNotFound: pass # The backtransformed diagnostic version of the test does not have orig_z - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/field_ionization/analysis_default_regression.py b/Examples/Tests/field_ionization/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/field_ionization/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/field_probe/CMakeLists.txt b/Examples/Tests/field_probe/CMakeLists.txt index bbddbd7839e..8b052dc3b66 100644 --- a/Examples/Tests/field_probe/CMakeLists.txt +++ b/Examples/Tests/field_probe/CMakeLists.txt @@ -7,8 +7,8 @@ if(WarpX_EB) 2 # dims 2 # nprocs inputs_test_2d_field_probe # inputs - analysis.py # analysis - diags/diag1000544 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000544" # checksum OFF # dependency ) endif() diff --git a/Examples/Tests/field_probe/analysis.py b/Examples/Tests/field_probe/analysis.py index e974e284b65..57085fb7cdc 100755 --- a/Examples/Tests/field_probe/analysis.py +++ b/Examples/Tests/field_probe/analysis.py @@ -18,15 +18,9 @@ which can be solved analytically. """ -import os -import sys - import numpy as np import pandas as pd -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - filename = "diags/reducedfiles/FP_line.txt" # Open data file @@ -65,9 +59,3 @@ def I_envelope(x, lam=0.2e-6, a=0.3e-6, D=1.7e-6): print("Average error greater than 2.5%") assert averror < 2.5 - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/field_probe/analysis_default_regression.py b/Examples/Tests/field_probe/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/field_probe/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/flux_injection/CMakeLists.txt b/Examples/Tests/flux_injection/CMakeLists.txt index 0929fc3d4c4..000d5c74917 100644 --- a/Examples/Tests/flux_injection/CMakeLists.txt +++ b/Examples/Tests/flux_injection/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_flux_injection # inputs - analysis_flux_injection_3d.py # analysis - diags/diag1000002 # output + "analysis_flux_injection_3d.py diags/diag1000002" # analysis + "analysis_default_regression.py --path diags/diag1000002" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( RZ # dims 2 # nprocs inputs_test_rz_flux_injection # inputs - analysis_flux_injection_rz.py # analysis - diags/diag1000120 # output + "analysis_flux_injection_rz.py diags/diag1000120" # analysis + "analysis_default_regression.py --path diags/diag1000120" # checksum OFF # dependency ) @@ -26,8 +26,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_flux_injection_from_eb # inputs - analysis_flux_injection_from_eb.py # analysis - diags/diag1000010 # output + "analysis_flux_injection_from_eb.py diags/diag1000010" # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) @@ -36,8 +36,8 @@ add_warpx_test( RZ # dims 2 # nprocs 
inputs_test_rz_flux_injection_from_eb # inputs - analysis_flux_injection_from_eb.py # analysis - diags/diag1000010 # output + "analysis_flux_injection_from_eb.py diags/diag1000010" # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) @@ -46,7 +46,7 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_flux_injection_from_eb # inputs - analysis_flux_injection_from_eb.py # analysis - diags/diag1000010 # output + "analysis_flux_injection_from_eb.py diags/diag1000010" # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) diff --git a/Examples/Tests/flux_injection/analysis_default_regression.py b/Examples/Tests/flux_injection/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/flux_injection/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/flux_injection/analysis_flux_injection_3d.py b/Examples/Tests/flux_injection/analysis_flux_injection_3d.py index dc89780703d..53baf9511f4 100755 --- a/Examples/Tests/flux_injection/analysis_flux_injection_3d.py +++ b/Examples/Tests/flux_injection/analysis_flux_injection_3d.py @@ -21,7 +21,6 @@ of space) """ -import os import sys import matplotlib.pyplot as plt @@ -30,9 +29,6 @@ from scipy.constants import c, m_e, m_p from scipy.special import erf -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - yt.funcs.mylog.setLevel(0) # Open plotfile specified in command line @@ -146,9 +142,3 @@ def compare_gaussian_flux(u, w, u_th, u_m, label=""): plt.tight_layout() plt.savefig("Distribution.png") - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py b/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py index c9e1c6df42c..0f2a37eea71 100755 --- a/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py +++ b/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py @@ -13,7 +13,6 @@ the particle distributions are consistent with the expected distributions. """ -import os import re import sys @@ -23,9 +22,6 @@ from scipy.constants import c, m_e from scipy.special import erf -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - yt.funcs.mylog.setLevel(0) # Open plotfile specified in command line @@ -155,7 +151,3 @@ def compare_gaussian_flux(u, w, u_th, u_m, label=""): plt.tight_layout() plt.savefig("Distribution.png") - -# Verify checksum -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/flux_injection/analysis_flux_injection_rz.py b/Examples/Tests/flux_injection/analysis_flux_injection_rz.py index 33b487cc36b..170fb08128d 100755 --- a/Examples/Tests/flux_injection/analysis_flux_injection_rz.py +++ b/Examples/Tests/flux_injection/analysis_flux_injection_rz.py @@ -25,15 +25,11 @@ - The total number of electrons corresponds to the expected flux. 
""" -import os import sys import numpy as np import yt -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - yt.funcs.mylog.setLevel(0) # Open plotfile specified in command line @@ -56,9 +52,3 @@ assert np.allclose(w.sum(), n_tot, rtol=0.05) # Check that the particles are at the right radius assert np.all((r >= 1.48) & (r <= 1.92)) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/gaussian_beam/CMakeLists.txt b/Examples/Tests/gaussian_beam/CMakeLists.txt index ae0cf57ed15..2a1f4918458 100644 --- a/Examples/Tests/gaussian_beam/CMakeLists.txt +++ b/Examples/Tests/gaussian_beam/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_focusing_gaussian_beam # inputs - analysis.py # analysis - diags/diag1000000 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000000" # checksum OFF # dependency ) @@ -16,7 +16,7 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_gaussian_beam_picmi.py # inputs - analysis_default_regression.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) diff --git a/Examples/Tests/gaussian_beam/analysis.py b/Examples/Tests/gaussian_beam/analysis.py index a2278b2cf7a..a5a6caf8e42 100755 --- a/Examples/Tests/gaussian_beam/analysis.py +++ b/Examples/Tests/gaussian_beam/analysis.py @@ -7,15 +7,9 @@ # License: BSD-3-Clause-LBNL -import os -import sys - import numpy as np -from scipy.constants import c, eV, m_e, micro, nano - -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum from openpmd_viewer import OpenPMDTimeSeries +from scipy.constants import c, eV, m_e, micro, nano GeV = 1e9 * eV energy = 125.0 * GeV @@ -67,9 +61,3 @@ def s(z, sigma0, emit): assert np.allclose(sx, sx_theory, rtol=0.051, atol=0) assert np.allclose(sy, sy_theory, rtol=0.038, atol=0) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/implicit/CMakeLists.txt b/Examples/Tests/implicit/CMakeLists.txt index eeb1ff87804..e4bde9bbeaf 100644 --- a/Examples/Tests/implicit/CMakeLists.txt +++ b/Examples/Tests/implicit/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 1 # dims 2 # nprocs inputs_test_1d_semi_implicit_picard # inputs - analysis_1d.py # analysis - diags/diag1000100 # output + "analysis_1d.py" # analysis + "analysis_default_regression.py --path diags/diag1000100" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 1 # dims 2 # nprocs inputs_test_1d_theta_implicit_picard # inputs - analysis_1d.py # analysis - diags/diag1000100 # output + "analysis_1d.py" # analysis + "analysis_default_regression.py --path diags/diag1000100" # checksum OFF # dependency ) @@ -26,8 +26,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_theta_implicit_jfnk_vandb # inputs - analysis_vandb_jfnk_2d.py # analysis - diags/diag1000020 # output + "analysis_vandb_jfnk_2d.py diags/diag1000020" # analysis + "analysis_default_regression.py --path diags/diag1000020" # checksum OFF # dependency ) @@ -36,8 +36,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_theta_implicit_jfnk_vandb_filtered # inputs - analysis_vandb_jfnk_2d.py # analysis - diags/diag1000020 # output + "analysis_vandb_jfnk_2d.py diags/diag1000020" # analysis + "analysis_default_regression.py --path 
diags/diag1000020" # checksum OFF # dependency ) @@ -46,8 +46,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_theta_implicit_jfnk_vandb_picmi.py # inputs - analysis_vandb_jfnk_2d.py # analysis - diags/diag1000020 # output + "analysis_vandb_jfnk_2d.py diags/diag1000020" # analysis + "analysis_default_regression.py --path diags/diag1000020" # checksum OFF # dependency ) @@ -57,8 +57,8 @@ if(WarpX_FFT) 2 # dims 2 # nprocs inputs_test_2d_theta_implicit_strang_psatd # inputs - analysis_2d_psatd.py # analysis - diags/diag1000020 # output + "analysis_2d_psatd.py" # analysis + "analysis_default_regression.py --path diags/diag1000020" # checksum OFF # dependency ) endif() diff --git a/Examples/Tests/implicit/analysis_1d.py b/Examples/Tests/implicit/analysis_1d.py index 665fcaac951..aa54cd279ce 100755 --- a/Examples/Tests/implicit/analysis_1d.py +++ b/Examples/Tests/implicit/analysis_1d.py @@ -11,13 +11,9 @@ # the script `inputs_1d`. This simulates a 1D periodic plasma using the implicit solver. import os import re -import sys import numpy as np -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - field_energy = np.loadtxt("diags/reducedfiles/field_energy.txt", skiprows=1) particle_energy = np.loadtxt("diags/reducedfiles/particle_energy.txt", skiprows=1) @@ -37,9 +33,3 @@ print(f"tolerance: {tolerance_rel}") assert max_delta_E < tolerance_rel - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/implicit/analysis_2d_psatd.py b/Examples/Tests/implicit/analysis_2d_psatd.py index 3ccc3880189..507fc6f2c4a 100755 --- a/Examples/Tests/implicit/analysis_2d_psatd.py +++ b/Examples/Tests/implicit/analysis_2d_psatd.py @@ -10,17 +10,9 @@ # This is a script that analyses the simulation results from the script `inputs_vandb_2d`. # This simulates a 2D periodic plasma using the implicit solver # with the Villasenor deposition using shape factor 2. -import os -import sys import numpy as np -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import checksumAPI - -# this will be the name of the plot file -fn = sys.argv[1] - field_energy = np.loadtxt("diags/reducedfiles/field_energy.txt", skiprows=1) particle_energy = np.loadtxt("diags/reducedfiles/particle_energy.txt", skiprows=1) @@ -36,6 +28,3 @@ print(f"tolerance: {tolerance_rel_energy}") assert max_delta_E < tolerance_rel_energy - -test_name = os.path.split(os.getcwd())[1] -checksumAPI.evaluate_checksum(test_name, fn) diff --git a/Examples/Tests/implicit/analysis_default_regression.py b/Examples/Tests/implicit/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/implicit/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/implicit/analysis_vandb_jfnk_2d.py b/Examples/Tests/implicit/analysis_vandb_jfnk_2d.py index 29a2c870574..dcbacdfde1f 100755 --- a/Examples/Tests/implicit/analysis_vandb_jfnk_2d.py +++ b/Examples/Tests/implicit/analysis_vandb_jfnk_2d.py @@ -10,19 +10,12 @@ # This is a script that analyses the simulation results from the script `inputs_vandb_2d`. # This simulates a 2D periodic plasma using the implicit solver # with the Villasenor deposition using shape factor 2. 
-import os
 import sys
 
 import numpy as np
 import yt
 from scipy.constants import e, epsilon_0
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
-# this will be the name of the plot file
-fn = sys.argv[1]
-
 field_energy = np.loadtxt("diags/reducedfiles/field_energy.txt", skiprows=1)
 particle_energy = np.loadtxt("diags/reducedfiles/particle_energy.txt", skiprows=1)
 
@@ -65,9 +58,3 @@
 print(f"tolerance: {tolerance_rel_charge}")
 
 assert drho_rms < tolerance_rel_charge
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/initial_distribution/CMakeLists.txt b/Examples/Tests/initial_distribution/CMakeLists.txt
index 04af9708cb2..06fce4dddcb 100644
--- a/Examples/Tests/initial_distribution/CMakeLists.txt
+++ b/Examples/Tests/initial_distribution/CMakeLists.txt
@@ -6,7 +6,7 @@ add_warpx_test(
     3 # dims
     1 # nprocs
     inputs_test_3d_initial_distribution # inputs
-    analysis.py # analysis
-    diags/diag1000001 # output
+    "analysis.py" # analysis
+    "analysis_default_regression.py --path diags/diag1000001" # checksum
     OFF # dependency
 )
diff --git a/Examples/Tests/initial_distribution/analysis.py b/Examples/Tests/initial_distribution/analysis.py
index 834934df255..8b2c8ca74e2 100755
--- a/Examples/Tests/initial_distribution/analysis.py
+++ b/Examples/Tests/initial_distribution/analysis.py
@@ -18,17 +18,11 @@
 # 9 denotes gaussian_parser distribution w/ spatially-varying mean and thermal velocity
 # The distribution is obtained through reduced diagnostic ParticleHistogram.
 
-import os
-import sys
-
 import numpy as np
 import scipy.constants as scc
 import scipy.special as scs
 from read_raw_data import read_reduced_diags, read_reduced_diags_histogram
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # print tolerance
 tolerance = 0.02
 print("Tolerance:", tolerance)
@@ -448,9 +442,3 @@ def Gaussian(mean, sigma, u):
 print("gaussian_parse_momentum_function velocity difference:", f9_error)
 
 assert f9_error < tolerance
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/initial_distribution/analysis_default_regression.py b/Examples/Tests/initial_distribution/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/initial_distribution/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/initial_plasma_profile/CMakeLists.txt b/Examples/Tests/initial_plasma_profile/CMakeLists.txt
index eb45e64dfab..064bbc29907 100644
--- a/Examples/Tests/initial_plasma_profile/CMakeLists.txt
+++ b/Examples/Tests/initial_plasma_profile/CMakeLists.txt
@@ -6,7 +6,7 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_parabolic_channel_initialization # inputs
-    analysis.py # analysis
-    diags/diag1000001 # output
+    OFF # analysis
+    "analysis_default_regression.py --path diags/diag1000001 --skip-particles --rtol 1e-4" # checksum
     OFF # dependency
 )
diff --git a/Examples/Tests/initial_plasma_profile/analysis.py b/Examples/Tests/initial_plasma_profile/analysis.py
deleted file mode 100755
index d372bd30a93..00000000000
--- a/Examples/Tests/initial_plasma_profile/analysis.py
+++ /dev/null
@@ -1,21 +0,0 @@
-#!/usr/bin/env python3
-
-# Copyright 2020 Michael Rowan
-#
-# This file is part of WarpX.
-#
-# License: BSD-3-Clause-LBNL
-
-import os
-import sys
-
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-    rtol=1e-4,
-    do_particles=False,
-)
diff --git a/Examples/Tests/initial_plasma_profile/analysis_default_regression.py b/Examples/Tests/initial_plasma_profile/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/initial_plasma_profile/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/ion_stopping/CMakeLists.txt b/Examples/Tests/ion_stopping/CMakeLists.txt
index 83e15287e18..666b28244dd 100644
--- a/Examples/Tests/ion_stopping/CMakeLists.txt
+++ b/Examples/Tests/ion_stopping/CMakeLists.txt
@@ -6,7 +6,7 @@ add_warpx_test(
     3 # dims
     1 # nprocs
     inputs_test_3d_ion_stopping # inputs
-    analysis.py # analysis
-    diags/diag1000010 # output
+    "analysis.py diags/diag1000010" # analysis
+    "analysis_default_regression.py --path diags/diag1000010" # checksum
     OFF # dependency
 )
diff --git a/Examples/Tests/ion_stopping/analysis.py b/Examples/Tests/ion_stopping/analysis.py
index 45983538025..6b92bb304a5 100755
--- a/Examples/Tests/ion_stopping/analysis.py
+++ b/Examples/Tests/ion_stopping/analysis.py
@@ -11,16 +11,12 @@
 # is used in the C++ to check the resulting
 # particle energies.
 
-import os
 import sys
 
 import numpy as np
 import yt
 from scipy.constants import e, epsilon_0, k, m_e, m_p
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # Define constants using the WarpX names for the evals below
 q_e = e
 kb = k
@@ -193,9 +189,3 @@ def stopping_from_ions(dt, ni, Ti, mi, Zi, Zb, ion_mass, ion_energy):
 assert np.all(error2 < tolerance)
 assert np.all(error3 < tolerance)
 assert np.all(error4 < tolerance)
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/ion_stopping/analysis_default_regression.py b/Examples/Tests/ion_stopping/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/ion_stopping/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/langmuir/CMakeLists.txt b/Examples/Tests/langmuir/CMakeLists.txt
index b259083c695..c01fed9125a 100644
--- a/Examples/Tests/langmuir/CMakeLists.txt
+++ b/Examples/Tests/langmuir/CMakeLists.txt
@@ -6,8 +6,8 @@ add_warpx_test(
     1 # dims
     2 # nprocs
     inputs_test_1d_langmuir_multi # inputs
-    analysis_1d.py # analysis
-    diags/diag1000080 # output
+    "analysis_1d.py diags/diag1000080" # analysis
+    "analysis_default_regression.py --path diags/diag1000080" # checksum
     OFF # dependency
 )
 
@@ -16,8 +16,8 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_langmuir_multi # inputs
-    analysis_2d.py # analysis
-    diags/diag1000080 # output
+    "analysis_2d.py diags/diag1000080" # analysis
+    "analysis_default_regression.py --path diags/diag1000080" # checksum
     OFF # dependency
 )
 
@@ -26,8 +26,8 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_langmuir_multi_mr # inputs
-    analysis_2d.py # analysis
-    diags/diag1000080 # output
+    "analysis_2d.py diags/diag1000080" # analysis
+    "analysis_default_regression.py --path diags/diag1000080" # checksum
     OFF # dependency
 )
 
@@ -36,8 +36,8 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_langmuir_multi_mr_anisotropic # inputs
-    analysis_2d.py # analysis
-    diags/diag1000080 # output
+    "analysis_2d.py diags/diag1000080" # analysis
+    "analysis_default_regression.py --path diags/diag1000080" # checksum
     OFF # dependency
 )
 
@@ -46,8 +46,8 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_langmuir_multi_mr_momentum_conserving # inputs
-    analysis_2d.py # analysis
-    diags/diag1000080 # output
+    "analysis_2d.py diags/diag1000080" # analysis
+    "analysis_default_regression.py --path diags/diag1000080" # checksum
     OFF # dependency
 )
 
@@ -57,8 +57,8 @@ if(WarpX_FFT)
         2 # dims
         2 # nprocs
         inputs_test_2d_langmuir_multi_mr_psatd # inputs
-        analysis_2d.py # analysis
-        diags/diag1000080 # output
+        "analysis_2d.py diags/diag1000080" # analysis
+        "analysis_default_regression.py --path diags/diag1000080" # checksum
         OFF # dependency
     )
 endif()
 
@@ -68,8 +68,8 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_langmuir_multi_nodal # inputs
-    analysis_2d.py # analysis
-    diags/diag1000080 # output
+    "analysis_2d.py diags/diag1000080" # analysis
+    "analysis_default_regression.py --path diags/diag1000080" # checksum
     OFF # dependency
 )
 
@@ -78,8 +78,8 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_langmuir_multi_picmi.py # inputs
-    analysis_default_regression.py # analysis
-    diags/diag1000040 # output
+    OFF # analysis
+    "analysis_default_regression.py --path diags/diag1000040" # checksum
     OFF # dependency
 )
 
@@ -89,8 +89,8 @@ if(WarpX_FFT)
        2 # dims
        2 # nprocs
        inputs_test_2d_langmuir_multi_psatd # inputs
-        analysis_2d.py # analysis
-        diags/diag1000080 # output
+        "analysis_2d.py diags/diag1000080" # analysis
+        "analysis_default_regression.py --path diags/diag1000080" # checksum
         OFF # dependency
     )
 endif()
 
@@ -101,8 +101,8 @@ if(WarpX_FFT)
         2 # dims
         1 # nprocs
         inputs_test_2d_langmuir_multi_psatd_current_correction # inputs
-        analysis_2d.py # analysis
-        diags/diag1000080 # output
+        "analysis_2d.py diags/diag1000080" # analysis
+        "analysis_default_regression.py --path diags/diag1000080" # checksum
         OFF # dependency
     )
 endif()
 
@@ -113,8 +113,8 @@ if(WarpX_FFT)
         2 # dims
         1 # nprocs
         inputs_test_2d_langmuir_multi_psatd_current_correction_nodal # inputs
-        analysis_2d.py # analysis
-        diags/diag1000080 # output
+        "analysis_2d.py diags/diag1000080" # analysis
+        "analysis_default_regression.py --path diags/diag1000080" # checksum
         OFF # dependency
     )
 endif()
 
@@ -125,8 +125,8 @@ if(WarpX_FFT)
         2 # dims
         2 # nprocs
         inputs_test_2d_langmuir_multi_psatd_momentum_conserving # inputs
-        analysis_2d.py # analysis
-        diags/diag1000080 # output
+        "analysis_2d.py diags/diag1000080" # analysis
+        "analysis_default_regression.py --path diags/diag1000080" # checksum
         OFF # dependency
     )
 endif()
 
@@ -137,8 +137,8 @@ if(WarpX_FFT)
         2 # dims
         2 # nprocs
         inputs_test_2d_langmuir_multi_psatd_multiJ # inputs
-        analysis_2d.py # analysis
-        diags/diag1000080 # output
+        "analysis_2d.py diags/diag1000080" # analysis
+        "analysis_default_regression.py --path diags/diag1000080" # checksum
         OFF # dependency
     )
 endif()
 
@@ -149,8 +149,8 @@ if(WarpX_FFT)
         2 # dims
         2 # nprocs
         inputs_test_2d_langmuir_multi_psatd_multiJ_nodal # inputs
-        analysis_2d.py # analysis
-        diags/diag1000080 # output
+        "analysis_2d.py diags/diag1000080" # analysis
+        "analysis_default_regression.py --path diags/diag1000080" # checksum
         OFF # dependency
     )
 endif()
 
@@ -161,8 +161,8 @@ if(WarpX_FFT)
         2 # dims
         2 # nprocs
         inputs_test_2d_langmuir_multi_psatd_nodal # inputs
-        analysis_2d.py # analysis
-        diags/diag1000080 # output
+        "analysis_2d.py diags/diag1000080" # analysis
+        "analysis_default_regression.py --path diags/diag1000080" # checksum
         OFF # dependency
     )
 endif()
 
@@ -173,8 +173,8 @@ if(WarpX_FFT)
         2 # dims
         1 # nprocs
         inputs_test_2d_langmuir_multi_psatd_vay_deposition # inputs
-        analysis_2d.py # analysis
-        diags/diag1000080 # output
+        "analysis_2d.py diags/diag1000080" # analysis
+        "analysis_default_regression.py --path diags/diag1000080" # checksum
         OFF # dependency
     )
 endif()
 
@@ -185,8 +185,8 @@ if(WarpX_FFT)
         2 # dims
         1 # nprocs
         inputs_test_2d_langmuir_multi_psatd_vay_deposition_nodal # inputs
-        analysis_2d.py # analysis
-        diags/diag1000080 # output
+        "analysis_2d.py diags/diag1000080" # analysis
+        "analysis_default_regression.py --path diags/diag1000080" # checksum
         OFF # dependency
     )
 endif()
 
@@ -197,8 +197,8 @@ if(WarpX_FFT)
         2 # dims
         1 # nprocs
         inputs_test_2d_langmuir_multi_psatd_vay_deposition_particle_shape_4 # inputs
-        analysis_2d.py # analysis
-        diags/diag1000080 # output
+        "analysis_2d.py diags/diag1000080" # analysis
+        "analysis_default_regression.py --path diags/diag1000080" # checksum
         OFF # dependency
     )
 endif()
 
@@ -208,8 +208,8 @@ add_warpx_test(
     3 # dims
     2 # nprocs
     inputs_test_3d_langmuir_multi # inputs
-    analysis_3d.py # analysis
-    diags/diag1000040 # output
+    "analysis_3d.py diags/diag1000040" # analysis
+    "analysis_default_regression.py --path diags/diag1000040" # checksum
     OFF # dependency
 )
 
@@ -218,8 +218,8 @@ add_warpx_test(
     3 # dims
     2 # nprocs
     inputs_test_3d_langmuir_multi_nodal # inputs
-    analysis_3d.py # analysis
-    diags/diag1000040 # output
+    "analysis_3d.py diags/diag1000040" # analysis
+    "analysis_default_regression.py --path diags/diag1000040" # checksum
     OFF # dependency
 )
 
@@ -228,8 +228,8 @@ add_warpx_test(
     3 # dims
     1 # nprocs
     inputs_test_3d_langmuir_multi_picmi.py # inputs
-    analysis_default_regression.py # analysis
-    diags/diag1000040 # output
+    OFF # analysis
+    "analysis_default_regression.py --path diags/diag1000040" # checksum
     OFF # dependency
 )
 
@@ -239,8 +239,8 @@ if(WarpX_FFT)
        3 # dims
        2 # nprocs
        inputs_test_3d_langmuir_multi_psatd # inputs
-        analysis_3d.py # analysis
-        diags/diag1000040 # output
+        "analysis_3d.py diags/diag1000040" # analysis
+        "analysis_default_regression.py --path diags/diag1000040" # checksum
         OFF # dependency
     )
 endif()
 
@@ -251,8 +251,8 @@ if(WarpX_FFT)
         3 # dims
         1 # nprocs
         inputs_test_3d_langmuir_multi_psatd_current_correction # inputs
-        analysis_3d.py # analysis
-        diags/diag1000040 # output
+        "analysis_3d.py diags/diag1000040" # analysis
+        "analysis_default_regression.py --path diags/diag1000040" # checksum
         OFF # dependency
     )
 endif()
 
@@ -263,8 +263,8 @@ if(WarpX_FFT)
         3 # dims
         1 # nprocs
         inputs_test_3d_langmuir_multi_psatd_current_correction_nodal # inputs
-        analysis_3d.py # analysis
-        diags/diag1000040 # output
+        "analysis_3d.py diags/diag1000040" # analysis
+        "analysis_default_regression.py --path diags/diag1000040" # checksum
         OFF # dependency
     )
 endif()
 
@@ -275,8 +275,8 @@ if(WarpX_FFT)
         3 # dims
         2 # nprocs
         inputs_test_3d_langmuir_multi_psatd_div_cleaning # inputs
-        analysis_3d.py # analysis
-        diags/diag1000040 # output
+        "analysis_3d.py diags/diag1000040" # analysis
+        "analysis_default_regression.py --path diags/diag1000040" # checksum
         OFF # dependency
     )
 endif()
 
@@ -287,8 +287,8 @@ if(WarpX_FFT)
         3 # dims
         2 # nprocs
         inputs_test_3d_langmuir_multi_psatd_momentum_conserving # inputs
-        analysis_3d.py # analysis
-        diags/diag1000040 # output
+        "analysis_3d.py diags/diag1000040" # analysis
+        "analysis_default_regression.py --path diags/diag1000040" # checksum
         OFF # dependency
     )
 endif()
 
@@ -299,8 +299,8 @@ if(WarpX_FFT)
         3 # dims
         2 # nprocs
         inputs_test_3d_langmuir_multi_psatd_multiJ # inputs
-        analysis_3d.py # analysis
-        diags/diag1000040 # output
+        "analysis_3d.py diags/diag1000040" # analysis
+        "analysis_default_regression.py --path diags/diag1000040" # checksum
         OFF # dependency
     )
 endif()
 
@@ -311,8 +311,8 @@ if(WarpX_FFT)
         3 # dims
         2 # nprocs
         inputs_test_3d_langmuir_multi_psatd_multiJ_nodal # inputs
-        analysis_3d.py # analysis
-        diags/diag1000040 # output
+        "analysis_3d.py diags/diag1000040" # analysis
+        "analysis_default_regression.py --path diags/diag1000040" # checksum
         OFF # dependency
     )
 endif()
 
@@ -323,8 +323,8 @@ if(WarpX_FFT)
         3 # dims
         2 # nprocs
         inputs_test_3d_langmuir_multi_psatd_nodal # inputs
-        analysis_3d.py # analysis
-        diags/diag1000040 # output
+        "analysis_3d.py diags/diag1000040" # analysis
+        "analysis_default_regression.py --path diags/diag1000040" # checksum
         OFF # dependency
     )
 endif()
 
@@ -335,8 +335,8 @@ if(WarpX_FFT)
         3 # dims
         1 # nprocs
         inputs_test_3d_langmuir_multi_psatd_vay_deposition # inputs
-        analysis_3d.py # analysis
-        diags/diag1000040 # output
+        "analysis_3d.py diags/diag1000040" # analysis
+        "analysis_default_regression.py --path diags/diag1000040" # checksum
         OFF # dependency
     )
 endif()
 
@@ -347,8 +347,8 @@ if(WarpX_FFT)
         3 # dims
         1 # nprocs
         inputs_test_3d_langmuir_multi_psatd_vay_deposition_nodal # inputs
-        analysis_3d.py # analysis
-        diags/diag1000040 # output
+        "analysis_3d.py diags/diag1000040" # analysis
+        "analysis_default_regression.py --path diags/diag1000040" # checksum
         OFF # dependency
     )
     label_warpx_test(test_3d_langmuir_multi_psatd_vay_deposition_nodal slow)
 
@@ -359,8 +359,8 @@ add_warpx_test(
     RZ # dims
     2 # nprocs
     inputs_test_rz_langmuir_multi # inputs
-    analysis_rz.py # analysis
-    diags/diag1000080 # output
+    "analysis_rz.py diags/diag1000080" # analysis
+    "analysis_default_regression.py --path diags/diag1000080" # checksum
     OFF # dependency
 )
 
@@ -369,8 +369,8 @@ add_warpx_test(
     RZ # dims
     2 # nprocs
     inputs_test_rz_langmuir_multi_picmi.py # inputs
-    analysis_default_regression.py # analysis
-    diags/diag1000040 # output
+    OFF # analysis
+    "analysis_default_regression.py --path diags/diag1000040" # checksum
     OFF # dependency
 )
 
@@ -380,8 +380,8 @@ if(WarpX_FFT)
        RZ # dims
        2 # nprocs
        inputs_test_rz_langmuir_multi_psatd # inputs
-        analysis_rz.py # analysis
-        diags/diag1000080 # output
+        "analysis_rz.py diags/diag1000080" # analysis
+        "analysis_default_regression.py --path diags/diag1000080" # checksum
         OFF # dependency
     )
 endif()
 
@@ -392,8 +392,8 @@ if(WarpX_FFT)
         RZ # dims
         1 # nprocs
         inputs_test_rz_langmuir_multi_psatd_current_correction # inputs
-        analysis_rz.py # analysis
-        diags/diag1000080 # output
+        "analysis_rz.py diags/diag1000080" # analysis
+        "analysis_default_regression.py --path diags/diag1000080" # checksum
         OFF # dependency
     )
 endif()
 
@@ -404,8 +404,8 @@ if(WarpX_FFT)
         RZ # dims
         2 # nprocs
         inputs_test_rz_langmuir_multi_psatd_multiJ # inputs
-        analysis_rz.py # analysis
-        diags/diag1000080 # output
+        "analysis_rz.py diags/diag1000080" # analysis
+        "analysis_default_regression.py --path diags/diag1000080" # checksum
         OFF # dependency
     )
 endif()
diff --git a/Examples/Tests/langmuir/analysis_1d.py b/Examples/Tests/langmuir/analysis_1d.py
index 8eefd95b4f7..60a088d1309 100755
--- a/Examples/Tests/langmuir/analysis_1d.py
+++ b/Examples/Tests/langmuir/analysis_1d.py
@@ -26,9 +26,6 @@
 import numpy as np
 from scipy.constants import c, e, epsilon_0, m_e
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # test name
 test_name = os.path.split(os.getcwd())[1]
 
@@ -125,9 +122,3 @@ def get_theoretical_field(field, t):
 print("error_rel = {}".format(error_rel))
 print("tolerance = {}".format(tolerance))
 assert error_rel < tolerance
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/langmuir/analysis_2d.py b/Examples/Tests/langmuir/analysis_2d.py
index 31995e896a5..3aa246008a6 100755
--- a/Examples/Tests/langmuir/analysis_2d.py
+++ b/Examples/Tests/langmuir/analysis_2d.py
@@ -26,9 +26,6 @@
 import numpy as np
 from scipy.constants import c, e, epsilon_0, m_e
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # test name
 test_name = os.path.split(os.getcwd())[1]
 
@@ -162,9 +159,3 @@ def get_theoretical_field(field, t):
 print("error_rel = {}".format(error_rel))
 print("tolerance = {}".format(tolerance))
 assert error_rel < tolerance
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/langmuir/analysis_3d.py b/Examples/Tests/langmuir/analysis_3d.py
index 05f1c585ec0..75b9c5ba71c 100755
--- a/Examples/Tests/langmuir/analysis_3d.py
+++ b/Examples/Tests/langmuir/analysis_3d.py
@@ -26,9 +26,6 @@
 import numpy as np
 from scipy.constants import c, e, epsilon_0, m_e
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # test name
 test_name = os.path.split(os.getcwd())[1]
 
@@ -211,9 +208,3 @@ def get_theoretical_field(field, t):
 print("error_rel = {}".format(error_rel))
 print("tolerance = {}".format(tolerance))
 assert error_rel < tolerance
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/langmuir/analysis_rz.py b/Examples/Tests/langmuir/analysis_rz.py
index 64f8cfb6313..a0697b93ab9 100755
--- a/Examples/Tests/langmuir/analysis_rz.py
+++ b/Examples/Tests/langmuir/analysis_rz.py
@@ -29,9 +29,6 @@
 import post_processing_utils
 from scipy.constants import c, e, epsilon_0, m_e
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # this will be the name of the plot file
 fn = sys.argv[1]
 
@@ -182,9 +179,3 @@ def Ez(z, r, epsilon, k0, w0, wp, t):
 post_processing_utils.check_random_filter(
     fn, random_filter_fn, random_fraction, dim, species_name
 )
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/langmuir_fluids/CMakeLists.txt b/Examples/Tests/langmuir_fluids/CMakeLists.txt
index 054e9c80d3a..df6732200c6 100644
--- a/Examples/Tests/langmuir_fluids/CMakeLists.txt
+++ b/Examples/Tests/langmuir_fluids/CMakeLists.txt
@@ -6,8 +6,8 @@ add_warpx_test(
     1 # dims
     2 # nprocs
     inputs_test_1d_langmuir_fluid # inputs
-    analysis_1d.py # analysis
-    diags/diag1000080 # output
+    "analysis_1d.py diags/diag1000080" # analysis
+    "analysis_default_regression.py --path diags/diag1000080" # checksum
     OFF # dependency
 )
 
@@ -16,8 +16,8 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_langmuir_fluid # inputs
-    analysis_2d.py # analysis
-    diags/diag1000080 # output
+    "analysis_2d.py diags/diag1000080" # analysis
+    "analysis_default_regression.py --path diags/diag1000080" # checksum
     OFF # dependency
 )
 
@@ -26,8 +26,8 @@ add_warpx_test(
     3 # dims
     2 # nprocs
     inputs_test_3d_langmuir_fluid # inputs
-    analysis_3d.py # analysis
-    diags/diag1000040 # output
+    "analysis_3d.py diags/diag1000040" # analysis
+    "analysis_default_regression.py --path diags/diag1000040" # checksum
     OFF # dependency
 )
 
@@ -36,7 +36,7 @@ add_warpx_test(
     RZ # dims
     2 # nprocs
     inputs_test_rz_langmuir_fluid # inputs
-    analysis_rz.py # analysis
-    diags/diag1000080 # output
+    "analysis_rz.py diags/diag1000080" # analysis
+    "analysis_default_regression.py --path diags/diag1000080" # checksum
     OFF # dependency
 )
diff --git a/Examples/Tests/langmuir_fluids/analysis_1d.py b/Examples/Tests/langmuir_fluids/analysis_1d.py
index c448303783f..f60c76660b5 100755
--- a/Examples/Tests/langmuir_fluids/analysis_1d.py
+++ b/Examples/Tests/langmuir_fluids/analysis_1d.py
@@ -11,7 +11,6 @@
 # the script `inputs.multi.rt`. This simulates a 1D periodic plasma wave.
 # The electric field in the simulation is given (in theory) by:
 # $$ E_z = \epsilon \,\frac{m_e c^2 k_z}{q_e}\sin(k_z z)\sin( \omega_p t)$$
-import os
 import sys
 
 import matplotlib
@@ -25,9 +24,6 @@
 import numpy as np
 from scipy.constants import c, e, epsilon_0, m_e
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # this will be the name of the plot file
 fn = sys.argv[1]
 
@@ -147,9 +143,3 @@ def get_theoretical_rho_field(field, t):
 print("tolerance_rel: " + str(tolerance_rel))
 
 assert error_rel < tolerance_rel
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/langmuir_fluids/analysis_2d.py b/Examples/Tests/langmuir_fluids/analysis_2d.py
index d8ba50a9df1..46b9948884c 100755
--- a/Examples/Tests/langmuir_fluids/analysis_2d.py
+++ b/Examples/Tests/langmuir_fluids/analysis_2d.py
@@ -13,7 +13,6 @@
 # $$ E_x = \epsilon \,\frac{m_e c^2 k_x}{q_e}\sin(k_x x)\cos(k_y y)\cos(k_z z)\sin( \omega_p t)$$
 # $$ E_y = \epsilon \,\frac{m_e c^2 k_y}{q_e}\cos(k_x x)\sin(k_y y)\cos(k_z z)\sin( \omega_p t)$$
 # $$ E_z = \epsilon \,\frac{m_e c^2 k_z}{q_e}\cos(k_x x)\cos(k_y y)\sin(k_z z)\sin( \omega_p t)$$
-import os
 import sys
 
 import matplotlib.pyplot as plt
@@ -25,9 +24,6 @@
 import numpy as np
 from scipy.constants import c, e, epsilon_0, m_e
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # this will be the name of the plot file
 fn = sys.argv[1]
 
@@ -177,9 +173,3 @@ def get_theoretical_rho_field(field, t):
 print("tolerance_rel: " + str(tolerance_rel))
 
 assert error_rel < tolerance_rel
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/langmuir_fluids/analysis_3d.py b/Examples/Tests/langmuir_fluids/analysis_3d.py
index 899dc72424b..6a15c175843 100755
--- a/Examples/Tests/langmuir_fluids/analysis_3d.py
+++ b/Examples/Tests/langmuir_fluids/analysis_3d.py
@@ -13,7 +13,6 @@
 # $$ E_x = \epsilon \,\frac{m_e c^2 k_x}{q_e}\sin(k_x x)\cos(k_y y)\cos(k_z z)\sin( \omega_p t)$$
 # $$ E_y = \epsilon \,\frac{m_e c^2 k_y}{q_e}\cos(k_x x)\sin(k_y y)\cos(k_z z)\sin( \omega_p t)$$
 # $$ E_z = \epsilon \,\frac{m_e c^2 k_z}{q_e}\cos(k_x x)\cos(k_y y)\sin(k_z z)\sin( \omega_p t)$$
-import os
 import sys
 
 import matplotlib.pyplot as plt
@@ -25,9 +24,6 @@
 import numpy as np
 from scipy.constants import c, e, epsilon_0, m_e
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # this will be the name of the plot file
 fn = sys.argv[1]
 
@@ -211,9 +207,3 @@ def get_theoretical_rho_field(field, t):
 print("tolerance_rel: " + str(tolerance_rel))
 
 assert error_rel < tolerance_rel
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/langmuir_fluids/analysis_default_regression.py b/Examples/Tests/langmuir_fluids/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/langmuir_fluids/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/langmuir_fluids/analysis_rz.py b/Examples/Tests/langmuir_fluids/analysis_rz.py
index 0e918a6ab31..de6853db556 100755
--- a/Examples/Tests/langmuir_fluids/analysis_rz.py
+++ b/Examples/Tests/langmuir_fluids/analysis_rz.py
@@ -14,7 +14,6 @@
 # Unrelated to the Langmuir waves, we also test the plotfile particle filter function in this
 # analysis script.
 import os
-import re
 import sys
 
 import matplotlib
@@ -28,17 +27,11 @@
 import numpy as np
 from scipy.constants import c, e, epsilon_0, m_e
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # this will be the name of the plot file
 fn = sys.argv[1]
 
 test_name = os.path.split(os.getcwd())[1]
 
-# Parse test name and check if current correction (psatd.current_correction) is applied
-current_correction = True if re.search("current_correction", fn) else False
-
 # Parameters (these parameters must match the parameters in `inputs.multi.rz.rt`)
 epsilon = 0.01
 n = 2.0e24
@@ -225,9 +218,3 @@ def rho(z, r, epsilon, k0, w0, wp, t):
 print("tolerance_rel: " + str(tolerance_rel))
 
 assert error_rel < tolerance_rel
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/larmor/CMakeLists.txt b/Examples/Tests/larmor/CMakeLists.txt
index 6a3368a4fca..f089b4dc958 100644
--- a/Examples/Tests/larmor/CMakeLists.txt
+++ b/Examples/Tests/larmor/CMakeLists.txt
@@ -6,7 +6,7 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_larmor # inputs
-    analysis_default_regression.py # analysis
-    diags/diag1000010 # output
+    OFF # analysis
+    "analysis_default_regression.py --path diags/diag1000010" # checksum
     OFF # dependency
 )
diff --git a/Examples/Tests/laser_injection/CMakeLists.txt b/Examples/Tests/laser_injection/CMakeLists.txt
index a15075bb43e..30d18c6d063 100644
--- a/Examples/Tests/laser_injection/CMakeLists.txt
+++ b/Examples/Tests/laser_injection/CMakeLists.txt
@@ -6,8 +6,8 @@ add_warpx_test(
     1 # dims
     2 # nprocs
     inputs_test_1d_laser_injection # inputs
-    analysis_1d.py # analysis
-    diags/diag1000240 # output
+    "analysis_1d.py diags/diag1000240" # analysis
+    "analysis_default_regression.py --path diags/diag1000240" # checksum
     OFF # dependency
 )
 
@@ -16,8 +16,8 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_laser_injection # inputs
-    analysis_2d.py # analysis
-    diags/diag1000240 # output
+    "analysis_2d.py diags/diag1000240" # analysis
+    "analysis_default_regression.py --path diags/diag1000240" # checksum
     OFF # dependency
 )
 
@@ -26,8 +26,8 @@ add_warpx_test(
     3 # dims
     2 # nprocs
     inputs_test_3d_laser_injection # inputs
-    analysis_3d.py # analysis
-    diags/diag1000020 # output
+    "analysis_3d.py" # analysis
+    "analysis_default_regression.py --path diags/diag1000020" # checksum
     OFF # dependency
 )
 
@@ -36,8 +36,8 @@ add_warpx_test(
     1 # dims
     2 # nprocs
     inputs_test_1d_laser_injection_implicit # inputs
-    analysis_1d.py # analysis
-    diags/diag1000240 # output
+    "analysis_1d.py diags/diag1000240" # analysis
+    "analysis_default_regression.py --path diags/diag1000240" # checksum
     OFF # dependency
 )
 
@@ -46,7 +46,7 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_laser_injection_implicit # inputs
-    analysis_2d.py # analysis
-    diags/diag1000240 # output
+    "analysis_2d.py diags/diag1000240" # analysis
+    "analysis_default_regression.py --path diags/diag1000240" # checksum
     OFF # dependency
 )
diff --git a/Examples/Tests/laser_injection/analysis_1d.py b/Examples/Tests/laser_injection/analysis_1d.py
index 5ce7065c967..98ce6ca47c5 100755
--- a/Examples/Tests/laser_injection/analysis_1d.py
+++ b/Examples/Tests/laser_injection/analysis_1d.py
@@ -12,7 +12,6 @@
 # the simulation and it compares it with theory. It also checks that the
 # central frequency of the Fourier transform is the expected one.
 
-import os
 import sys
 
 import matplotlib
@@ -23,9 +22,6 @@
 import numpy as np
 from scipy.signal import hilbert
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # Maximum acceptable error for this test
 relative_error_threshold = 0.05
 
@@ -192,12 +188,6 @@ def main():
 
     check_laser(filename_end)
 
-    # compare checksums
-    evaluate_checksum(
-        test_name=os.path.split(os.getcwd())[1],
-        output_file=sys.argv[1],
-    )
-
 
 if __name__ == "__main__":
     main()
diff --git a/Examples/Tests/laser_injection/analysis_2d.py b/Examples/Tests/laser_injection/analysis_2d.py
index 5e2d9ebf280..33b87823ebd 100755
--- a/Examples/Tests/laser_injection/analysis_2d.py
+++ b/Examples/Tests/laser_injection/analysis_2d.py
@@ -17,7 +17,6 @@
 # the simulation and it compares it with theory. It also checks that the
 # central frequency of the Fourier transform is the expected one.
 
-import os
 import sys
 
 import matplotlib
@@ -29,9 +28,6 @@
 from mpl_toolkits.axes_grid1 import make_axes_locatable
 from scipy.signal import hilbert
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # Maximum acceptable error for this test
 relative_error_threshold = 0.05
 
@@ -225,12 +221,6 @@ def main():
 
     check_laser(filename_end)
 
-    # compare checksums
-    evaluate_checksum(
-        test_name=os.path.split(os.getcwd())[1],
-        output_file=sys.argv[1],
-    )
-
 
 if __name__ == "__main__":
     main()
diff --git a/Examples/Tests/laser_injection/analysis_3d.py b/Examples/Tests/laser_injection/analysis_3d.py
index 153b721b526..2ce123169d5 100755
--- a/Examples/Tests/laser_injection/analysis_3d.py
+++ b/Examples/Tests/laser_injection/analysis_3d.py
@@ -8,26 +8,14 @@
 # License: BSD-3-Clause-LBNL
 
 
-import os
-import sys
-
 import matplotlib
 
 matplotlib.use("Agg")
 import matplotlib.pyplot as plt
 import numpy as np
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # you can save an image to be displayed on the website
 t = np.arange(0.0, 2.0, 0.01)
 s = 1 + np.sin(2 * np.pi * t)
 plt.plot(t, s)
 plt.savefig("laser_analysis.png")
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/laser_injection/analysis_default_regression.py b/Examples/Tests/laser_injection/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/laser_injection/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/laser_injection_from_file/CMakeLists.txt b/Examples/Tests/laser_injection_from_file/CMakeLists.txt
index 4b4024b9029..d585160bc8f 100644
--- a/Examples/Tests/laser_injection_from_file/CMakeLists.txt
+++ b/Examples/Tests/laser_injection_from_file/CMakeLists.txt
@@ -7,7 +7,7 @@ add_warpx_test(
     1 # nprocs
     inputs_test_1d_laser_injection_from_lasy_file_prepare.py # inputs
     OFF # analysis
-    OFF # output
+    OFF # checksum
     OFF # dependency
 )
 
@@ -16,8 +16,8 @@ add_warpx_test(
     1 # dims
     1 # nprocs
     inputs_test_1d_laser_injection_from_lasy_file # inputs
-    analysis_1d.py # analysis
-    diags/diag1000251 # output
+    "analysis_1d.py diags/diag1000251" # analysis
+    "analysis_default_regression.py --path diags/diag1000251" # checksum
     test_1d_laser_injection_from_lasy_file_prepare # dependency
 )
 
@@ -27,7 +27,7 @@ add_warpx_test(
     1 # nprocs
     inputs_test_1d_laser_injection_from_lasy_file_boost_prepare.py # inputs
     OFF # analysis
-    OFF # output
+    OFF # checksum
     OFF # dependency
 )
 
@@ -36,8 +36,8 @@ add_warpx_test(
     1 # dims
     1 # nprocs
     inputs_test_1d_laser_injection_from_lasy_file_boost # inputs
-    analysis_1d_boost.py # analysis
-    diags/diag1000001 # output
+    "analysis_1d_boost.py diags/diag1000001" # analysis
+    "analysis_default_regression.py --path diags/diag1000001" # checksum
     test_1d_laser_injection_from_lasy_file_boost_prepare # dependency
 )
 
@@ -47,7 +47,7 @@ add_warpx_test(
     1 # nprocs
     inputs_test_2d_laser_injection_from_binary_file_prepare.py # inputs
     OFF # analysis
-    OFF # output
+    OFF # checksum
     OFF # dependency
 )
 
@@ -56,8 +56,8 @@ add_warpx_test(
     2 # dims
     1 # nprocs
     inputs_test_2d_laser_injection_from_binary_file # inputs
-    analysis_2d_binary.py # analysis
-    diags/diag1000250 # output
+    "analysis_2d_binary.py diags/diag1000250" # analysis
+    "analysis_default_regression.py --path diags/diag1000250" # checksum
     test_2d_laser_injection_from_binary_file_prepare # dependency
 )
 
@@ -67,7 +67,7 @@ add_warpx_test(
     1 # nprocs
     inputs_test_2d_laser_injection_from_lasy_file_prepare.py # inputs
     OFF # analysis
-    OFF # output
+    OFF # checksum
     OFF # dependency
 )
 
@@ -76,8 +76,8 @@ add_warpx_test(
     2 # dims
     1 # nprocs
     inputs_test_2d_laser_injection_from_lasy_file # inputs
-    analysis_2d.py # analysis
-    diags/diag1000251 # output
+    "analysis_2d.py diags/diag1000251" # analysis
+    "analysis_default_regression.py --path diags/diag1000251" # checksum
     test_2d_laser_injection_from_lasy_file_prepare # dependency
 )
 
@@ -87,7 +87,7 @@ add_warpx_test(
     1 # nprocs
     inputs_test_3d_laser_injection_from_lasy_file_prepare.py # inputs
     OFF # analysis
-    OFF # output
+    OFF # checksum
     OFF # dependency
 )
 
@@ -96,8 +96,8 @@ add_warpx_test(
     3 # dims
     1 # nprocs
     inputs_test_3d_laser_injection_from_lasy_file # inputs
-    analysis_3d.py # analysis
-    diags/diag1000251 # output
+    "analysis_3d.py diags/diag1000251" # analysis
+    "analysis_default_regression.py --path diags/diag1000251" # checksum
     test_3d_laser_injection_from_lasy_file_prepare # dependency
 )
 
@@ -107,7 +107,7 @@ add_warpx_test(
     1 # nprocs
     inputs_test_rz_laser_injection_from_lasy_file_prepare.py # inputs
     OFF # analysis
-    OFF # output
+    OFF # checksum
     OFF # dependency
 )
 
@@ -116,8 +116,8 @@ add_warpx_test(
     RZ # dims
     1 # nprocs
     inputs_test_rz_laser_injection_from_lasy_file # inputs
-    analysis_rz.py # analysis
-    diags/diag1000252 # output
+    "analysis_rz.py diags/diag1000252" # analysis
+    "analysis_default_regression.py --path diags/diag1000252" # checksum
     test_rz_laser_injection_from_lasy_file_prepare # dependency
 )
 
@@ -127,7 +127,7 @@ add_warpx_test(
     1 # nprocs
     inputs_test_rz_laser_injection_from_RZ_lasy_file_prepare.py # inputs
     OFF # analysis
-    OFF # output
+    OFF # checksum
     OFF # dependency
 )
 
@@ -136,7 +136,7 @@ add_warpx_test(
     RZ # dims
     1 # nprocs
     inputs_test_rz_laser_injection_from_RZ_lasy_file # inputs
-    analysis_from_RZ_file.py # analysis
-    diags/diag1000612 # output
+    "analysis_from_RZ_file.py diags/diag1000612" # analysis
+    "analysis_default_regression.py --path diags/diag1000612" # checksum
     test_rz_laser_injection_from_RZ_lasy_file_prepare # dependency
 )
diff --git a/Examples/Tests/laser_injection_from_file/analysis_1d.py b/Examples/Tests/laser_injection_from_file/analysis_1d.py
index c6542ed1ac8..bd6a78f5949 100755
--- a/Examples/Tests/laser_injection_from_file/analysis_1d.py
+++ b/Examples/Tests/laser_injection_from_file/analysis_1d.py
@@ -13,7 +13,6 @@
 # - Compute the theory for laser envelope at time T
 # - Compare theory and simulation in 1D, for both envelope and central frequency
 
-import os
 import sys
 
 import matplotlib
@@ -27,9 +26,6 @@
 
 yt.funcs.mylog.setLevel(50)
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # Maximum acceptable error for this test
 relative_error_threshold = 0.065
 
@@ -112,9 +108,3 @@ def gauss_env(T, Z):
 relative_error_freq = np.abs(freq - exp_freq) / exp_freq
 print("Relative error frequency: ", relative_error_freq)
 assert relative_error_freq < relative_error_threshold
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py b/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py
index e410369cb45..b51b32714de 100755
--- a/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py
+++ b/Examples/Tests/laser_injection_from_file/analysis_1d_boost.py
@@ -13,7 +13,6 @@
 # - Compute the theory for laser envelope at time T
 # - Compare theory and simulation in 1D, for both envelope and central frequency
 
-import os
 import sys
 
 import matplotlib
@@ -27,9 +26,6 @@
 
 yt.funcs.mylog.setLevel(50)
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # Maximum acceptable error for this test
 relative_error_threshold = 0.065
 
@@ -112,9 +108,3 @@ def gauss_env(T, Z):
 relative_error_freq = np.abs(freq - exp_freq) / exp_freq
 print("Relative error frequency: ", relative_error_freq)
 assert relative_error_freq < relative_error_threshold
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/laser_injection_from_file/analysis_2d.py b/Examples/Tests/laser_injection_from_file/analysis_2d.py
index 1e6704f55a5..21f5c186b7a 100755
--- a/Examples/Tests/laser_injection_from_file/analysis_2d.py
+++ b/Examples/Tests/laser_injection_from_file/analysis_2d.py
@@ -13,7 +13,6 @@
 # - Compute the theory for laser envelope at time T
 # - Compare theory and simulation in 2D, for both envelope and central frequency
 
-import os
 import sys
 
 import matplotlib
@@ -27,9 +26,6 @@
 
 yt.funcs.mylog.setLevel(50)
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # Maximum acceptable error for this test
 relative_error_threshold = 0.065
 
@@ -138,9 +134,3 @@ def gauss_env(T, X, Y, Z):
 relative_error_freq = np.abs(freq - exp_freq) / exp_freq
 print("Relative error frequency: ", relative_error_freq)
 assert relative_error_freq < relative_error_threshold
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py b/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py
index 7fc14824471..d223026a073 100755
--- a/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py
+++ b/Examples/Tests/laser_injection_from_file/analysis_2d_binary.py
@@ -13,7 +13,6 @@
 # - Compute the theory for laser envelope at time T
 # - Compare theory and simulation in 2D, for both envelope and central frequency
 
-import os
 import sys
 
 import matplotlib
@@ -26,9 +25,6 @@
 
 yt.funcs.mylog.setLevel(50)
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # Maximum acceptable error for this test
 relative_error_threshold = 0.065
 
@@ -145,9 +141,3 @@ def gauss_env(T, XX, ZZ):
 relative_error_freq = np.abs(freq - exp_freq) / exp_freq
 print("Relative error frequency: ", relative_error_freq)
 assert relative_error_freq < relative_error_threshold
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/laser_injection_from_file/analysis_3d.py b/Examples/Tests/laser_injection_from_file/analysis_3d.py
index 3921e3d5930..cc19b742134 100755
--- a/Examples/Tests/laser_injection_from_file/analysis_3d.py
+++ b/Examples/Tests/laser_injection_from_file/analysis_3d.py
@@ -13,7 +13,6 @@
 # - Compute the theory for laser envelope at time T
 # - Compare theory and simulation in 3D, for both envelope and central frequency
 
-import os
 import sys
 
 import matplotlib
@@ -27,9 +26,6 @@
 
 yt.funcs.mylog.setLevel(50)
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # Maximum acceptable error for this test
 relative_error_threshold = 0.065
 
@@ -144,9 +140,3 @@ def gauss_env(T, X, Y, Z):
 relative_error_freq = np.abs(freq - exp_freq) / exp_freq
 print("Relative error frequency: ", relative_error_freq)
 assert relative_error_freq < relative_error_threshold
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/laser_injection_from_file/analysis_default_regression.py b/Examples/Tests/laser_injection_from_file/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/laser_injection_from_file/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py b/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py
index f797ddb5d90..041e2917ece 100755
--- a/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py
+++ b/Examples/Tests/laser_injection_from_file/analysis_from_RZ_file.py
@@ -13,7 +13,6 @@
 # - Compute the theory for laser envelope at time T
 # - Compare theory and simulation in RZ, for both envelope and central frequency
 
-import os
 import sys
 
 import matplotlib
@@ -28,9 +27,6 @@
 
 yt.funcs.mylog.setLevel(50)
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # Maximum acceptable error for this test
 relative_error_threshold = 0.065
 
@@ -146,9 +142,3 @@ def laguerre_env(T, X, Y, Z, p, m):
 relative_error_freq = np.abs(freq - exp_freq) / exp_freq
 print("Relative error frequency: ", relative_error_freq)
 assert relative_error_freq < relative_error_threshold
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/laser_injection_from_file/analysis_rz.py b/Examples/Tests/laser_injection_from_file/analysis_rz.py
index c37c6d8b3c2..1ad73b34c4b 100755
--- a/Examples/Tests/laser_injection_from_file/analysis_rz.py
+++ b/Examples/Tests/laser_injection_from_file/analysis_rz.py
@@ -13,7 +13,6 @@
 # - Compute the theory for laser envelope at time T
 # - Compare theory and simulation in RZ, for both envelope and central frequency
 
-import os
 import sys
 
 import matplotlib
@@ -27,9 +26,6 @@
 
 yt.funcs.mylog.setLevel(50)
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # Maximum acceptable error for this test
 relative_error_threshold = 0.065
 
@@ -139,9 +135,3 @@ def gauss_env(T, X, Y, Z):
 relative_error_freq = np.abs(freq - exp_freq) / exp_freq
 print("Relative error frequency: ", relative_error_freq)
 assert relative_error_freq < relative_error_threshold
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/laser_on_fine/CMakeLists.txt b/Examples/Tests/laser_on_fine/CMakeLists.txt
index 479374137df..9d9e48e54be 100644
--- a/Examples/Tests/laser_on_fine/CMakeLists.txt
+++ b/Examples/Tests/laser_on_fine/CMakeLists.txt
@@ -6,7 +6,7 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_laser_on_fine # inputs
-    analysis_default_regression.py # analysis
-    diags/diag1000050 # output
+    OFF # analysis
+    "analysis_default_regression.py --path diags/diag1000050" # checksum
     OFF # dependency
 )
diff --git a/Examples/Tests/load_external_field/CMakeLists.txt b/Examples/Tests/load_external_field/CMakeLists.txt
index 0713dc877df..8641f307e16 100644
--- a/Examples/Tests/load_external_field/CMakeLists.txt
+++ b/Examples/Tests/load_external_field/CMakeLists.txt
@@ -6,8 +6,8 @@ add_warpx_test(
     3 # dims
     1 # nprocs
     inputs_test_3d_load_external_field_grid_picmi.py # inputs
-    analysis_3d.py # analysis
-    diags/diag1000300 # output
+    "analysis_3d.py diags/diag1000300" # analysis
+    "analysis_default_regression.py --path diags/diag1000300" # checksum
     OFF # dependency
 )
 
@@ -16,8 +16,8 @@ add_warpx_test(
     3 # dims
     1 # nprocs
     inputs_test_3d_load_external_field_particle_picmi.py # inputs
-    analysis_3d.py # analysis
-    diags/diag1000300 # output
+    "analysis_3d.py diags/diag1000300" # analysis
+    "analysis_default_regression.py --path diags/diag1000300" # checksum
     OFF # dependency
 )
 
@@ -26,8 +26,8 @@ add_warpx_test(
     RZ # dims
     1 # nprocs
     inputs_test_rz_load_external_field_grid # inputs
-    analysis_rz.py # analysis
-    diags/diag1000300 # output
+    "analysis_rz.py diags/diag1000300" # analysis
+    "analysis_default_regression.py --path diags/diag1000300" # checksum
     OFF # dependency
 )
 
@@ -36,8 +36,8 @@ add_warpx_test(
     RZ # dims
     1 # nprocs
     inputs_test_rz_load_external_field_grid_restart # inputs
-    analysis_default_restart.py # analysis
-    diags/diag1000300 # output
+    "analysis_default_restart.py diags/diag1000300" # analysis
+    "analysis_default_regression.py --path diags/diag1000300" # checksum
     test_rz_load_external_field_grid # dependency
 )
 
@@ -46,8 +46,8 @@ add_warpx_test(
     RZ # dims
     1 # nprocs
     inputs_test_rz_load_external_field_particles # inputs
-    analysis_rz.py # analysis
-    diags/diag1000300 # output
+    "analysis_rz.py diags/diag1000300" # analysis
+    "analysis_default_regression.py --path diags/diag1000300" # checksum
     OFF # dependency
 )
 
@@ -56,7 +56,7 @@ add_warpx_test(
     RZ # dims
     1 # nprocs
     inputs_test_rz_load_external_field_particles_restart # inputs
-    analysis_default_restart.py # analysis
-    diags/diag1000300 # output
+    "analysis_default_restart.py diags/diag1000300" # analysis
+    "analysis_default_regression.py --path diags/diag1000300" # checksum
     test_rz_load_external_field_particles # dependency
 )
diff --git a/Examples/Tests/load_external_field/analysis_3d.py b/Examples/Tests/load_external_field/analysis_3d.py
index 05cba3ea7bd..433a4bad5e8 100755
--- a/Examples/Tests/load_external_field/analysis_3d.py
+++ b/Examples/Tests/load_external_field/analysis_3d.py
@@ -16,15 +16,11 @@
 # tolerance: 1.0e-8
 # Possible running time: 2.756646401 s
 
-import os
 import sys
 
 import numpy as np
 import yt
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 tolerance = 1.0e-8
 x0 = 0.12238072
 y0 = 0.00965394
@@ -43,9 +39,3 @@
 print("error = ", error)
 print("tolerance = ", tolerance)
 assert error < tolerance
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/load_external_field/analysis_default_regression.py b/Examples/Tests/load_external_field/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/load_external_field/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/load_external_field/analysis_rz.py b/Examples/Tests/load_external_field/analysis_rz.py
index 7de160cdd50..e5601647d4a 100755
--- a/Examples/Tests/load_external_field/analysis_rz.py
+++ b/Examples/Tests/load_external_field/analysis_rz.py
@@ -16,15 +16,11 @@
 # tolerance: 1.0e-8
 # Possible running time: 0.327827743 s
 
-import os
 import sys
 
 import numpy as np
 import yt
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 tolerance = 1.0e-8
 r0 = 0.12402005
 z0 = 4.3632492
@@ -40,9 +36,3 @@
 print("error = ", error)
 print("tolerance = ", tolerance)
 assert error < tolerance
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/magnetostatic_eb/CMakeLists.txt b/Examples/Tests/magnetostatic_eb/CMakeLists.txt
index 3eb2da03136..5c0a87fd10a 100644
--- a/Examples/Tests/magnetostatic_eb/CMakeLists.txt
+++ b/Examples/Tests/magnetostatic_eb/CMakeLists.txt
@@ -7,8 +7,8 @@ if(WarpX_EB)
         3 # dims
         1 # nprocs
         inputs_test_3d_magnetostatic_eb # inputs
-        analysis_default_regression.py # analysis
-        diags/diag1000001 # output
+        OFF # analysis
+        "analysis_default_regression.py --path diags/diag1000001" # checksum
         OFF # dependency
     )
 endif()
 
@@ -19,8 +19,8 @@ if(WarpX_EB)
         3 # dims
         1 # nprocs
         inputs_test_3d_magnetostatic_eb_picmi.py # inputs
-        analysis_default_regression.py # analysis
-        diags/diag1000001 # output
+        OFF # analysis
+        "analysis_default_regression.py --path diags/diag1000001" # checksum
         OFF # dependency
     )
 endif()
 
@@ -31,8 +31,8 @@ if(WarpX_EB)
        RZ # dims
        1 # nprocs
        inputs_test_rz_magnetostatic_eb_picmi.py # inputs
-        analysis_rz.py # analysis
-        diags/diag1000001 # output
+        OFF # analysis
+        "analysis_default_regression.py --path diags/diag1000001 --skip-particles" # checksum
         OFF # dependency
     )
 endif()
diff --git a/Examples/Tests/magnetostatic_eb/analysis_rz.py b/Examples/Tests/magnetostatic_eb/analysis_rz.py
deleted file mode 100755
index f31069ad230..00000000000
--- a/Examples/Tests/magnetostatic_eb/analysis_rz.py
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import sys
-
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-    do_particles=False,
-)
diff --git a/Examples/Tests/maxwell_hybrid_qed/CMakeLists.txt b/Examples/Tests/maxwell_hybrid_qed/CMakeLists.txt
index 2c65c0a6ecb..c02d8abc567 100644
--- a/Examples/Tests/maxwell_hybrid_qed/CMakeLists.txt
+++ b/Examples/Tests/maxwell_hybrid_qed/CMakeLists.txt
@@ -7,8 +7,8 @@ if(WarpX_FFT)
         2 # dims
         2 # nprocs
         inputs_test_2d_maxwell_hybrid_qed_solver # inputs
-        analysis.py # analysis
-        diags/diag1000300 # output
+        "analysis.py diags/diag1000300" # analysis
+        "analysis_default_regression.py --path diags/diag1000300" # checksum
         OFF # dependency
     )
 endif()
diff --git a/Examples/Tests/maxwell_hybrid_qed/analysis_default_regression.py b/Examples/Tests/maxwell_hybrid_qed/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/maxwell_hybrid_qed/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/nci_fdtd_stability/CMakeLists.txt b/Examples/Tests/nci_fdtd_stability/CMakeLists.txt
index e58e5bfb58f..9af4034c3ca 100644
--- a/Examples/Tests/nci_fdtd_stability/CMakeLists.txt
+++ b/Examples/Tests/nci_fdtd_stability/CMakeLists.txt
@@ -6,8 +6,8 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_nci_corrector # inputs
-    analysis_ncicorr.py # analysis
-    diags/diag1000600 # output
+    "analysis_ncicorr.py diags/diag1000600" # analysis
+    "analysis_default_regression.py --path diags/diag1000600" # checksum
     OFF # dependency
 )
 
@@ -16,7 +16,7 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_nci_corrector_mr # inputs
-    analysis_ncicorr.py # analysis
-    diags/diag1000600 # output
+    "analysis_ncicorr.py diags/diag1000600" # analysis
+    "analysis_default_regression.py --path diags/diag1000600" # checksum
     OFF # dependency
 )
diff --git a/Examples/Tests/nci_fdtd_stability/analysis_default_regression.py b/Examples/Tests/nci_fdtd_stability/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/nci_fdtd_stability/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py b/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py
index ad635bf0fbe..290cdea819f 100755
--- a/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py
+++ b/Examples/Tests/nci_fdtd_stability/analysis_ncicorr.py
@@ -8,7 +8,6 @@
 # License: BSD-3-Clause-LBNL
 
 
-import os
 import re
 import sys
 
@@ -18,9 +17,6 @@
 
 yt.funcs.mylog.setLevel(0)
 
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 fn = sys.argv[1]
 
 use_MR = re.search("nci_correctorMR", fn) is not None
@@ -49,9 +45,3 @@
 print("energy from this run: %s" % energy)
 
 assert energy < energy_threshold
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/nci_psatd_stability/CMakeLists.txt b/Examples/Tests/nci_psatd_stability/CMakeLists.txt
index f2b4ceae8ba..210fb13f542 100644
--- a/Examples/Tests/nci_psatd_stability/CMakeLists.txt
+++ b/Examples/Tests/nci_psatd_stability/CMakeLists.txt
@@ -7,8 +7,8 @@ if(WarpX_FFT)
        2 # dims
        1 # nprocs
        inputs_test_2d_averaged_galilean_psatd # inputs
-        analysis_galilean.py # analysis
-        diags/diag1000400 # output
+        "analysis_galilean.py diags/diag1000400" # analysis
+        "analysis_default_regression.py --path diags/diag1000400 --rtol 1e-8" # checksum
         OFF # dependency
     )
 endif()
 
@@ -19,8 +19,8 @@ if(WarpX_FFT)
         2 # dims
         2 # nprocs
         inputs_test_2d_averaged_galilean_psatd_hybrid # inputs
-        analysis_galilean.py # analysis
-        diags/diag1000400 # output
+        "analysis_galilean.py diags/diag1000400" # analysis
+        "analysis_default_regression.py --path diags/diag1000400 --rtol 1e-8" # checksum
         OFF # dependency
     )
 endif()
 
@@ -31,8 +31,8 @@ if(WarpX_FFT)
         2 # dims
         2 # nprocs
         inputs_test_2d_comoving_psatd_hybrid # inputs
-        analysis_default_regression.py # analysis
-        diags/diag1000400 # output
+        OFF # analysis
+        "analysis_default_regression.py --path diags/diag1000400" # checksum
         OFF # dependency
     )
 endif()
 
@@ -43,8 +43,8 @@ if(WarpX_FFT)
         2 # dims
         1 # nprocs
         inputs_test_2d_galilean_psatd # inputs
-        analysis_galilean.py # analysis
-        diags/diag1000400 # output
+        "analysis_galilean.py diags/diag1000400" # analysis
+        "analysis_default_regression.py --path diags/diag1000400 --rtol 1e-8" # checksum
         OFF # dependency
     )
 endif()
 
@@ -55,8 +55,8 @@ if(WarpX_FFT)
         2 # dims
         2 # nprocs
         inputs_test_2d_galilean_psatd_current_correction # inputs
-        analysis_galilean.py # analysis
-        diags/diag1000400 # output
+        "analysis_galilean.py diags/diag1000400" # analysis
+        "analysis_default_regression.py --path diags/diag1000400 --rtol 1e-8" # checksum
         OFF # dependency
     )
 endif()
 
@@ -67,8 +67,8 @@ if(WarpX_FFT)
         2 # dims
         1 # nprocs
         inputs_test_2d_galilean_psatd_current_correction_psb # inputs
-        analysis_galilean.py # analysis
-        diags/diag1000400 # output
+        "analysis_galilean.py diags/diag1000400" # analysis
+        "analysis_default_regression.py --path diags/diag1000400 --rtol 1e-8" # checksum
         OFF # dependency
     )
 endif()
 
@@ -79,8 +79,8 @@ if(WarpX_FFT)
         2 # dims
         2 # nprocs
         inputs_test_2d_galilean_psatd_hybrid # inputs
-        analysis_default_regression.py # analysis
-        diags/diag1000400 # output
+        OFF # analysis
+        "analysis_default_regression.py --path diags/diag1000400" # checksum
         OFF # dependency
     )
 endif()
 
@@ -91,8 +91,8 @@ if(WarpX_FFT)
         3 # dims
         2 # nprocs
         inputs_test_3d_averaged_galilean_psatd # inputs
-        analysis_galilean.py # analysis
-        diags/diag1000160 # output
+        "analysis_galilean.py diags/diag1000160" # analysis
+        "analysis_default_regression.py --path diags/diag1000160 --rtol 1e-8" # checksum
         OFF # dependency
     )
 endif()
 
@@ -103,8 +103,8 @@ if(WarpX_FFT)
         3 # dims
         2 # nprocs
         inputs_test_3d_averaged_galilean_psatd_hybrid # inputs
-        analysis_galilean.py # analysis
-        diags/diag1000160 # output
+        "analysis_galilean.py diags/diag1000160" # analysis
+        "analysis_default_regression.py --path diags/diag1000160 --rtol 1e-8" # checksum
         OFF # dependency
     )
 endif()
 
@@ -115,8 +115,8 @@ if(WarpX_FFT)
         3 # dims
         2 # nprocs
         inputs_test_3d_galilean_psatd # inputs
-        analysis_galilean.py # analysis
-        diags/diag1000300 # output
+        "analysis_galilean.py diags/diag1000300" # analysis
+        "analysis_default_regression.py --path diags/diag1000300 --rtol 1e-8" # checksum
         OFF # dependency
     )
 endif()
 
@@ -127,8 +127,8 @@ if(WarpX_FFT)
         3 # dims
         2 # nprocs
         inputs_test_3d_galilean_psatd_current_correction # inputs
-        analysis_galilean.py # analysis
-        diags/diag1000300 # output
+        "analysis_galilean.py diags/diag1000300" # analysis
+        "analysis_default_regression.py --path diags/diag1000300 --rtol 1e-8" # checksum
         OFF # dependency
     )
 endif()
 
@@ -139,8 +139,8 @@ if(WarpX_FFT)
         3 # dims
         1 # nprocs
inputs_test_3d_galilean_psatd_current_correction_psb # inputs - analysis_galilean.py # analysis - diags/diag1000300 # output + "analysis_galilean.py diags/diag1000300" # analysis + "analysis_default_regression.py --path diags/diag1000300 --rtol 1e-8" # checksum OFF # dependency ) endif() @@ -151,8 +151,8 @@ if(WarpX_FFT) 3 # dims 2 # nprocs inputs_test_3d_uniform_plasma_multiJ # inputs - analysis_multiJ.py # analysis - diags/diag1000300 # output + "analysis_multiJ.py diags/diag1000300" # analysis + "analysis_default_regression.py --path diags/diag1000300" # checksum OFF # dependency ) endif() @@ -163,8 +163,8 @@ if(WarpX_FFT) RZ # dims 1 # nprocs inputs_test_rz_galilean_psatd # inputs - analysis_galilean.py # analysis - diags/diag1000400 # output + "analysis_galilean.py diags/diag1000400" # analysis + "analysis_default_regression.py --path diags/diag1000400 --rtol 1e-8" # checksum OFF # dependency ) endif() @@ -175,8 +175,8 @@ if(WarpX_FFT) RZ # dims 2 # nprocs inputs_test_rz_galilean_psatd_current_correction # inputs - analysis_galilean.py # analysis - diags/diag1000400 # output + "analysis_galilean.py diags/diag1000400" # analysis + "analysis_default_regression.py --path diags/diag1000400 --rtol 1e-8" # checksum OFF # dependency ) endif() @@ -187,8 +187,8 @@ if(WarpX_FFT) RZ # dims 1 # nprocs inputs_test_rz_galilean_psatd_current_correction_psb # inputs - analysis_galilean.py # analysis - diags/diag1000400 # output + "analysis_galilean.py diags/diag1000400" # analysis + "analysis_default_regression.py --path diags/diag1000400 --rtol 1e-8" # checksum OFF # dependency ) endif() @@ -199,8 +199,8 @@ if(WarpX_FFT) RZ # dims 2 # nprocs inputs_test_rz_multiJ_psatd # inputs - analysis_default_regression.py # analysis - diags/diag1000025 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000025" # checksum OFF # dependency ) label_warpx_test(test_rz_multiJ_psatd slow) diff --git a/Examples/Tests/nci_psatd_stability/analysis_galilean.py b/Examples/Tests/nci_psatd_stability/analysis_galilean.py index 99f14d91371..43bdaedabbf 100755 --- a/Examples/Tests/nci_psatd_stability/analysis_galilean.py +++ b/Examples/Tests/nci_psatd_stability/analysis_galilean.py @@ -13,7 +13,6 @@ (suppressed by the Galilean PSATD method, without or with averaging, respectively). """ -import os import re import sys @@ -22,8 +21,6 @@ import yt yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -117,10 +114,3 @@ print(f"err_charge = {err_charge}") print(f"tol_charge = {tol_charge}") assert err_charge < tol_charge - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - rtol=1e-8, -) diff --git a/Examples/Tests/nci_psatd_stability/analysis_multiJ.py b/Examples/Tests/nci_psatd_stability/analysis_multiJ.py index 6dcfb6565fe..19ba722781b 100755 --- a/Examples/Tests/nci_psatd_stability/analysis_multiJ.py +++ b/Examples/Tests/nci_psatd_stability/analysis_multiJ.py @@ -10,7 +10,6 @@ both J and rho constant in time, and with divergence cleaning). 
""" -import os import sys import numpy as np @@ -18,8 +17,6 @@ import yt yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -49,9 +46,3 @@ print(f"err_energy = {err_energy}") print(f"tol_energy = {tol_energy}") assert err_energy < tol_energy - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/nodal_electrostatic/CMakeLists.txt b/Examples/Tests/nodal_electrostatic/CMakeLists.txt index a6b3f5b0102..026ab1a34bc 100644 --- a/Examples/Tests/nodal_electrostatic/CMakeLists.txt +++ b/Examples/Tests/nodal_electrostatic/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 3 # dims 1 # nprocs inputs_test_3d_nodal_electrostatic_solver # inputs - analysis.py # analysis - diags/diag1000010 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) label_warpx_test(test_3d_nodal_electrostatic_solver slow) diff --git a/Examples/Tests/nodal_electrostatic/analysis.py b/Examples/Tests/nodal_electrostatic/analysis.py index f015d525280..b6d22d60a79 100755 --- a/Examples/Tests/nodal_electrostatic/analysis.py +++ b/Examples/Tests/nodal_electrostatic/analysis.py @@ -1,13 +1,7 @@ #!/usr/bin/env python3 -import os -import sys - import numpy as np -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # check that the maximum chi value is small fname = "diags/reducedfiles/ParticleExtrema_beam_p.txt" chi_max = np.loadtxt(fname)[:, 19] @@ -17,9 +11,3 @@ fname = "diags/reducedfiles/ParticleNumber.txt" pho_num = np.loadtxt(fname)[:, 7] assert pho_num.all() == 0.0 - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/nodal_electrostatic/analysis_default_regression.py b/Examples/Tests/nodal_electrostatic/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/nodal_electrostatic/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/nuclear_fusion/CMakeLists.txt b/Examples/Tests/nuclear_fusion/CMakeLists.txt index c3ee8848e59..74d937601bd 100644 --- a/Examples/Tests/nuclear_fusion/CMakeLists.txt +++ b/Examples/Tests/nuclear_fusion/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_proton_boron_fusion # inputs - analysis_proton_boron_fusion.py # analysis - diags/diag1000001 # output + "analysis_proton_boron_fusion.py diags/diag1000001" # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_deuterium_deuterium_fusion # inputs - analysis_two_product_fusion.py # analysis - diags/diag1000001 # output + "analysis_two_product_fusion.py diags/diag1000001" # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) @@ -26,8 +26,8 @@ add_warpx_test( 3 # dims 1 # nprocs inputs_test_3d_deuterium_deuterium_fusion_intraspecies # inputs - analysis_deuterium_deuterium_3d_intraspecies.py # analysis - diags/diag1000010 # output + "analysis_deuterium_deuterium_3d_intraspecies.py" # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) @@ -36,8 +36,8 @@ add_warpx_test( 3 # 
dims 2 # nprocs inputs_test_3d_deuterium_tritium_fusion # inputs - analysis_two_product_fusion.py # analysis - diags/diag1000001 # output + "analysis_two_product_fusion.py diags/diag1000001" # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) @@ -46,8 +46,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_proton_boron_fusion # inputs - analysis_proton_boron_fusion.py # analysis - diags/diag1000001 # output + "analysis_proton_boron_fusion.py diags/diag1000001" # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) @@ -56,7 +56,7 @@ add_warpx_test( RZ # dims 2 # nprocs inputs_test_rz_deuterium_tritium_fusion # inputs - analysis_two_product_fusion.py # analysis - diags/diag1000001 # output + "analysis_two_product_fusion.py diags/diag1000001" # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) diff --git a/Examples/Tests/nuclear_fusion/analysis_default_regression.py b/Examples/Tests/nuclear_fusion/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/nuclear_fusion/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py b/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py index 8ae0e768815..2b06a8c3f25 100755 --- a/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py +++ b/Examples/Tests/nuclear_fusion/analysis_deuterium_deuterium_3d_intraspecies.py @@ -23,14 +23,8 @@ # Nuclear fusion, 32(4), p.611. # DOI: https://doi.org/10.1088/0029-5515/32/4/I07 -import os -import sys - import numpy as np -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # Load data from reduced diagnostics (physical time and neutron weights) time = np.loadtxt("./reduced_diags/particle_number.txt", usecols=1) neutron = np.loadtxt("./reduced_diags/particle_number.txt", usecols=9) @@ -48,9 +42,3 @@ print("error = ", error) print("tolerance = ", tolerance) assert error < tolerance - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py b/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py index c69080ac726..917cd86f258 100755 --- a/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py +++ b/Examples/Tests/nuclear_fusion/analysis_proton_boron_fusion.py @@ -5,16 +5,12 @@ # # License: BSD-3-Clause-LBNL -import os import re import sys -import yt - -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import numpy as np import scipy.constants as scc -from checksumAPI import evaluate_checksum +import yt ## This script performs various checks for the proton boron nuclear fusion module. The simulation ## that we check is made of 5 different tests, each with different proton, boron and alpha species. 
@@ -880,12 +876,6 @@ def main(): rho_end = field_data_end["rho"].to_ndarray() check_charge_conservation(rho_start, rho_end) - # compare checksums - evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - ) - if __name__ == "__main__": main() diff --git a/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py b/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py index 1a458a25e4a..38ab699a3a4 100755 --- a/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py +++ b/Examples/Tests/nuclear_fusion/analysis_two_product_fusion.py @@ -5,16 +5,12 @@ # # License: BSD-3-Clause-LBNL -import os import re import sys -import yt - -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import numpy as np import scipy.constants as scc -from checksumAPI import evaluate_checksum +import yt ## This script performs various checks for the fusion module. The simulation ## that we check is made of 2 different tests, each with different reactant and product species. @@ -555,12 +551,6 @@ def main(): rho_end = field_data_end["rho"].to_ndarray() check_charge_conservation(rho_start, rho_end) - # compare checksums - evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - ) - if __name__ == "__main__": main() diff --git a/Examples/Tests/ohm_solver_em_modes/CMakeLists.txt b/Examples/Tests/ohm_solver_em_modes/CMakeLists.txt index a08c321d88d..03843fe29f6 100644 --- a/Examples/Tests/ohm_solver_em_modes/CMakeLists.txt +++ b/Examples/Tests/ohm_solver_em_modes/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 1 # dims 2 # nprocs "inputs_test_1d_ohm_solver_em_modes_picmi.py --test --dim 1 --bdir z" # inputs - analysis.py # analysis - diags/field_diag000250 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/field_diag000250" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( RZ # dims 2 # nprocs "inputs_test_rz_ohm_solver_em_modes_picmi.py --test" # inputs - analysis_rz.py # analysis - diags/diag1000100 # output + "analysis_rz.py" # analysis + "analysis_default_regression.py --path diags/diag1000100 --rtol 1e-6" # checksum OFF # dependency ) label_warpx_test(test_rz_ohm_solver_em_modes_picmi slow) diff --git a/Examples/Tests/ohm_solver_em_modes/analysis.py b/Examples/Tests/ohm_solver_em_modes/analysis.py index bee634415d9..e2075944932 100755 --- a/Examples/Tests/ohm_solver_em_modes/analysis.py +++ b/Examples/Tests/ohm_solver_em_modes/analysis.py @@ -347,16 +347,3 @@ def get_analytic_L_mode(w): ) if not sim.test: plt.show() - -if sim.test: - import os - import sys - - sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - from checksumAPI import evaluate_checksum - - # compare checksums - evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - ) diff --git a/Examples/Tests/ohm_solver_em_modes/analysis_default_regression.py b/Examples/Tests/ohm_solver_em_modes/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/ohm_solver_em_modes/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/ohm_solver_em_modes/analysis_rz.py b/Examples/Tests/ohm_solver_em_modes/analysis_rz.py index a1eb185bbf6..841e1177630 100755 --- a/Examples/Tests/ohm_solver_em_modes/analysis_rz.py +++ b/Examples/Tests/ohm_solver_em_modes/analysis_rz.py @@ -181,17 +181,3 @@ def process(it): assert np.allclose( amps, np.array([61.02377286, 
19.80026021, 100.47687017, 10.83331295]) ) - -if sim.test: - import os - import sys - - sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - from checksumAPI import evaluate_checksum - - # compare checksums - evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - rtol=1e-6, - ) diff --git a/Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt b/Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt index 501b1ce2ced..a57ef7bb922 100644 --- a/Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt +++ b/Examples/Tests/ohm_solver_ion_Landau_damping/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 2 # dims 2 # nprocs "inputs_test_2d_ohm_solver_landau_damping_picmi.py --test --dim 2 --temp_ratio 0.1" # inputs - analysis.py # analysis - diags/diag1000100 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000100" # checksum OFF # dependency ) label_warpx_test(test_2d_ohm_solver_landau_damping_picmi slow) diff --git a/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py b/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py index 620331cf13f..bd193260f2f 100755 --- a/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py +++ b/Examples/Tests/ohm_solver_ion_Landau_damping/analysis.py @@ -111,16 +111,3 @@ ax1.set_title(f"Ion Landau damping - {sim.dim}d") plt.tight_layout() plt.savefig(f"diags/ion_Landau_damping_T_ratio_{sim.T_ratio}.png") - -if sim.test: - import os - import sys - - sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - from checksumAPI import evaluate_checksum - - # compare checksums - evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - ) diff --git a/Examples/Tests/ohm_solver_ion_Landau_damping/analysis_default_regression.py b/Examples/Tests/ohm_solver_ion_Landau_damping/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/ohm_solver_ion_Landau_damping/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt b/Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt index 81c6b0d41fd..288f8e32e53 100644 --- a/Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt +++ b/Examples/Tests/ohm_solver_ion_beam_instability/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 1 # dims 2 # nprocs "inputs_test_1d_ohm_solver_ion_beam_picmi.py --test --dim 1 --resonant" # inputs - analysis.py # analysis - diags/diag1002500 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1002500" # checksum OFF # dependency ) label_warpx_test(test_1d_ohm_solver_ion_beam_picmi slow) diff --git a/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py b/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py index 3b0a18f29d5..978b8b9a731 100755 --- a/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py +++ b/Examples/Tests/ohm_solver_ion_beam_instability/analysis.py @@ -230,16 +230,3 @@ assert np.isclose(m4_rms_error, 1.515, atol=0.01) assert np.isclose(m5_rms_error, 0.718, atol=0.01) assert np.isclose(m6_rms_error, 0.357, atol=0.01) - - # checksum check - import os - import sys - - sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - from checksumAPI import evaluate_checksum - - # compare checksums - evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - ) diff --git 
a/Examples/Tests/ohm_solver_ion_beam_instability/analysis_default_regression.py b/Examples/Tests/ohm_solver_ion_beam_instability/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/ohm_solver_ion_beam_instability/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/CMakeLists.txt b/Examples/Tests/ohm_solver_magnetic_reconnection/CMakeLists.txt index cef47a7d95e..02b4f5e3cb9 100644 --- a/Examples/Tests/ohm_solver_magnetic_reconnection/CMakeLists.txt +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 2 # dims 2 # nprocs "inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py --test" # inputs - analysis.py # analysis - diags/diag1000020 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000020" # checksum OFF # dependency ) diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py b/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py index e7b41d4fbb4..84ab7140fd4 100755 --- a/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/analysis.py @@ -184,16 +184,3 @@ def animate(i): writervideo = FFMpegWriter(fps=14) anim.save("diags/mag_reconnection.mp4", writer=writervideo) - -if sim.test: - import os - import sys - - sys.path.insert(1, "../../../../warpx/Regression/Checksum/") - from checksumAPI import evaluate_checksum - - # compare checksums - evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - ) diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/analysis_default_regression.py b/Examples/Tests/ohm_solver_magnetic_reconnection/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt b/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt index 95a8d23687e..94fe240263c 100644 --- a/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt +++ b/Examples/Tests/open_bc_poisson_solver/CMakeLists.txt @@ -7,8 +7,8 @@ if(WarpX_FFT) 3 # dims 2 # nprocs inputs_test_3d_open_bc_poisson_solver # inputs - analysis.py # analysis - diags/diag1000001 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000001 --rtol 1e-2" # checksum OFF # dependency ) endif() @@ -19,8 +19,8 @@ if(WarpX_FFT) 3 # dims 2 # nprocs inputs_test_3d_open_bc_poisson_solver_sliced # inputs - analysis.py # analysis - diags/diag1000001 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1000001 --rtol 1e-2" # checksum OFF # dependency ) endif() diff --git a/Examples/Tests/open_bc_poisson_solver/analysis.py b/Examples/Tests/open_bc_poisson_solver/analysis.py index 25b55503cff..fda9da96cf6 100755 --- a/Examples/Tests/open_bc_poisson_solver/analysis.py +++ b/Examples/Tests/open_bc_poisson_solver/analysis.py @@ -1,16 +1,12 @@ #!/usr/bin/env python3 import os -import sys import numpy as np from openpmd_viewer import OpenPMDTimeSeries from scipy.constants import epsilon_0, pi from scipy.special import erf -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - sigmaz 
= 300e-6 sigmax = 516e-9 sigmay = 7.7e-9 @@ -60,10 +56,3 @@ def evaluate_E(x, y, z): assert np.allclose(Ex_warpx, Ex_theory, rtol=0.032, atol=0) assert np.allclose(Ey_warpx, Ey_theory, rtol=0.029, atol=0) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - rtol=1e-2, -) diff --git a/Examples/Tests/open_bc_poisson_solver/analysis_default_regression.py b/Examples/Tests/open_bc_poisson_solver/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/open_bc_poisson_solver/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/particle_boundary_interaction/CMakeLists.txt b/Examples/Tests/particle_boundary_interaction/CMakeLists.txt index 5bbb34c0d95..56739cf636b 100644 --- a/Examples/Tests/particle_boundary_interaction/CMakeLists.txt +++ b/Examples/Tests/particle_boundary_interaction/CMakeLists.txt @@ -7,8 +7,8 @@ if(WarpX_EB) RZ # dims 2 # nprocs inputs_test_rz_particle_boundary_interaction_picmi.py # inputs - analysis.py # analysis - diags/diag1/ # output + "analysis.py diags/diag1/" # analysis + "analysis_default_regression.py --path diags/diag1/" # checksum OFF # dependency ) endif() diff --git a/Examples/Tests/particle_boundary_interaction/analysis.py b/Examples/Tests/particle_boundary_interaction/analysis.py index d06200157d2..062569d5553 100755 --- a/Examples/Tests/particle_boundary_interaction/analysis.py +++ b/Examples/Tests/particle_boundary_interaction/analysis.py @@ -8,7 +8,6 @@ An input file inputs_test_rz_particle_boundary_interaction_picmi.py is used. """ -import os import sys import numpy as np @@ -16,8 +15,6 @@ from openpmd_viewer import OpenPMDTimeSeries yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum # Open plotfile specified in command line filename = sys.argv[1] @@ -49,10 +46,3 @@ assert ( (diff_x < tolerance) and (y[0] < 1e-8) and (diff_z < tolerance) ), "Test particle_boundary_interaction did not pass" - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - output_format="openpmd", -) diff --git a/Examples/Tests/particle_boundary_interaction/analysis_default_regression.py b/Examples/Tests/particle_boundary_interaction/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/particle_boundary_interaction/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/particle_boundary_process/CMakeLists.txt b/Examples/Tests/particle_boundary_process/CMakeLists.txt index 499cf445da5..d99121afea0 100644 --- a/Examples/Tests/particle_boundary_process/CMakeLists.txt +++ b/Examples/Tests/particle_boundary_process/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 2 # dims 1 # nprocs inputs_test_2d_particle_reflection_picmi.py # inputs - analysis_default_regression.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) @@ -17,8 +17,8 @@ if(WarpX_EB) 3 # dims 2 # nprocs inputs_test_3d_particle_absorption # inputs - analysis_absorption.py # analysis - diags/diag1000060 # output + "analysis_absorption.py diags/diag1000060" # analysis + "analysis_default_regression.py --path diags/diag1000060" # checksum OFF # 
dependency ) endif() diff --git a/Examples/Tests/particle_boundary_process/analysis_absorption.py b/Examples/Tests/particle_boundary_process/analysis_absorption.py index 79e8d0e4bc6..498a456d871 100755 --- a/Examples/Tests/particle_boundary_process/analysis_absorption.py +++ b/Examples/Tests/particle_boundary_process/analysis_absorption.py @@ -6,14 +6,10 @@ # absence of the cube, none of the particles would have had time to exit # the problem domain yet. -import os import sys import yt -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # all particles are still there ds40 = yt.load("diags/diag1000040") np40 = ds40.index.particle_headers["electrons"].num_particles @@ -24,9 +20,3 @@ ds60 = yt.load(filename) np60 = ds60.index.particle_headers["electrons"].num_particles assert np60 == 0 - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/particle_boundary_scrape/CMakeLists.txt b/Examples/Tests/particle_boundary_scrape/CMakeLists.txt index 9b303afcc0f..5d2c52f30e0 100644 --- a/Examples/Tests/particle_boundary_scrape/CMakeLists.txt +++ b/Examples/Tests/particle_boundary_scrape/CMakeLists.txt @@ -7,8 +7,8 @@ if(WarpX_EB) 3 # dims 2 # nprocs inputs_test_3d_particle_scrape # inputs - analysis_scrape.py # analysis - diags/diag1000060 # output + "analysis_scrape.py diags/diag1000060" # analysis + "analysis_default_regression.py --path diags/diag1000060" # checksum OFF # dependency ) endif() @@ -19,8 +19,8 @@ if(WarpX_EB) 3 # dims 2 # nprocs inputs_test_3d_particle_scrape_picmi.py # inputs - analysis_scrape.py # analysis - diags/diag1000060 # output + "analysis_scrape.py diags/diag1000060" # analysis + "analysis_default_regression.py --path diags/diag1000060" # checksum OFF # dependency ) endif() diff --git a/Examples/Tests/particle_boundary_scrape/analysis_default_regression.py b/Examples/Tests/particle_boundary_scrape/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/particle_boundary_scrape/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/particle_data_python/CMakeLists.txt b/Examples/Tests/particle_data_python/CMakeLists.txt index e58fe72670a..6bae89fd41b 100644 --- a/Examples/Tests/particle_data_python/CMakeLists.txt +++ b/Examples/Tests/particle_data_python/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_particle_attr_access_picmi.py # inputs - analysis.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 2 # dims 2 # nprocs "inputs_test_2d_particle_attr_access_picmi.py --unique" # inputs - analysis.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) @@ -26,7 +26,7 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_prev_positions_picmi.py # inputs - analysis_default_regression.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) diff --git a/Examples/Tests/particle_data_python/analysis.py b/Examples/Tests/particle_data_python/analysis.py deleted file mode 100755 index 0b0cce3295b..00000000000 --- 
a/Examples/Tests/particle_data_python/analysis.py +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright 2021 Modern Electron -# -# License: BSD-3-Clause-LBNL - -# This script just checks that the PICMI file executed successfully. -# If it did there will be a plotfile for the final step. - -import sys - -step = int(sys.argv[1][-5:]) - -assert step == 10 diff --git a/Examples/Tests/particle_fields_diags/CMakeLists.txt b/Examples/Tests/particle_fields_diags/CMakeLists.txt index a83818b6966..414d303629a 100644 --- a/Examples/Tests/particle_fields_diags/CMakeLists.txt +++ b/Examples/Tests/particle_fields_diags/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_particle_fields_diags # inputs - analysis_particle_diags.py # analysis - diags/diag1000200 # output + "analysis_particle_diags.py diags/diag1000200" # analysis + "analysis_default_regression.py --path diags/diag1000200" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( # test_3d_particle_fields_diags_single_precision # name # 3 # dims # 2 # nprocs -## inputs_test_3d_particle_fields_diags # inputs -# analysis_particle_diags_single.py # analysis -# diags/diag1000200 # output +# inputs_test_3d_particle_fields_diags # inputs +# "analysis_particle_diags_single.py diags/diag1000200" # analysis +# "analysis_default_regression.py --path diags/diag1000200 --rtol 1e-3" # checksum # OFF # dependency #) diff --git a/Examples/Tests/particle_fields_diags/analysis_default_regression.py b/Examples/Tests/particle_fields_diags/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/particle_fields_diags/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py b/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py index 01a7436a787..f59e0aed8bf 100755 --- a/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py +++ b/Examples/Tests/particle_fields_diags/analysis_particle_diags_impl.py @@ -11,7 +11,6 @@ # Various particle and field quantities are written to file using the reduced diagnostics # and compared with the corresponding quantities computed from the data in the plotfiles. 
-import os import sys import numpy as np @@ -19,9 +18,6 @@ import yt from scipy.constants import c, e, m_e, m_p -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - def do_analysis(single_precision=False): fn = sys.argv[1] @@ -228,8 +224,6 @@ def do_analysis(single_precision=False): error_plt = dict() error_opmd = dict() tolerance = 5e-3 if single_precision else 1e-12 - # if single precision, increase tolerance from default value - check_tolerance = 5e-3 if single_precision else 1e-9 for k in values_yt.keys(): # check that the zeros line up, since we'll be ignoring them in the error calculation @@ -247,10 +241,3 @@ def do_analysis(single_precision=False): ) assert error_opmd[k] < tolerance print(k, "relative error openPMD = ", error_opmd[k]) - - # compare checksums - evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - rtol=check_tolerance, - ) diff --git a/Examples/Tests/particle_pusher/CMakeLists.txt b/Examples/Tests/particle_pusher/CMakeLists.txt index 3d8f1496587..cd414316b67 100644 --- a/Examples/Tests/particle_pusher/CMakeLists.txt +++ b/Examples/Tests/particle_pusher/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 3 # dims 1 # nprocs inputs_test_3d_particle_pusher # inputs - analysis.py # analysis - diags/diag1010000 # output + "analysis.py diags/diag1010000" # analysis + "analysis_default_regression.py --path diags/diag1010000" # checksum OFF # dependency ) diff --git a/Examples/Tests/particle_pusher/analysis.py b/Examples/Tests/particle_pusher/analysis.py index 9ed92507d4d..ae7b2054c28 100755 --- a/Examples/Tests/particle_pusher/analysis.py +++ b/Examples/Tests/particle_pusher/analysis.py @@ -22,14 +22,10 @@ # tolerance: 0.001 # Possible running time: ~ 4.0 s -import os import sys import yt -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - tolerance = 0.001 filename = sys.argv[1] @@ -40,9 +36,3 @@ print("error = ", abs(x)) print("tolerance = ", tolerance) assert abs(x) < tolerance - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/particle_pusher/analysis_default_regression.py b/Examples/Tests/particle_pusher/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/particle_pusher/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/particle_thermal_boundary/CMakeLists.txt b/Examples/Tests/particle_thermal_boundary/CMakeLists.txt index eeae6660e02..2f0cbba4ee6 100644 --- a/Examples/Tests/particle_thermal_boundary/CMakeLists.txt +++ b/Examples/Tests/particle_thermal_boundary/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_particle_thermal_boundary # inputs - analysis.py # analysis - diags/diag1002000 # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1002000" # checksum OFF # dependency ) diff --git a/Examples/Tests/particle_thermal_boundary/analysis.py b/Examples/Tests/particle_thermal_boundary/analysis.py index 621bf2032be..81f8a73b474 100755 --- a/Examples/Tests/particle_thermal_boundary/analysis.py +++ b/Examples/Tests/particle_thermal_boundary/analysis.py @@ -14,14 +14,8 @@ beyond 2% in the time that it takes all particles to cross the domain boundary """ -import os -import sys - import numpy as np -sys.path.insert(1, 
"../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - FE_rdiag = "./diags/reducedfiles/EF.txt" init_Fenergy = np.loadtxt(FE_rdiag)[1, 2] final_Fenergy = np.loadtxt(FE_rdiag)[-1, 2] @@ -32,9 +26,3 @@ init_Penergy = np.loadtxt(PE_rdiag)[0, 2] final_Penergy = np.loadtxt(PE_rdiag)[-1, 2] assert abs(final_Penergy - init_Penergy) / init_Penergy < 0.02 - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/particle_thermal_boundary/analysis_default_regression.py b/Examples/Tests/particle_thermal_boundary/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/particle_thermal_boundary/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/particles_in_pml/CMakeLists.txt b/Examples/Tests/particles_in_pml/CMakeLists.txt index fb539461ec2..4f150c6d4e4 100644 --- a/Examples/Tests/particles_in_pml/CMakeLists.txt +++ b/Examples/Tests/particles_in_pml/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_particles_in_pml # inputs - analysis_particles_in_pml.py # analysis - diags/diag1000180 # output + "analysis_particles_in_pml.py diags/diag1000180" # analysis + "analysis_default_regression.py --path diags/diag1000180" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_particles_in_pml_mr # inputs - analysis_particles_in_pml.py # analysis - diags/diag1000300 # output + "analysis_particles_in_pml.py diags/diag1000300" # analysis + "analysis_default_regression.py --path diags/diag1000300" # checksum OFF # dependency ) @@ -26,8 +26,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_particles_in_pml # inputs - analysis_particles_in_pml.py # analysis - diags/diag1000120 # output + "analysis_particles_in_pml.py diags/diag1000120" # analysis + "analysis_default_regression.py --path diags/diag1000120" # checksum OFF # dependency ) @@ -36,8 +36,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_particles_in_pml_mr # inputs - analysis_particles_in_pml.py # analysis - diags/diag1000200 # output + "analysis_particles_in_pml.py diags/diag1000200" # analysis + "analysis_default_regression.py --path diags/diag1000200" # checksum OFF # dependency ) label_warpx_test(test_3d_particles_in_pml_mr slow) diff --git a/Examples/Tests/particles_in_pml/analysis_default_regression.py b/Examples/Tests/particles_in_pml/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/particles_in_pml/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py b/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py index 8e7d95eda08..63ef4c7d2ff 100755 --- a/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py +++ b/Examples/Tests/particles_in_pml/analysis_particles_in_pml.py @@ -18,14 +18,11 @@ charge, with associated fields, behind them. 
""" -import os import sys import yt yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum # Open plotfile specified in command line filename = sys.argv[1] @@ -66,9 +63,3 @@ print("tolerance_abs: " + str(tolerance_abs)) assert max_Efield < tolerance_abs - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/pass_mpi_communicator/CMakeLists.txt b/Examples/Tests/pass_mpi_communicator/CMakeLists.txt index ac60636b931..4f5a8b4965c 100644 --- a/Examples/Tests/pass_mpi_communicator/CMakeLists.txt +++ b/Examples/Tests/pass_mpi_communicator/CMakeLists.txt @@ -10,7 +10,7 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_pass_mpi_comm_picmi.py # inputs - OFF #analysis.py # analysis - OFF # output + OFF # analysis + OFF # checksum OFF # dependency ) diff --git a/Examples/Tests/pass_mpi_communicator/analysis_default_regression.py b/Examples/Tests/pass_mpi_communicator/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/pass_mpi_communicator/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/pec/CMakeLists.txt b/Examples/Tests/pec/CMakeLists.txt index e0bab40d058..f331249ded0 100644 --- a/Examples/Tests/pec/CMakeLists.txt +++ b/Examples/Tests/pec/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_pec_field # inputs - analysis_pec.py # analysis - diags/diag1000125 # output + "analysis_pec.py diags/diag1000125" # analysis + "analysis_default_regression.py --path diags/diag1000125" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_pec_field_mr # inputs - analysis_pec_mr.py # analysis - diags/diag1000125 # output + "analysis_pec_mr.py diags/diag1000125" # analysis + "analysis_default_regression.py --path diags/diag1000125" # checksum OFF # dependency ) @@ -26,8 +26,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_pec_particle # inputs - analysis_default_regression.py # analysis - diags/diag1000020 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000020" # checksum OFF # dependency ) @@ -36,7 +36,7 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_pec_field_insulator # inputs - analysis_default_regression.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) diff --git a/Examples/Tests/pec/analysis_pec.py b/Examples/Tests/pec/analysis_pec.py index 29d9a4e26f4..251a17e017c 100755 --- a/Examples/Tests/pec/analysis_pec.py +++ b/Examples/Tests/pec/analysis_pec.py @@ -11,7 +11,6 @@ # The electric field (Ey) is a standing wave due to the PEC boundary condition, # and as a result, the minimum and maximum value after reflection would be two times the value at initialization due to constructive interference. # Additionally, the value of Ey at the boundary must be equal to zero. 
-import os import sys import matplotlib @@ -24,9 +23,6 @@ import numpy as np -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # this will be the name of the plot file fn = sys.argv[1] @@ -89,9 +85,3 @@ assert max_Ey_error_rel < tolerance_rel assert min_Ey_error_rel < tolerance_rel - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/pec/analysis_pec_mr.py b/Examples/Tests/pec/analysis_pec_mr.py index 069a1d01afa..a99c4b0bafb 100755 --- a/Examples/Tests/pec/analysis_pec_mr.py +++ b/Examples/Tests/pec/analysis_pec_mr.py @@ -11,7 +11,6 @@ # The electric field (Ey) is a standing wave due to the PEC boundary condition, # and as a result, the minimum and maximum value after reflection would be two times the value at initialization due to constructive interference. # Additionally, the value of Ey at the boundary must be equal to zero. -import os import sys import matplotlib @@ -24,9 +23,6 @@ import numpy as np -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # this will be the name of the plot file fn = sys.argv[1] @@ -89,9 +85,3 @@ assert max_Ey_error_rel < tolerance_rel assert min_Ey_error_rel < tolerance_rel - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/photon_pusher/CMakeLists.txt b/Examples/Tests/photon_pusher/CMakeLists.txt index 7926d8faeaf..78bc1d0b416 100644 --- a/Examples/Tests/photon_pusher/CMakeLists.txt +++ b/Examples/Tests/photon_pusher/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_photon_pusher # inputs - analysis.py # analysis - diags/diag1000050 # output + "analysis.py diags/diag1000050" # analysis + "analysis_default_regression.py --path diags/diag1000050" # checksum OFF # dependency ) diff --git a/Examples/Tests/photon_pusher/analysis.py b/Examples/Tests/photon_pusher/analysis.py index 2a77e325bc5..e2ccfc42656 100755 --- a/Examples/Tests/photon_pusher/analysis.py +++ b/Examples/Tests/photon_pusher/analysis.py @@ -7,15 +7,11 @@ # # License: BSD-3-Clause-LBNL -import os import sys import numpy as np import yt -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # This script checks if photons initialized with different momenta and # different initial directions propagate along straight lines at the speed of # light. The plotfile to be analyzed is passed as a command line argument. @@ -153,12 +149,6 @@ def check(): assert (max(disc_pos) <= tol_pos) and (max(disc_mom) <= tol_mom) - # compare checksums - evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - ) - # This function generates the input file to test the photon pusher. 
def generate(): diff --git a/Examples/Tests/photon_pusher/analysis_default_regression.py b/Examples/Tests/photon_pusher/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/photon_pusher/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/plasma_lens/CMakeLists.txt b/Examples/Tests/plasma_lens/CMakeLists.txt index bc13ae433bc..f6d6ea6daeb 100644 --- a/Examples/Tests/plasma_lens/CMakeLists.txt +++ b/Examples/Tests/plasma_lens/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_plasma_lens # inputs - analysis.py # analysis - diags/diag1000084 # output + "analysis.py diags/diag1000084" # analysis + "analysis_default_regression.py --path diags/diag1000084" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_plasma_lens_boosted # inputs - analysis.py # analysis - diags/diag1000084 # output + "analysis.py diags/diag1000084" # analysis + "analysis_default_regression.py --path diags/diag1000084" # checksum OFF # dependency ) @@ -26,8 +26,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_plasma_lens_hard_edged # inputs - analysis.py # analysis - diags/diag1000084 # output + "analysis.py diags/diag1000084" # analysis + "analysis_default_regression.py --path diags/diag1000084" # checksum OFF # dependency ) @@ -36,8 +36,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_plasma_lens_picmi.py # inputs - analysis.py # analysis - diags/diag1000084 # output + "analysis.py diags/diag1000084" # analysis + "analysis_default_regression.py --path diags/diag1000084" # checksum OFF # dependency ) @@ -46,7 +46,7 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_plasma_lens_short # inputs - analysis.py # analysis - diags/diag1000084 # output + "analysis.py diags/diag1000084" # analysis + "analysis_default_regression.py --path diags/diag1000084" # checksum OFF # dependency ) diff --git a/Examples/Tests/plasma_lens/analysis.py b/Examples/Tests/plasma_lens/analysis.py index 46036573940..44671eea791 100755 --- a/Examples/Tests/plasma_lens/analysis.py +++ b/Examples/Tests/plasma_lens/analysis.py @@ -15,8 +15,6 @@ The motion is slow enough that relativistic effects are ignored. 
""" -import os -import re import sys import numpy as np @@ -24,8 +22,6 @@ from scipy.constants import c, e, m_e yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum filename = sys.argv[1] ds = yt.load(filename) @@ -194,11 +190,3 @@ def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): assert abs(np.abs((uy - uy_sim) / uy)) < velocity_tolerance, Exception( "error in y particle velocity" ) - -# compare checksums -test_name = os.path.split(os.getcwd())[1] -test_name = re.sub("_picmi", "", test_name) # same checksums for PICMI test -evaluate_checksum( - test_name=test_name, - output_file=sys.argv[1], -) diff --git a/Examples/Tests/plasma_lens/analysis_default_regression.py b/Examples/Tests/plasma_lens/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/plasma_lens/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/pml/CMakeLists.txt b/Examples/Tests/pml/CMakeLists.txt index c63412dc763..8ba70f77aef 100644 --- a/Examples/Tests/pml/CMakeLists.txt +++ b/Examples/Tests/pml/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_pml_x_ckc # inputs - analysis_pml_ckc.py # analysis - diags/diag1000300 # output + "analysis_pml_ckc.py diags/diag1000300" # analysis + "analysis_default_regression.py --path diags/diag1000300" # checksum OFF # dependency ) @@ -17,8 +17,8 @@ if(WarpX_FFT) 2 # dims 2 # nprocs inputs_test_2d_pml_x_galilean # inputs - analysis_pml_psatd.py # analysis - diags/diag1000300 # output + "analysis_pml_psatd.py diags/diag1000300" # analysis + "analysis_default_regression.py --path diags/diag1000300" # checksum OFF # dependency ) endif() @@ -29,8 +29,8 @@ if(WarpX_FFT) 2 # dims 2 # nprocs inputs_test_2d_pml_x_psatd # inputs - analysis_pml_psatd.py # analysis - diags/diag1000300 # output + "analysis_pml_psatd.py diags/diag1000300" # analysis + "analysis_default_regression.py --path diags/diag1000300" # checksum OFF # dependency ) endif() @@ -41,8 +41,8 @@ if(WarpX_FFT) 2 # dims 2 # nprocs inputs_test_2d_pml_x_psatd_restart # inputs - analysis_default_restart.py # analysis - diags/diag1000300 # output + "analysis_default_restart.py diags/diag1000300" # analysis + "analysis_default_regression.py --path diags/diag1000300" # checksum test_2d_pml_x_psatd # dependency ) endif() @@ -52,8 +52,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_pml_x_yee # inputs - analysis_pml_yee.py # analysis - diags/diag1000300 # output + "analysis_pml_yee.py diags/diag1000300" # analysis + "analysis_default_regression.py --path diags/diag1000300" # checksum OFF # dependency ) @@ -62,8 +62,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_pml_x_yee_restart # inputs - analysis_default_restart.py # analysis - diags/diag1000300 # output + "analysis_default_restart.py diags/diag1000300" # analysis + "analysis_default_regression.py --path diags/diag1000300" # checksum test_2d_pml_x_yee # dependency ) @@ -73,8 +73,8 @@ if(WarpX_FFT) 3 # dims 2 # nprocs inputs_test_3d_pml_psatd_dive_divb_cleaning # inputs - analysis_default_regression.py # analysis - diags/diag1000100 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000100" # checksum OFF # dependency ) endif() @@ -85,8 +85,8 @@ if(WarpX_FFT) RZ # dims 2 # nprocs inputs_test_rz_pml_psatd # inputs - analysis_pml_psatd_rz.py # analysis - diags/diag1000500 # output 
+ "analysis_pml_psatd_rz.py diags/diag1000500" # analysis + "analysis_default_regression.py --path diags/diag1000500" # checksum OFF # dependency ) endif() diff --git a/Examples/Tests/pml/analysis_pml_ckc.py b/Examples/Tests/pml/analysis_pml_ckc.py index f6637e2d47b..b50cbb867e6 100755 --- a/Examples/Tests/pml/analysis_pml_ckc.py +++ b/Examples/Tests/pml/analysis_pml_ckc.py @@ -8,7 +8,6 @@ # License: BSD-3-Clause-LBNL -import os import sys import numpy as np @@ -16,8 +15,6 @@ import yt yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -56,9 +53,3 @@ print("tolerance_rel: " + str(tolerance_rel)) assert error_rel < tolerance_rel - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/pml/analysis_pml_psatd.py b/Examples/Tests/pml/analysis_pml_psatd.py index 4f44c0f3432..156f58362ce 100755 --- a/Examples/Tests/pml/analysis_pml_psatd.py +++ b/Examples/Tests/pml/analysis_pml_psatd.py @@ -16,8 +16,6 @@ import yt yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -74,9 +72,3 @@ print("reflectivity_max = " + str(reflectivity_max)) assert reflectivity < reflectivity_max - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/pml/analysis_pml_psatd_rz.py b/Examples/Tests/pml/analysis_pml_psatd_rz.py index fb662e36d40..f06b7a52c6c 100755 --- a/Examples/Tests/pml/analysis_pml_psatd_rz.py +++ b/Examples/Tests/pml/analysis_pml_psatd_rz.py @@ -16,15 +16,12 @@ the pulse will remain with in the domain. 
""" -import os import sys import numpy as np import yt yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum # Open plotfile specified in command line filename = sys.argv[1] @@ -54,9 +51,3 @@ tolerance_abs = 2.0 print("tolerance_abs: " + str(tolerance_abs)) assert max_Efield < tolerance_abs - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/pml/analysis_pml_yee.py b/Examples/Tests/pml/analysis_pml_yee.py index 5f6d21e579c..b6aad4ccb7e 100755 --- a/Examples/Tests/pml/analysis_pml_yee.py +++ b/Examples/Tests/pml/analysis_pml_yee.py @@ -8,7 +8,6 @@ # License: BSD-3-Clause-LBNL -import os import sys import numpy as np @@ -16,8 +15,6 @@ import yt yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -56,9 +53,3 @@ print("tolerance_rel: " + str(tolerance_rel)) assert error_rel < tolerance_rel - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/point_of_contact_eb/CMakeLists.txt b/Examples/Tests/point_of_contact_eb/CMakeLists.txt index b8d7ba1131f..700eba6f92f 100644 --- a/Examples/Tests/point_of_contact_eb/CMakeLists.txt +++ b/Examples/Tests/point_of_contact_eb/CMakeLists.txt @@ -7,8 +7,8 @@ if(WarpX_EB) 3 # dims 2 # nprocs inputs_test_3d_point_of_contact_eb # inputs - analysis.py # analysis - diags/diag1/ # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1/" # checksum OFF # dependency ) endif() @@ -19,8 +19,8 @@ if(WarpX_EB) RZ # dims 2 # nprocs inputs_test_rz_point_of_contact_eb # inputs - analysis.py # analysis - diags/diag1/ # output + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1/" # checksum OFF # dependency ) endif() diff --git a/Examples/Tests/point_of_contact_eb/analysis.py b/Examples/Tests/point_of_contact_eb/analysis.py index 1c9dbc85f4c..55a65f2cee3 100755 --- a/Examples/Tests/point_of_contact_eb/analysis.py +++ b/Examples/Tests/point_of_contact_eb/analysis.py @@ -8,16 +8,11 @@ An input file inputs_test_3d_point_of_contact_eb is used. 
""" -import os -import sys - import numpy as np import yt from openpmd_viewer import OpenPMDTimeSeries yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum ts_scraping = OpenPMDTimeSeries("./diags/diag2/particles_at_eb/") @@ -92,10 +87,3 @@ and (diff_ny < tolerance_n) and (np.abs(nz) < 1e-8) ), "Test point_of_contact did not pass" - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - output_format="openpmd", -) diff --git a/Examples/Tests/point_of_contact_eb/analysis_default_regression.py b/Examples/Tests/point_of_contact_eb/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/point_of_contact_eb/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/projection_divb_cleaner/CMakeLists.txt b/Examples/Tests/projection_divb_cleaner/CMakeLists.txt index 307ae7656c5..40b84bd0397 100644 --- a/Examples/Tests/projection_divb_cleaner/CMakeLists.txt +++ b/Examples/Tests/projection_divb_cleaner/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 3 # dims 1 # nprocs inputs_test_3d_projection_divb_cleaner_callback_picmi.py # inputs - analysis_default_regression.py # analysis - diags/diag1000001 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 3 # dims 1 # nprocs inputs_test_3d_projection_divb_cleaner_picmi.py # inputs - analysis_default_regression.py # analysis - diags/diag1000001 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) @@ -26,7 +26,7 @@ add_warpx_test( RZ # dims 1 # nprocs inputs_test_rz_projection_divb_cleaner # inputs - analysis.py # analysis - diags/diag1000001 # output + "analysis.py diags/diag1000001" # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) diff --git a/Examples/Tests/projection_divb_cleaner/analysis.py b/Examples/Tests/projection_divb_cleaner/analysis.py index 2324c370032..5db145eba7e 100755 --- a/Examples/Tests/projection_divb_cleaner/analysis.py +++ b/Examples/Tests/projection_divb_cleaner/analysis.py @@ -16,15 +16,11 @@ # tolerance: 1.0e-8 # Possible running time: 0.327827743 s -import os import sys import numpy as np import yt -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - tolerance = 4e-3 filename = sys.argv[1] @@ -72,9 +68,3 @@ print("error = ", error) print("tolerance = ", tolerance) assert error < tolerance - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/python_wrappers/CMakeLists.txt b/Examples/Tests/python_wrappers/CMakeLists.txt index 0045a181606..060cdd7c183 100644 --- a/Examples/Tests/python_wrappers/CMakeLists.txt +++ b/Examples/Tests/python_wrappers/CMakeLists.txt @@ -7,8 +7,8 @@ if(WarpX_FFT) 2 # dims 2 # nprocs inputs_test_2d_python_wrappers_picmi.py # inputs - analysis_default_regression.py # analysis - diags/diag1000100 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000100" # checksum OFF # dependency ) endif() diff --git a/Examples/Tests/qed/CMakeLists.txt b/Examples/Tests/qed/CMakeLists.txt index 5dd786f26a1..d38c2e4fc69 100644 --- a/Examples/Tests/qed/CMakeLists.txt +++ 
b/Examples/Tests/qed/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_qed_breit_wheeler # inputs - analysis_breit_wheeler_yt.py # analysis - diags/diag1000002 # output + "analysis_breit_wheeler_yt.py diags/diag1000002" # analysis + "analysis_default_regression.py --path diags/diag1000002" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_qed_breit_wheeler_opmd # inputs - analysis_breit_wheeler_opmd.py # analysis - diags/diag1/ # output + "analysis_breit_wheeler_opmd.py diags/diag1/" # analysis + "analysis_default_regression.py --path diags/diag1/" # checksum OFF # dependency ) @@ -26,8 +26,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_qed_quantum_sync # inputs - analysis_quantum_sync.py # analysis - diags/diag1000002 # output + "analysis_quantum_sync.py diags/diag1000002" # analysis + "analysis_default_regression.py --path diags/diag1000002" # checksum OFF # dependency ) @@ -36,8 +36,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_qed_breit_wheeler # inputs - analysis_breit_wheeler_yt.py # analysis - diags/diag1000002 # output + "analysis_breit_wheeler_yt.py diags/diag1000002" # analysis + "analysis_default_regression.py --path diags/diag1000002" # checksum OFF # dependency ) @@ -46,8 +46,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_qed_breit_wheeler_opmd # inputs - analysis_breit_wheeler_opmd.py # analysis - diags/diag1/ # output + "analysis_breit_wheeler_opmd.py diags/diag1/" # analysis + "analysis_default_regression.py --path diags/diag1/" # checksum OFF # dependency ) @@ -56,8 +56,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_qed_quantum_sync # inputs - analysis_quantum_sync.py # analysis - diags/diag1000002 # output + "analysis_quantum_sync.py diags/diag1000002" # analysis + "analysis_default_regression.py --path diags/diag1000002" # checksum OFF # dependency ) @@ -66,8 +66,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_qed_schwinger_1 # inputs - analysis_schwinger.py # analysis - diags/diag1000001 # output + "analysis_schwinger.py diags/diag1000001" # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) @@ -76,8 +76,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_qed_schwinger_2 # inputs - analysis_schwinger.py # analysis - diags/diag1000001 # output + "analysis_schwinger.py diags/diag1000001" # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) @@ -86,8 +86,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_qed_schwinger_3 # inputs - analysis_schwinger.py # analysis - diags/diag1000001 # output + "analysis_schwinger.py diags/diag1000001" # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) @@ -96,7 +96,7 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_qed_schwinger_4 # inputs - analysis_schwinger.py # analysis - diags/diag1000001 # output + "analysis_schwinger.py diags/diag1000001" # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) diff --git a/Examples/Tests/qed/analysis_breit_wheeler_opmd.py b/Examples/Tests/qed/analysis_breit_wheeler_opmd.py index b88f00a85dc..1803305f008 100755 --- a/Examples/Tests/qed/analysis_breit_wheeler_opmd.py +++ b/Examples/Tests/qed/analysis_breit_wheeler_opmd.py @@ -12,10 +12,6 @@ import analysis_breit_wheeler_core as ac import openpmd_api as io -# sys.path.insert(1, '../../../../warpx/Regression/Checksum/') -# from 
checksumAPI import evaluate_checksum - - # This script is a frontend for the analysis routines # in analysis_breit_wheeler_core.py (please refer to this file for # a full description). It reads output files in openPMD @@ -72,13 +68,6 @@ def main(): ac.check(dt, particle_data) - # compare checksums - # evaluate_checksum( - # test_name=os.path.split(os.getcwd())[1], - # output_file=sys.argv[1], - # output_format="openpmd", - # ) - if __name__ == "__main__": main() diff --git a/Examples/Tests/qed/analysis_breit_wheeler_yt.py b/Examples/Tests/qed/analysis_breit_wheeler_yt.py index 48c45c990b0..bd8f4454723 100755 --- a/Examples/Tests/qed/analysis_breit_wheeler_yt.py +++ b/Examples/Tests/qed/analysis_breit_wheeler_yt.py @@ -7,14 +7,10 @@ # -*- coding: utf-8 -*- -import os import sys -import yt - -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") import analysis_breit_wheeler_core as ac -from checksumAPI import evaluate_checksum +import yt # This script is a frontend for the analysis routines # in analysis_breit_wheeler_core.py (please refer to this file for @@ -58,12 +54,6 @@ def main(): ac.check(dt, particle_data) - # compare checksums - evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - ) - if __name__ == "__main__": main() diff --git a/Examples/Tests/qed/analysis_default_regression.py b/Examples/Tests/qed/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/qed/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/qed/analysis_quantum_sync.py b/Examples/Tests/qed/analysis_quantum_sync.py index 531a0eac195..e4ede19260c 100755 --- a/Examples/Tests/qed/analysis_quantum_sync.py +++ b/Examples/Tests/qed/analysis_quantum_sync.py @@ -8,19 +8,15 @@ # -*- coding: utf-8 -*- -import os import sys +import matplotlib.pyplot as plt import numpy as np import scipy.integrate as integ import scipy.special as spe import scipy.stats as st import yt -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -import matplotlib.pyplot as plt -from checksumAPI import evaluate_checksum - # This script performs detailed checks of the Quantum Synchrotron photon emission process. # Two electron populations and two positron populations are initialized with different momenta in different # directions in a background EM field (with non-zero components along each direction). 
@@ -348,12 +344,6 @@ def check():
     print("*************\n")

-    # compare checksums
-    evaluate_checksum(
-        test_name=os.path.split(os.getcwd())[1],
-        output_file=sys.argv[1],
-    )
-

 def main():
     check()
diff --git a/Examples/Tests/qed/analysis_schwinger.py b/Examples/Tests/qed/analysis_schwinger.py
index 4ad21e3d518..5d1c5485ba3 100755
--- a/Examples/Tests/qed/analysis_schwinger.py
+++ b/Examples/Tests/qed/analysis_schwinger.py
@@ -18,9 +18,6 @@
 import numpy as np
 import yt

-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # define some parameters
 c = 299792458.0
@@ -160,9 +157,3 @@ def do_analysis(Ex, Ey, Ez, Bx, By, Bz):

 do_analysis(Ex_test, Ey_test, Ez_test, Bx_test, By_test, Bz_test)
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/radiation_reaction/CMakeLists.txt b/Examples/Tests/radiation_reaction/CMakeLists.txt
index 8696cf0f9b7..3286f4efd93 100644
--- a/Examples/Tests/radiation_reaction/CMakeLists.txt
+++ b/Examples/Tests/radiation_reaction/CMakeLists.txt
@@ -6,7 +6,7 @@ add_warpx_test(
     3 # dims
     2 # nprocs
     inputs_test_3d_radiation_reaction # inputs
-    analysis.py # analysis
-    diags/diag1000064 # output
+    "analysis.py diags/diag1000064" # analysis
+    "analysis_default_regression.py --path diags/diag1000064" # checksum
     OFF # dependency
 )
diff --git a/Examples/Tests/radiation_reaction/analysis.py b/Examples/Tests/radiation_reaction/analysis.py
index 74155a89cb3..0d4fcf12e8e 100755
--- a/Examples/Tests/radiation_reaction/analysis.py
+++ b/Examples/Tests/radiation_reaction/analysis.py
@@ -30,15 +30,11 @@
 # 3) H. Spohn, Dynamics of charged particles and their radiation field
 # (Cambridge University Press, Cambridge, 2004)

-import os
 import sys

 import numpy as np
 import yt

-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # Input filename
 inputname = "inputs"
 # ________________________________________
@@ -163,12 +159,6 @@ def check():

     assert error_rel < tolerance_rel

-    # compare checksums
-    evaluate_checksum(
-        test_name=os.path.split(os.getcwd())[1],
-        output_file=sys.argv[1],
-    )
-

 def generate():
     with open(inputname, "w") as f:
diff --git a/Examples/Tests/radiation_reaction/analysis_default_regression.py b/Examples/Tests/radiation_reaction/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/radiation_reaction/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/reduced_diags/CMakeLists.txt b/Examples/Tests/reduced_diags/CMakeLists.txt
index cd4f6392892..743afa79df5 100644
--- a/Examples/Tests/reduced_diags/CMakeLists.txt
+++ b/Examples/Tests/reduced_diags/CMakeLists.txt
@@ -6,8 +6,8 @@ add_warpx_test(
     3 # dims
     2 # nprocs
     inputs_test_3d_reduced_diags # inputs
-    analysis_reduced_diags.py # analysis
-    diags/diag1000200 # output
+    "analysis_reduced_diags.py diags/diag1000200" # analysis
+    "analysis_default_regression.py --path diags/diag1000200" # checksum
     OFF # dependency
 )
@@ -16,8 +16,8 @@ add_warpx_test(
     3 # dims
     2 # nprocs
     inputs_test_3d_reduced_diags_load_balance_costs_heuristic # inputs
-    analysis_reduced_diags_load_balance_costs.py # analysis
-    diags/diag1000003 # output
+    "analysis_reduced_diags_load_balance_costs.py diags/diag1000003" # analysis
+    "analysis_default_regression.py --path diags/diag1000003" # checksum
     OFF # dependency
 )
@@ -26,8 +26,8 @@ add_warpx_test(
     3 # dims
     2 # nprocs
     inputs_test_3d_reduced_diags_load_balance_costs_timers # inputs
-    analysis_reduced_diags_load_balance_costs.py # analysis
-    diags/diag1000003 # output
+    "analysis_reduced_diags_load_balance_costs.py diags/diag1000003" # analysis
+    "analysis_default_regression.py --path diags/diag1000003" # checksum
     OFF # dependency
 )
@@ -36,8 +36,8 @@ add_warpx_test(
     3 # dims
     2 # nprocs
     inputs_test_3d_reduced_diags_load_balance_costs_timers_picmi.py # inputs
-    analysis_reduced_diags_load_balance_costs.py # analysis
-    diags/diag1000003 # output
+    "analysis_reduced_diags_load_balance_costs.py diags/diag1000003" # analysis
+    "analysis_default_regression.py --path diags/diag1000003" # checksum
     OFF # dependency
 )
@@ -47,8 +47,8 @@ if(WarpX_FFT)
         3 # dims
         2 # nprocs
         inputs_test_3d_reduced_diags_load_balance_costs_timers_psatd # inputs
-        analysis_reduced_diags_load_balance_costs.py # analysis
-        diags/diag1000003 # output
+        "analysis_reduced_diags_load_balance_costs.py diags/diag1000003" # analysis
+        "analysis_default_regression.py --path diags/diag1000003" # checksum
         OFF # dependency
     )
 endif()
diff --git a/Examples/Tests/reduced_diags/analysis_default_regression.py b/Examples/Tests/reduced_diags/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/reduced_diags/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py b/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py
index 42916d34568..e0c1fe1d1b3 100755
--- a/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py
+++ b/Examples/Tests/reduced_diags/analysis_reduced_diags_impl.py
@@ -11,7 +11,6 @@
 # Various particle and field quantities are written to file using the reduced diagnostics
 # and compared with the corresponding quantities computed from the data in the plotfiles.

-import os
 import sys

 import numpy as np
@@ -20,9 +19,6 @@
 from scipy.constants import epsilon_0 as eps0
 from scipy.constants import mu_0 as mu0

-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # gamma threshold to switch between the relativistic expression of
 # the kinetic energy and its Taylor expansion.
 gamma_relativistic_threshold = 1.005
@@ -375,10 +371,3 @@ def do_analysis(single_precision=False):
         tol = field_energy_tolerance if (k == "field energy") else tolerance
         assert error[k] < tol
     print()
-
-    # compare checksums
-    evaluate_checksum(
-        test_name=os.path.split(os.getcwd())[1],
-        output_file=sys.argv[1],
-        rtol=1e-9,
-    )
diff --git a/Examples/Tests/reduced_diags/analysis_reduced_diags_load_balance_costs.py b/Examples/Tests/reduced_diags/analysis_reduced_diags_load_balance_costs.py
index 49a0018baa5..978b1fcd4ec 100755
--- a/Examples/Tests/reduced_diags/analysis_reduced_diags_load_balance_costs.py
+++ b/Examples/Tests/reduced_diags/analysis_reduced_diags_load_balance_costs.py
@@ -17,15 +17,10 @@
 # Possible running time: ~ 1 s

-import os
-import re
 import sys

 import numpy as np

-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # Command line argument
 fn = sys.argv[1]
@@ -76,11 +71,3 @@ def get_efficiency(i):
 # The load balanced case is expected to be more efficient
 # than the non-load balanced case
 assert efficiency_before < efficiency_after
-
-# compare checksums
-test_name = os.path.split(os.getcwd())[1]
-test_name = re.sub("_picmi", "", test_name) # same checksums for PICMI test
-evaluate_checksum(
-    test_name=test_name,
-    output_file=sys.argv[1],
-)
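The efficiency compared in `assert efficiency_before < efficiency_after` above is computed by the script's `get_efficiency` helper from the per-rank costs written by the LoadBalanceCosts reduced diagnostic. The helper's body sits outside the hunks shown, so the following is only a plausible sketch of such a metric, assuming the costs have already been summed into one number per MPI rank:

    import numpy as np

    def load_balance_efficiency(rank_costs):
        # Mean-to-max ratio of per-rank costs: 1.0 means perfectly balanced,
        # small values mean a single rank dominates the time step.
        rank_costs = np.asarray(rank_costs, dtype=float)
        return rank_costs.mean() / rank_costs.max()

Under this reading, the assertion simply checks that the ratio moved closer to 1 once load balancing kicked in.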
""" -import os import sys import matplotlib @@ -24,8 +23,6 @@ import yt yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum # Parameters from the Simulation Qtot = -1.0e-20 @@ -94,10 +91,3 @@ def check(E, E_th, label): check(Ex_array, Ex_th, "Ex") - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], - do_particles=False, -) diff --git a/Examples/Tests/relativistic_space_charge_initialization/analysis_default_regression.py b/Examples/Tests/relativistic_space_charge_initialization/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/relativistic_space_charge_initialization/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/repelling_particles/CMakeLists.txt b/Examples/Tests/repelling_particles/CMakeLists.txt index 056f670a860..e5b64cb9166 100644 --- a/Examples/Tests/repelling_particles/CMakeLists.txt +++ b/Examples/Tests/repelling_particles/CMakeLists.txt @@ -6,7 +6,7 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_repelling_particles # inputs - analysis.py # analysis - diags/diag1000200 # output + "analysis.py diags/diag1000200" # analysis + "analysis_default_regression.py --path diags/diag1000200" # checksum OFF # dependency ) diff --git a/Examples/Tests/repelling_particles/analysis.py b/Examples/Tests/repelling_particles/analysis.py index 74bde7b68ca..5f052361fc7 100755 --- a/Examples/Tests/repelling_particles/analysis.py +++ b/Examples/Tests/repelling_particles/analysis.py @@ -24,7 +24,6 @@ """ import glob -import os import re import sys @@ -35,9 +34,6 @@ yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - # Check plotfile name specified in command line last_filename = sys.argv[1] filename_radical = re.findall(r"(.*?)\d+/*$", last_filename)[0] @@ -78,9 +74,3 @@ # Check that the results are close to the theory assert np.allclose(beta1[1:], beta_th[1:], atol=0.01) assert np.allclose(-beta2[1:], beta_th[1:], atol=0.01) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/repelling_particles/analysis_default_regression.py b/Examples/Tests/repelling_particles/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/repelling_particles/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/resampling/CMakeLists.txt b/Examples/Tests/resampling/CMakeLists.txt index 46e34858014..b6f8c5baecf 100644 --- a/Examples/Tests/resampling/CMakeLists.txt +++ b/Examples/Tests/resampling/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 1 # dims 2 # nprocs inputs_test_1d_resample_velocity_coincidence_thinning # inputs - analysis_default_regression.py # analysis - diags/diag1000004 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000004" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 1 # dims 2 # nprocs inputs_test_1d_resample_velocity_coincidence_thinning_cartesian # inputs - analysis_default_regression.py # analysis - diags/diag1000004 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000004" # checksum OFF # dependency ) @@ -26,7 
+26,7 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_leveling_thinning # inputs - analysis.py # analysis - diags/diag1000008 # output + "analysis.py diags/diag1000008" # analysis + "analysis_default_regression.py --path diags/diag1000008" # checksum OFF # dependency ) diff --git a/Examples/Tests/resampling/analysis.py b/Examples/Tests/resampling/analysis.py index 40bad24d65e..8fff4a04a9c 100755 --- a/Examples/Tests/resampling/analysis.py +++ b/Examples/Tests/resampling/analysis.py @@ -9,16 +9,12 @@ ## In this test, we check that leveling thinning works as expected on two simple cases. Each case ## corresponds to a different particle species. -import os import sys import numpy as np import yt from scipy.special import erf -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - fn_final = sys.argv[1] fn0 = fn_final[:-4] + "0000" @@ -170,9 +166,3 @@ assert numparts_unaffected == numparts_unaffected_anticipated # Check that particles with weight higher than level weight are unaffected by resampling. assert np.all(w[-numparts_unaffected:] == w0[-numparts_unaffected:]) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/restart/CMakeLists.txt b/Examples/Tests/restart/CMakeLists.txt index df5b1239a01..1be7f2d5fa7 100644 --- a/Examples/Tests/restart/CMakeLists.txt +++ b/Examples/Tests/restart/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 2 # dims 1 # nprocs inputs_test_2d_id_cpu_read_picmi.py # inputs - analysis_default_regression.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) @@ -16,8 +16,8 @@ add_warpx_test( 2 # dims 1 # nprocs inputs_test_2d_runtime_components_picmi.py # inputs - analysis_default_regression.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) @@ -27,8 +27,8 @@ add_warpx_test( 2 # dims 1 # nprocs "inputs_test_2d_runtime_components_picmi.py amr.restart='../test_2d_runtime_components_picmi/diags/chk000005'" # inputs - OFF #analysis_default_restart.py # analysis - OFF #diags/diag1000010 # output + OFF #"analysis_default_restart.py diags/diag1000010" # analysis + OFF #"analysis_default_regression.py --path diags/diag1000010" # checksum test_2d_runtime_components_picmi # dependency ) @@ -37,8 +37,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_acceleration # inputs - analysis_default_regression.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) @@ -47,8 +47,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_acceleration_restart # inputs - analysis_default_restart.py # analysis - diags/diag1000010 # output + "analysis_default_restart.py diags/diag1000010" # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum test_3d_acceleration # dependency ) @@ -58,8 +58,8 @@ if(WarpX_FFT) 3 # dims 2 # nprocs inputs_test_3d_acceleration_psatd # inputs - analysis_default_regression.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) endif() @@ -70,8 +70,8 @@ if(WarpX_FFT) 3 # dims 2 # nprocs inputs_test_3d_acceleration_psatd_restart # inputs - analysis_default_restart.py # analysis - diags/diag1000010 # output + 
"analysis_default_restart.py diags/diag1000010" # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum test_3d_acceleration_psatd # dependency ) endif() @@ -82,8 +82,8 @@ if(WarpX_FFT) 3 # dims 2 # nprocs inputs_test_3d_acceleration_psatd_time_avg # inputs - analysis_default_regression.py # analysis - diags/diag1000010 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum OFF # dependency ) endif() @@ -94,8 +94,8 @@ if(WarpX_FFT) 3 # dims 2 # nprocs inputs_test_3d_acceleration_psatd_time_avg_restart # inputs - analysis_default_restart.py # analysis - diags/diag1000010 # output + "analysis_default_restart.py diags/diag1000010" # analysis + "analysis_default_regression.py --path diags/diag1000010" # checksum test_3d_acceleration_psatd_time_avg # dependency ) endif() diff --git a/Examples/Tests/restart/analysis_restart.py b/Examples/Tests/restart/analysis_restart.py deleted file mode 100755 index 26a05da90f2..00000000000 --- a/Examples/Tests/restart/analysis_restart.py +++ /dev/null @@ -1,21 +0,0 @@ -#!/usr/bin/env python3 - -import os -import sys - -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - -filename = sys.argv[1] - -# Check restart data v. original data -sys.path.insert(0, "../../../../warpx/Examples/") -from analysis_default_restart import check_restart - -check_restart(filename) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/restart_eb/CMakeLists.txt b/Examples/Tests/restart_eb/CMakeLists.txt index 50f808c3e1f..0c685340c4c 100644 --- a/Examples/Tests/restart_eb/CMakeLists.txt +++ b/Examples/Tests/restart_eb/CMakeLists.txt @@ -7,8 +7,8 @@ if(WarpX_EB) 3 # dims 1 # nprocs inputs_test_3d_eb_picmi.py # inputs - analysis_default_regression.py # analysis - diags/diag1000060 # output + OFF # analysis + "analysis_default_regression.py --path diags/diag1000060" # checksum OFF # dependency ) endif() @@ -19,9 +19,9 @@ endif() # test_3d_eb_picmi_restart # name # 3 # dims # 1 # nprocs -## "inputs_test_3d_eb_picmi.py amr.restart='../test_3d_eb_picmi/diags/chk000030'" # inputs -# analysis_default_restart.py # analysis -# diags/diag1000060 # output +# "inputs_test_3d_eb_picmi.py amr.restart='../test_3d_eb_picmi/diags/chk000030'" # inputs +# "analysis_default_restart.py diags/diag1000060" # analysis +# "analysis_default_regression.py --path diags/diag1000060" # checksum # test_3d_eb_picmi # dependency # ) #endif() diff --git a/Examples/Tests/rigid_injection/CMakeLists.txt b/Examples/Tests/rigid_injection/CMakeLists.txt index 21004c3248c..ca0a84a87ef 100644 --- a/Examples/Tests/rigid_injection/CMakeLists.txt +++ b/Examples/Tests/rigid_injection/CMakeLists.txt @@ -6,8 +6,8 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_rigid_injection_btd # inputs - analysis_rigid_injection_btd.py # analysis - diags/diag1000001 # output + "analysis_rigid_injection_btd.py diags/diag1000001" # analysis + "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) @@ -16,7 +16,7 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_rigid_injection_lab # inputs - analysis_rigid_injection_lab.py # analysis - diags/diag1000289 # output + "analysis_rigid_injection_lab.py diags/diag1000289" # analysis + "analysis_default_regression.py --path diags/diag1000289" # checksum OFF # dependency ) diff --git a/Examples/Tests/rigid_injection/analysis_default_regression.py 
b/Examples/Tests/rigid_injection/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/rigid_injection/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/rigid_injection/analysis_rigid_injection_btd.py b/Examples/Tests/rigid_injection/analysis_rigid_injection_btd.py index 759c211b42d..d87a680a819 100755 --- a/Examples/Tests/rigid_injection/analysis_rigid_injection_btd.py +++ b/Examples/Tests/rigid_injection/analysis_rigid_injection_btd.py @@ -20,7 +20,6 @@ frame, i.e., on the back-transformed diagnostics. """ -import os import sys import numpy as np @@ -29,9 +28,6 @@ yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum - filename = sys.argv[1] # Tolerances to check consistency between plotfile BTD and openPMD BTD @@ -80,9 +76,3 @@ print(f"error = {err}") print(f"tolerance = {tol}") assert err < tol - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/rigid_injection/analysis_rigid_injection_lab.py b/Examples/Tests/rigid_injection/analysis_rigid_injection_lab.py index 91e2bed1ed0..69fbe4cc537 100755 --- a/Examples/Tests/rigid_injection/analysis_rigid_injection_lab.py +++ b/Examples/Tests/rigid_injection/analysis_rigid_injection_lab.py @@ -23,15 +23,12 @@ with the gaussian_beam injection style. """ -import os import sys import numpy as np import yt yt.funcs.mylog.setLevel(0) -sys.path.insert(1, "../../../../warpx/Regression/Checksum/") -from checksumAPI import evaluate_checksum filename = sys.argv[1] @@ -101,9 +98,3 @@ def remove_rigid_lines(plotfile, nlines_if_rigid): center = ad_start["beam", "particle_center"] assert np.array_equal(z, orig_z) assert np.array_equal(1 * (np.abs(x) < 5.0e-7), center) - -# compare checksums -evaluate_checksum( - test_name=os.path.split(os.getcwd())[1], - output_file=sys.argv[1], -) diff --git a/Examples/Tests/scraping/CMakeLists.txt b/Examples/Tests/scraping/CMakeLists.txt index a0fd04b6b3f..71897e85b88 100644 --- a/Examples/Tests/scraping/CMakeLists.txt +++ b/Examples/Tests/scraping/CMakeLists.txt @@ -7,8 +7,8 @@ if(WarpX_EB) RZ # dims 2 # nprocs inputs_test_rz_scraping # inputs - analysis_rz.py # analysis - diags/diag1000037 # output + "analysis_rz.py diags/diag1000037" # analysis + "analysis_default_regression.py --path diags/diag1000037" # checksum OFF # dependency ) endif() @@ -19,8 +19,8 @@ if(WarpX_EB) RZ # dims 2 # nprocs inputs_test_rz_scraping_filter # inputs - analysis_rz_filter.py # analysis - diags/diag1000037 # output + "analysis_rz_filter.py diags/diag1000037" # analysis + "analysis_default_regression.py --path diags/diag1000037" # checksum OFF # dependency ) endif() diff --git a/Examples/Tests/scraping/analysis_default_regression.py b/Examples/Tests/scraping/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/scraping/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/scraping/analysis_rz.py b/Examples/Tests/scraping/analysis_rz.py index aa0038dbcf5..c5b60350cb8 100755 --- a/Examples/Tests/scraping/analysis_rz.py +++ b/Examples/Tests/scraping/analysis_rz.py @@ -20,16 +20,12 @@ # tolerance: 0 # Possible running time: < 1 s -import os import sys import numpy as np import yt from 
diff --git a/Examples/Tests/scraping/analysis_rz.py b/Examples/Tests/scraping/analysis_rz.py
index aa0038dbcf5..c5b60350cb8 100755
--- a/Examples/Tests/scraping/analysis_rz.py
+++ b/Examples/Tests/scraping/analysis_rz.py
@@ -20,16 +20,12 @@
 # tolerance: 0
 # Possible running time: < 1 s

-import os
 import sys

 import numpy as np
 import yt
 from openpmd_viewer import OpenPMDTimeSeries

-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 tolerance = 0

 fn = sys.argv[1]
@@ -82,10 +78,3 @@ def n_scraped_particles(iteration):
 assert np.all(
     np.sort(id_initial) == np.sort(id_final)
 )  # Sort because particles may not be in the same order
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-    do_particles=False,
-)
diff --git a/Examples/Tests/silver_mueller/CMakeLists.txt b/Examples/Tests/silver_mueller/CMakeLists.txt
index 7866d23dc1f..6cdeeffac6f 100644
--- a/Examples/Tests/silver_mueller/CMakeLists.txt
+++ b/Examples/Tests/silver_mueller/CMakeLists.txt
@@ -6,8 +6,8 @@ add_warpx_test(
     1 # dims
     2 # nprocs
     inputs_test_1d_silver_mueller # inputs
-    analysis.py # analysis
-    diags/diag1000500 # output
+    "analysis.py diags/diag1000500" # analysis
+    "analysis_default_regression.py --path diags/diag1000500" # checksum
     OFF # dependency
 )
@@ -16,8 +16,8 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_silver_mueller_x # inputs
-    analysis.py # analysis
-    diags/diag1000500 # output
+    "analysis.py diags/diag1000500" # analysis
+    "analysis_default_regression.py --path diags/diag1000500" # checksum
     OFF # dependency
 )
@@ -26,8 +26,8 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_silver_mueller_z # inputs
-    analysis.py # analysis
-    diags/diag1000500 # output
+    "analysis.py diags/diag1000500" # analysis
+    "analysis_default_regression.py --path diags/diag1000500" # checksum
     OFF # dependency
 )
@@ -36,7 +36,7 @@ add_warpx_test(
     RZ # dims
     2 # nprocs
     inputs_test_rz_silver_mueller_z # inputs
-    analysis.py # analysis
-    diags/diag1000500 # output
+    "analysis.py diags/diag1000500" # analysis
+    "analysis_default_regression.py --path diags/diag1000500" # checksum
     OFF # dependency
 )
diff --git a/Examples/Tests/silver_mueller/analysis.py b/Examples/Tests/silver_mueller/analysis.py
index aee27131bc9..678c5e4186a 100755
--- a/Examples/Tests/silver_mueller/analysis.py
+++ b/Examples/Tests/silver_mueller/analysis.py
@@ -11,7 +11,6 @@ test check that the reflected field at the boundary is negligible.
 """
-import os
 import re
 import sys

@@ -19,8 +18,6 @@ import yt

 yt.funcs.mylog.setLevel(0)
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum

 filename = sys.argv[1]
@@ -50,9 +47,3 @@
 assert np.all(abs(Ex) < max_reflection_amplitude)
 assert np.all(abs(Ey) < max_reflection_amplitude)
 assert np.all(abs(Ez) < max_reflection_amplitude)
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/silver_mueller/analysis_default_regression.py b/Examples/Tests/silver_mueller/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/silver_mueller/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/single_particle/CMakeLists.txt b/Examples/Tests/single_particle/CMakeLists.txt
index bee870f0b17..fb823b39431 100644
--- a/Examples/Tests/single_particle/CMakeLists.txt
+++ b/Examples/Tests/single_particle/CMakeLists.txt
@@ -6,7 +6,7 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_bilinear_filter # inputs
-    analysis.py # analysis
-    diags/diag1000001 # output
+    "analysis.py diags/diag1000001" # analysis
+    "analysis_default_regression.py --path diags/diag1000001" # checksum
     OFF # dependency
 )
diff --git a/Examples/Tests/single_particle/analysis.py b/Examples/Tests/single_particle/analysis.py
index 4127663e14d..efd3f36cfdf 100755
--- a/Examples/Tests/single_particle/analysis.py
+++ b/Examples/Tests/single_particle/analysis.py
@@ -7,7 +7,6 @@
 # License: BSD-3-Clause-LBNL

-import os
 import sys

 import numpy as np
@@ -15,8 +14,6 @@
 from scipy import signal

 yt.funcs.mylog.setLevel(0)
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum

 # Build Jx without filter. This can be obtained by running this test without
 # a filter, e.g., execute
@@ -65,9 +62,3 @@
 print("tolerance_rel: " + str(tolerance_rel))

 assert error_rel < tolerance_rel
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/single_particle/analysis_default_regression.py b/Examples/Tests/single_particle/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/single_particle/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/space_charge_initialization/CMakeLists.txt b/Examples/Tests/space_charge_initialization/CMakeLists.txt
index 6ca1f4ad04c..00f9a5462fd 100644
--- a/Examples/Tests/space_charge_initialization/CMakeLists.txt
+++ b/Examples/Tests/space_charge_initialization/CMakeLists.txt
@@ -6,8 +6,8 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_space_charge_initialization # inputs
-    analysis.py # analysis
-    diags/diag1000001 # output
+    "analysis.py diags/diag1000001" # analysis
+    "analysis_default_regression.py --path diags/diag1000001 --skip-particles" # checksum
     OFF # dependency
 )
@@ -16,7 +16,7 @@ add_warpx_test(
     3 # dims
     2 # nprocs
     inputs_test_3d_space_charge_initialization # inputs
-    analysis.py # analysis
-    diags/diag1000001 # output
+    "analysis.py diags/diag1000001" # analysis
+    "analysis_default_regression.py --path diags/diag1000001 --skip-particles" # checksum
     OFF # dependency
 )
diff --git a/Examples/Tests/space_charge_initialization/analysis.py b/Examples/Tests/space_charge_initialization/analysis.py
index d63ba8f7334..b8e5e689a87 100755
--- a/Examples/Tests/space_charge_initialization/analysis.py
+++ b/Examples/Tests/space_charge_initialization/analysis.py
@@ -12,7 +12,6 @@ the expected theoretical field.
 """
-import os
 import sys

 import matplotlib
@@ -25,8 +24,6 @@
 from scipy.special import gammainc

 yt.funcs.mylog.setLevel(0)
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum

 # Parameters from the Simulation
 Qtot = -1.0e-20
@@ -123,10 +120,3 @@ def check(E, E_th, label):
     check(Ey_array, Ey_th, "Ey")
 if ds.dimensionality == 3:
     check(Ez_array, Ez_th, "Ez")
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-    do_particles=False,
-)
diff --git a/Examples/Tests/space_charge_initialization/analysis_default_regression.py b/Examples/Tests/space_charge_initialization/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/space_charge_initialization/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/subcycling/CMakeLists.txt b/Examples/Tests/subcycling/CMakeLists.txt
index 688f54ac01c..503c9f24f34 100644
--- a/Examples/Tests/subcycling/CMakeLists.txt
+++ b/Examples/Tests/subcycling/CMakeLists.txt
@@ -6,7 +6,7 @@ add_warpx_test(
     2 # dims
     2 # nprocs
     inputs_test_2d_subcycling_mr # inputs
-    analysis_default_regression.py # analysis
-    diags/diag1000250 # output
+    OFF # analysis
+    "analysis_default_regression.py --path diags/diag1000250" # checksum
     OFF # dependency
 )
diff --git a/Examples/Tests/vay_deposition/CMakeLists.txt b/Examples/Tests/vay_deposition/CMakeLists.txt
index ce8d51d3c2a..86108530b1d 100644
--- a/Examples/Tests/vay_deposition/CMakeLists.txt
+++ b/Examples/Tests/vay_deposition/CMakeLists.txt
@@ -7,8 +7,8 @@ if(WarpX_FFT)
         2 # dims
         2 # nprocs
         inputs_test_2d_vay_deposition # inputs
-        analysis.py # analysis
-        diags/diag1000050 # output
+        "analysis.py diags/diag1000050" # analysis
+        "analysis_default_regression.py --path diags/diag1000050" # checksum
         OFF # dependency
     )
 endif()
@@ -19,8 +19,8 @@ if(WarpX_FFT)
         3 # dims
         2 # nprocs
         inputs_test_3d_vay_deposition # inputs
-        analysis.py # analysis
-        diags/diag1000025 # output
+        "analysis.py diags/diag1000025" # analysis
+        "analysis_default_regression.py --path diags/diag1000025" # checksum
         OFF # dependency
     )
 endif()
diff --git a/Examples/Tests/vay_deposition/analysis.py b/Examples/Tests/vay_deposition/analysis.py
index ba428520660..def231538de 100755
--- a/Examples/Tests/vay_deposition/analysis.py
+++ b/Examples/Tests/vay_deposition/analysis.py
@@ -6,7 +6,6 @@
 #
 # License: BSD-3-Clause-LBNL

-import os
 import sys

 import numpy as np
@@ -15,9 +14,6 @@

 yt.funcs.mylog.setLevel(50)

-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
 # Plotfile data set
 fn = sys.argv[1]
 ds = yt.load(fn)
@@ -34,9 +30,3 @@
 print("error_rel = {}".format(error_rel))
 print("tolerance = {}".format(tolerance))
 assert error_rel < tolerance
-
-# compare checksums
-evaluate_checksum(
-    test_name=os.path.split(os.getcwd())[1],
-    output_file=sys.argv[1],
-)
diff --git a/Examples/Tests/vay_deposition/analysis_default_regression.py b/Examples/Tests/vay_deposition/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/vay_deposition/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/analysis_default_openpmd_regression.py b/Examples/analysis_default_openpmd_regression.py
deleted file mode 100755
index 6f38693f820..00000000000
--- a/Examples/analysis_default_openpmd_regression.py
+++ /dev/null
@@ -1,26 +0,0 @@
-#!/usr/bin/env python3
-
-import os
-import re
-import sys
-
-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-
-test_name = os.path.split(os.getcwd())[1]
-output_file = sys.argv[1]
-
-# Run checksum regression test
-if re.search("single_precision", output_file):
-    evaluate_checksum(
-        test_name=test_name,
-        output_file=output_file,
-        output_format="openpmd",
-        rtol=2e-6,
-    )
-else:
-    evaluate_checksum(
-        test_name=test_name,
-        output_file=output_file,
-        output_format="openpmd",
-    )
diff --git a/Examples/analysis_default_regression.py b/Examples/analysis_default_regression.py
index 7c02f6904b2..e143e396f0c 100755
--- a/Examples/analysis_default_regression.py
+++ b/Examples/analysis_default_regression.py
@@ -1,25 +1,88 @@
 #!/usr/bin/env python3

+import argparse
 import os
-import re
 import sys

+import yt
+from openpmd_viewer import OpenPMDTimeSeries
+
 sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
 from checksumAPI import evaluate_checksum

-test_name = os.path.split(os.getcwd())[1]
-output_file = sys.argv[1]

-# Run checksum regression test
-if re.search("single_precision", output_file):
+
+def main(args):
+    # parse test name from test directory
+    test_name = os.path.split(os.getcwd())[1]
+    if "_restart" in test_name:
+        rtol_restart = 1e-12
+        print(
+            f"Warning: Setting relative tolerance {rtol_restart} for restart checksum analysis"
+        )
+        # use original test's checksums
+        test_name = test_name.replace("_restart", "")
+        # reset relative tolerance
+        args.rtol = rtol_restart
+    # TODO check environment and reset tolerance (portable, machine precision)
+    # compare checksums
     evaluate_checksum(
         test_name=test_name,
-        output_file=output_file,
-        rtol=2e-6,
+        output_file=args.path,
+        output_format=args.format,
+        rtol=args.rtol,
+        do_fields=args.do_fields,
+        do_particles=args.do_particles,
     )
-else:
-    # using default relative tolerance
-    evaluate_checksum(
-        test_name=test_name,
-        output_file=output_file,
+
+
+if __name__ == "__main__":
+    # define parser
+    parser = argparse.ArgumentParser()
+    # add arguments: output path
+    parser.add_argument(
+        "--path",
+        help="path to output file(s)",
+        type=str,
     )
+    # add arguments: relative tolerance
+    parser.add_argument(
+        "--rtol",
+        help="relative tolerance to compare checksums",
+        type=float,
+        required=False,
+        default=1e-9,
+    )
+    # add arguments: skip fields
+    parser.add_argument(
+        "--skip-fields",
+        help="skip fields when comparing checksums",
+        action="store_true",
+        dest="skip_fields",
+    )
+    # add arguments: skip particles
+    parser.add_argument(
+        "--skip-particles",
+        help="skip particles when comparing checksums",
+        action="store_true",
+        dest="skip_particles",
+    )
+    # parse arguments
+    args = parser.parse_args()
+    # set args.format automatically
+    try:
+        yt.load(args.path)
+    except Exception:
+        try:
+            OpenPMDTimeSeries(args.path)
+        except Exception:
+            print("Could not open the file as a plotfile or an openPMD time series")
+        else:
+            args.format = "openpmd"
+    else:
+        args.format = "plotfile"
+    # set args.do_fields (not parsed, based on args.skip_fields)
+    args.do_fields = False if args.skip_fields else True
+    # set args.do_particles (not parsed, based on args.skip_particles)
+    args.do_particles = False if args.skip_particles else True
+    # execute main function
+    main(args)
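With the argparse interface above, the checksum comparison is driven explicitly from each test's checksum slot in CMake; a typical invocation with a test-specific path (mirroring the strings used throughout the CMakeLists.txt changes in this patch) is

    ./analysis_default_regression.py --path diags/diag1000010 --rtol 1e-9 --skip-particles

The nested try/except keeps a single script valid for both output flavors: if yt.load succeeds the format is set to "plotfile", otherwise an OpenPMDTimeSeries is attempted and the format becomes "openpmd".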
diff --git a/Examples/analysis_default_restart.py b/Examples/analysis_default_restart.py
index c019a0b5945..ad6bc22e60e 100755
--- a/Examples/analysis_default_restart.py
+++ b/Examples/analysis_default_restart.py
@@ -6,9 +6,6 @@
 import numpy as np
 import yt

-sys.path.insert(1, "../../../../warpx/Regression/Checksum/")
-from checksumAPI import evaluate_checksum
-

 def check_restart(filename, tolerance=1e-12):
     """
@@ -67,17 +64,6 @@ def check_restart(filename, tolerance=1e-12):
     print()

-# test name (for checksums, remove "_restart") and output file name
-test_name = os.path.split(os.getcwd())[1]
-test_name = test_name.replace("_restart", "")
-output_file = sys.argv[1]
-
 # compare restart results against original results
+output_file = sys.argv[1]
 check_restart(output_file)
-
-# compare restart checksums against original checksums
-evaluate_checksum(
-    test_name=test_name,
-    output_file=output_file,
-    rtol=1e-12,
-)
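The `check_restart` function kept above compares the restarted run against the original one within the default tolerance of 1e-12; its body sits outside the hunks shown. A minimal sketch of that kind of field-wise comparison — the field name, benchmark layout, and function name here are illustrative assumptions, not the script's exact code — could look like:

    import numpy as np
    import yt

    def fields_close(original_file, restart_file, tolerance=1e-12):
        # Load the original and the restarted plotfiles with yt.
        ds_o = yt.load(original_file)
        ds_r = yt.load(restart_file)
        # Sample both runs onto the same level-0 covering grid ...
        grid_o = ds_o.covering_grid(0, ds_o.domain_left_edge, ds_o.domain_dimensions)
        grid_r = ds_r.covering_grid(0, ds_r.domain_left_edge, ds_r.domain_dimensions)
        # ... and require a small relative difference on one field (here Ex).
        Ex_o = grid_o["boxlib", "Ex"].to_ndarray()
        Ex_r = grid_r["boxlib", "Ex"].to_ndarray()
        max_norm = np.abs(Ex_o).max()
        return max_norm == 0.0 or np.abs(Ex_r - Ex_o).max() / max_norm < tolerance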
diff --git a/Regression/Checksum/benchmarks_json/test_2d_collision_xz_picmi.json b/Regression/Checksum/benchmarks_json/test_2d_collision_xz_picmi.json
new file mode 100644
index 00000000000..5407fe62374
--- /dev/null
+++ b/Regression/Checksum/benchmarks_json/test_2d_collision_xz_picmi.json
@@ -0,0 +1,29 @@
+{
+  "lev=0": {
+    "Bx": 0.0,
+    "By": 0.0,
+    "Bz": 0.0,
+    "Ex": 0.0,
+    "Ey": 0.0,
+    "Ez": 0.0,
+    "jx": 0.0,
+    "jy": 0.0,
+    "jz": 0.0
+  },
+  "electron": {
+    "particle_momentum_x": 1.0340540111403247e-19,
+    "particle_momentum_y": 1.0154008257709287e-19,
+    "particle_momentum_z": 1.0325278275532687e-19,
+    "particle_position_x": 2667286.170170416,
+    "particle_position_y": 2647299.0368030528,
+    "particle_weight": 1.7256099431746894e+26
+  },
+  "ion": {
+    "particle_momentum_x": 2.489595454855792e-19,
+    "particle_momentum_y": 2.295864697447754e-19,
+    "particle_momentum_z": 2.2800373850644785e-19,
+    "particle_position_x": 2657799.867468604,
+    "particle_position_y": 2664764.1491634008,
+    "particle_weight": 1.7256099431746894e+26
+  }
+}
diff --git a/Regression/Checksum/benchmarks_json/test_2d_dirichlet_bc_picmi.json b/Regression/Checksum/benchmarks_json/test_2d_dirichlet_bc_picmi.json
new file mode 100644
index 00000000000..41567dc3bf2
--- /dev/null
+++ b/Regression/Checksum/benchmarks_json/test_2d_dirichlet_bc_picmi.json
@@ -0,0 +1,5 @@
+{
+  "lev=0": {
+    "phi": 10817.97280547637
+  }
+}
diff --git a/Regression/Checksum/benchmarks_json/test_2d_dive_cleaning.json b/Regression/Checksum/benchmarks_json/test_2d_dive_cleaning.json
index 71e147cb1f7..06216490281 100644
--- a/Regression/Checksum/benchmarks_json/test_2d_dive_cleaning.json
+++ b/Regression/Checksum/benchmarks_json/test_2d_dive_cleaning.json
@@ -1,27 +1,21 @@
 {
-  "beam": {
-    "particle_Bx": 0.0,
-    "particle_By": 1.4229748768905527e-19,
-    "particle_Bz": 0.0,
-    "particle_Ex": 210305.84591470752,
-    "particle_Ey": 0.0,
-    "particle_Ez": 210741.1714121227,
-    "particle_momentum_x": 1.111019933689776e-26,
-    "particle_momentum_y": 0.0,
-    "particle_momentum_z": 1.113270980745195e-26,
-    "particle_position_x": 0.03183627816789909,
-    "particle_position_y": 0.03171922054171794,
-    "particle_weight": 31207.545372303823
-  },
   "lev=0": {
     "Bx": 0.0,
-    "By": 1.5468538800972258e-20,
+    "By": 1.2286794953440962e-20,
     "Bz": 0.0,
-    "Ex": 8533.638650013556,
+    "Ex": 8214.62652670017,
     "Ey": 0.0,
-    "Ez": 8534.98921988922,
+    "Ez": 8214.876065863818,
     "jx": 0.0,
     "jy": 0.0,
     "jz": 0.0
+  },
+  "beam": {
+    "particle_momentum_x": 1.580805184936455e-26,
+    "particle_momentum_y": 0.0,
+    "particle_momentum_z": 1.5759037267515492e-26,
+    "particle_position_x": 0.0319918174870481,
+    "particle_position_y": 0.03196785829873647,
+    "particle_weight": 31207.545372303823
   }
-}
\ No newline at end of file
+}
diff --git a/Regression/Checksum/benchmarks_json/test_2d_maxwell_hybrid_qed_solver.json b/Regression/Checksum/benchmarks_json/test_2d_maxwell_hybrid_qed_solver.json
index 9f7b7f64dcd..a5c1c1fd700 100644
--- a/Regression/Checksum/benchmarks_json/test_2d_maxwell_hybrid_qed_solver.json
+++ b/Regression/Checksum/benchmarks_json/test_2d_maxwell_hybrid_qed_solver.json
@@ -1,13 +1,13 @@
 {
   "lev=0": {
-    "Bx": 3.543966469013954e-05,
+    "Bx": 3.5439667773344274e-05,
     "By": 0.0,
-    "Bz": 5.103535813972088e-12,
+    "Bz": 0.0,
     "Ex": 0.0,
-    "Ey": 6553600000.005218,
+    "Ey": 6553599999.996383,
     "Ez": 0.0,
     "jx": 0.0,
     "jy": 0.0,
     "jz": 0.0
   }
-}
\ No newline at end of file
+}
diff --git a/Regression/Checksum/benchmarks_json/test_2d_particle_attr_access_picmi.json b/Regression/Checksum/benchmarks_json/test_2d_particle_attr_access_picmi.json
new file mode 100644
index 00000000000..92a1ccbe638
--- /dev/null
+++ b/Regression/Checksum/benchmarks_json/test_2d_particle_attr_access_picmi.json
@@ -0,0 +1,14 @@
+{
+  "lev=0": {
+    "phi": 0.0023065875621041164
+  },
+  "electrons": {
+    "particle_momentum_x": 1.1623026977941542e-25,
+    "particle_momentum_y": 1.0012020618770149e-25,
+    "particle_momentum_z": 1.0768794697418634e-25,
+    "particle_newPid": 750.0,
+    "particle_position_x": 2.4984316660445582,
+    "particle_position_y": 2.498475649375752,
+    "particle_weight": 300.0
+  }
+}
diff --git a/Regression/Checksum/benchmarks_json/test_2d_particle_attr_access_unique_picmi.json b/Regression/Checksum/benchmarks_json/test_2d_particle_attr_access_unique_picmi.json
new file mode 100644
index 00000000000..d1630437fe0
--- /dev/null
+++ b/Regression/Checksum/benchmarks_json/test_2d_particle_attr_access_unique_picmi.json
@@ -0,0 +1,14 @@
+{
+  "lev=0": {
+    "phi": 0.004613047318603685
+  },
+  "electrons": {
+    "particle_momentum_x": 2.285288009419423e-25,
+    "particle_momentum_y": 2.0816449298979767e-25,
+    "particle_momentum_z": 2.0646896248001752e-25,
+    "particle_newPid": 1500.0,
+    "particle_position_x": 4.499791671680002,
+    "particle_position_y": 4.499957554820931,
+    "particle_weight": 600.0
+  }
+}
diff --git a/Regression/Checksum/benchmarks_json/test_2d_qed_breit_wheeler_opmd.json b/Regression/Checksum/benchmarks_json/test_2d_qed_breit_wheeler_opmd.json
new file mode 100644
index 00000000000..fdfddfcbf2d
--- /dev/null
+++ b/Regression/Checksum/benchmarks_json/test_2d_qed_breit_wheeler_opmd.json
@@ -0,0 +1,134 @@
+{
+  "lev=0": {
+    "Ex": 0.0
+  },
+  "dummy_phot": {
+    "particle_position_x": 0.0,
+    "particle_position_y": 0.0,
+    "particle_position_z": 0.0,
+    "particle_momentum_x": 0.0,
+    "particle_momentum_y": 0.0,
+    "particle_momentum_z": 0.0,
+    "particle_weight": 0.0
+  },
+  "ele1": {
+    "particle_opticalDepthQSR": 94805.7653371546,
+    "particle_position_x": 0.02366929620831116,
+    "particle_position_y": 0.0,
+    "particle_position_z": 0.023729402832031253,
+    "particle_momentum_x": 2.6017428344962994e-14,
+    "particle_momentum_y": 0.0,
+    "particle_momentum_z": 0.0,
+    "particle_weight": 904932.0220947266
+  },
+  "ele2": {
+    "particle_opticalDepthQSR": 11848.411715651348,
+    "particle_position_x": 0.00294089697265625,
+    "particle_position_y": 0.0,
+    "particle_position_z": 0.00291261572265625,
+    "particle_momentum_x": 0.0,
+    "particle_momentum_y": 8.023686137822937e-15,
+    "particle_momentum_z": 0.0,
+    "particle_weight": 112028.12194824219
+  },
+  "ele3": {
+    "particle_opticalDepthQSR": 126169.82836793675,
+    "particle_position_x": 0.03160814697265625,
+    "particle_position_y": 0.0,
+    "particle_position_z": 0.03157382483413338,
+    "particle_momentum_x": 0.0,
+    "particle_momentum_y": 0.0,
+    "particle_momentum_z": 1.7279204307644868e-13,
+    "particle_weight": 1203489.3035888672
+  },
+  "ele4": {
+    "particle_opticalDepthQSR": 49124.07007571301,
+    "particle_position_x": 0.012249532599865704,
+    "particle_position_y": 0.0,
+    "particle_position_z": 0.012296328078458279,
+    "particle_momentum_x": 3.86289566432756e-13,
+    "particle_momentum_y": 3.86289566432756e-13,
+    "particle_momentum_z": 3.86289566432756e-13,
+    "particle_weight": 467967.9870605469
+  },
+  "p1": {
+    "particle_opticalDepthBW": 871455.4785435478,
+    "particle_position_x": 0.2384708675227609,
+    "particle_position_y": 0.0,
+    "particle_position_z": 0.23841459716796876,
+    "particle_momentum_x": 0.0,
+    "particle_momentum_y": 0.0,
+    "particle_momentum_z": 0.0,
+    "particle_weight": 9095067.977905273
+  },
+  "p2": {
+    "particle_opticalDepthBW": 1026305.8359152814,
+    "particle_position_x": 0.25920310302734373,
+    "particle_position_y": 0.0,
+    "particle_position_z": 0.25923138427734393,
+    "particle_momentum_x": 0.0,
+    "particle_momentum_y": 0.0,
+    "particle_momentum_z": 0.0,
+    "particle_weight": 9887971.878051758
+  },
+  "p3": {
+    "particle_opticalDepthBW": 819556.3040882011,
+    "particle_position_x": 0.23053585302734383,
+    "particle_position_y": 0.0,
+    "particle_position_z": 0.230565112772128,
+    "particle_momentum_x": 0.0,
+    "particle_momentum_y": 0.0,
+    "particle_momentum_z": 0.0,
+    "particle_weight": 8796510.696411133
+  },
+  "p4": {
+    "particle_opticalDepthBW": 953481.8671852223,
+    "particle_position_x": 0.24989221486908522,
+    "particle_position_y": 0.0,
+    "particle_position_z": 0.24984575334030557,
+    "particle_momentum_x": 0.0,
+    "particle_momentum_y": 0.0,
+    "particle_momentum_z": 0.0,
+    "particle_weight": 9532032.012939453
+  },
+  "pos1": {
+    "particle_opticalDepthQSR": 95351.53381764909,
+    "particle_position_x": 0.02366929620831116,
+    "particle_position_y": 0.0,
+    "particle_position_z": 0.023729402832031253,
+    "particle_momentum_x": 2.580947118885758e-14,
+    "particle_momentum_y": 0.0,
+    "particle_momentum_z": 0.0,
+    "particle_weight": 904932.0220947266
+  },
+  "pos2": {
+    "particle_opticalDepthQSR": 11715.614695181783,
+    "particle_position_x": 0.00294089697265625,
+    "particle_position_y": 0.0,
+    "particle_position_z": 0.00291261572265625,
+    "particle_momentum_x": 0.0,
+    "particle_momentum_y": 8.016397398195943e-15,
+    "particle_momentum_z": 0.0,
+    "particle_weight": 112028.12194824219
+  },
+  "pos3": {
+    "particle_opticalDepthQSR": 126049.551252123,
+    "particle_position_x": 0.03160814697265625,
+    "particle_position_y": 0.0,
+    "particle_position_z": 0.03157382483413338,
+    "particle_momentum_x": 0.0,
+    "particle_momentum_y": 0.0,
+    "particle_momentum_z": 1.718369575300312e-13,
+    "particle_weight": 1203489.3035888672
+  },
+  "pos4": {
+    "particle_opticalDepthQSR": 48953.578114257085,
+    "particle_position_x": 0.012249532599865704,
+    "particle_position_y": 0.0,
+    "particle_position_z": 0.012296328078458279,
+    "particle_momentum_x": 3.873971285219934e-13,
+    "particle_momentum_y": 3.873971285219934e-13,
+    "particle_momentum_z": 3.873971285219934e-13,
+    "particle_weight": 467967.9870605469
+  }
+}
diff --git a/Regression/Checksum/benchmarks_json/test_3d_dive_cleaning.json b/Regression/Checksum/benchmarks_json/test_3d_dive_cleaning.json
index db23c26f9a7..b8206240500 100644
--- a/Regression/Checksum/benchmarks_json/test_3d_dive_cleaning.json
+++ b/Regression/Checksum/benchmarks_json/test_3d_dive_cleaning.json
@@ -1,28 +1,22 @@
 {
-  "beam": {
-    "particle_Bx": 1.6547943661629038e-20,
-    "particle_By": 1.7166945226626064e-20,
-    "particle_Bz": 1.851357799836734e-20,
-    "particle_Ex": 39363.91309372786,
-    "particle_Ey": 39331.17437154593,
-    "particle_Ez": 39396.18209787599,
-    "particle_momentum_x": 1.700384472207379e-27,
-    "particle_momentum_y": 1.69889110346099e-27,
-    "particle_momentum_z": 1.7017928140329036e-27,
-    "particle_position_x": 0.031880969779242374,
-    "particle_position_y": 0.03175704658379704,
-    "particle_position_z": 0.03183674192208247,
-    "particle_weight": 0.06241509074460764
-  },
   "lev=0": {
-    "Bx": 2.2090009624207165e-20,
-    "By": 2.2307246822783936e-20,
-    "Bz": 2.1967888687392684e-20,
-    "Ex": 8888.956516621029,
-    "Ey": 8838.45337149105,
-    "Ez": 8837.421045658291,
+    "Bx": 1.9159134471952935e-20,
+    "By": 1.8827238279614072e-20,
+    "Bz": 1.8885687211875642e-20,
+    "Ex": 8648.536817097653,
+    "Ey": 8613.089981021956,
+    "Ez": 8626.889465408336,
     "jx": 0.0,
     "jy": 0.0,
     "jz": 0.0
+  },
+  "beam": {
+    "particle_momentum_x": 2.3895400750846334e-27,
+    "particle_momentum_y": 2.3729444184823814e-27,
+    "particle_momentum_z": 2.390801909783316e-27,
+    "particle_position_x": 0.031866021988156114,
+    "particle_position_y": 0.032281276107277185,
+    "particle_position_z": 0.03185444043880588,
+    "particle_weight": 0.06241509074460764
   }
-}
\ No newline at end of file
+}
diff --git a/Regression/Checksum/benchmarks_json/test_3d_particle_scrape.json b/Regression/Checksum/benchmarks_json/test_3d_particle_scrape.json
new file mode 100644
index 00000000000..b03a954397a
--- /dev/null
+++ b/Regression/Checksum/benchmarks_json/test_3d_particle_scrape.json
@@ -0,0 +1,10 @@
+{
+  "lev=0": {
+    "Bx": 148673.005859208,
+    "By": 148673.00585920803,
+    "Bz": 3371.758117878557,
+    "Ex": 55378581103426.695,
+    "Ey": 55378581103426.7,
+    "Ez": 68412803445328.25
+  }
+}
diff --git a/Regression/Checksum/benchmarks_json/test_3d_particle_scrape_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_particle_scrape_picmi.json
new file mode 100644
index 00000000000..b03a954397a
--- /dev/null
+++ b/Regression/Checksum/benchmarks_json/test_3d_particle_scrape_picmi.json
@@ -0,0 +1,10 @@
+{
+  "lev=0": {
+    "Bx": 148673.005859208,
+    "By": 148673.00585920803,
+    "Bz": 3371.758117878557,
+    "Ex": 55378581103426.695,
+    "Ey": 55378581103426.7,
+    "Ez": 68412803445328.25
+  }
+}
diff --git a/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_picmi.json
new file mode 100644
index 00000000000..205bf8204dd
--- /dev/null
+++ b/Regression/Checksum/benchmarks_json/test_3d_plasma_lens_picmi.json
@@ -0,0 +1,21 @@
+{
+  "lev=0": {
+    "Bx": 3.742282886653039e-14,
+    "By": 3.733653562337366e-14,
+    "Bz": 3.159003724979974e-16,
+    "Ex": 4.413173824952238e-06,
+    "Ey": 4.440807110847932e-06,
+    "Ez": 8.994610621212147e-06,
+    "jx": 2.294712258669695e-10,
+    "jy": 1.8314117733996873e-10,
+    "jz": 2.1990787829485306e-08
+  },
+  "electrons": {
+    "particle_momentum_x": 7.424668333878816e-24,
+    "particle_momentum_y": 5.9396389377972404e-24,
+    "particle_momentum_z": 2.730924530737562e-22,
+    "particle_position_x": 0.03608389438974155,
+    "particle_position_y": 0.028872102262786022,
+    "particle_position_z": 3.894799963324232
+  }
+}
"particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "ele1": { + "particle_opticalDepthQSR": 150084.76186868473, + "particle_position_x": 0.037522814130005, + "particle_position_y": 0.0375364375, + "particle_position_z": 0.0375473125, + "particle_momentum_x": 4.111367826819435e-14, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 1.4303398132324217 + }, + "ele2": { + "particle_opticalDepthQSR": 19166.68554224755, + "particle_position_x": 0.0047233984375, + "particle_position_y": 0.004773031169456623, + "particle_position_z": 0.004769421875, + "particle_momentum_x": 0.0, + "particle_momentum_y": 1.3120815641048112e-14, + "particle_momentum_z": 0.0, + "particle_weight": 0.18199920654296872 + }, + "ele3": { + "particle_opticalDepthQSR": 197996.95086384623, + "particle_position_x": 0.049603601562499995, + "particle_position_y": 0.049503953125, + "particle_position_z": 0.049484700428793646, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 2.703275178685535e-13, + "particle_weight": 1.8893241882324217 + }, + "ele4": { + "particle_opticalDepthQSR": 79277.54339646622, + "particle_position_x": 0.019832326822916666, + "particle_position_y": 0.019875475260416667, + "particle_position_z": 0.019791898437499997, + "particle_momentum_x": 6.270940742446365e-13, + "particle_momentum_y": 6.270940742446365e-13, + "particle_momentum_z": 6.270940742446365e-13, + "particle_weight": 0.7561206817626952 + }, + "p1": { + "particle_opticalDepthBW": 779753.6899265796, + "particle_position_x": 0.22462570742050836, + "particle_position_y": 0.22460756249999997, + "particle_position_z": 0.22459668749999998, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 8.569660186767576 + }, + "p2": { + "particle_opticalDepthBW": 1011632.5072099702, + "particle_position_x": 0.2574206015625001, + "particle_position_y": 0.25736832040839486, + "particle_position_z": 0.2573745781250001, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 9.81800079345703 + }, + "p3": { + "particle_opticalDepthBW": 707943.7338077008, + "particle_position_x": 0.2125403984375, + "particle_position_y": 0.21264004687499996, + "particle_position_z": 0.21265919757150045, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 8.110675811767576 + }, + "p4": { + "particle_opticalDepthBW": 899116.8515237333, + "particle_position_x": 0.24232183723958334, + "particle_position_y": 0.24226657421875006, + "particle_position_z": 0.24235025781250008, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 9.243879318237303 + }, + "pos1": { + "particle_opticalDepthQSR": 150411.2911918669, + "particle_position_x": 0.037522814130005, + "particle_position_y": 0.0375364375, + "particle_position_z": 0.0375473125, + "particle_momentum_x": 4.0804162983963274e-14, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 1.4303398132324217 + }, + "pos2": { + "particle_opticalDepthQSR": 19071.695614845616, + "particle_position_x": 0.0047233984375, + "particle_position_y": 0.004773031169456623, + "particle_position_z": 0.004769421875, + "particle_momentum_x": 0.0, + "particle_momentum_y": 1.2937663481149343e-14, 
+ "particle_momentum_z": 0.0, + "particle_weight": 0.18199920654296872 + }, + "pos3": { + "particle_opticalDepthQSR": 197682.0402897762, + "particle_position_x": 0.049603601562499995, + "particle_position_y": 0.049503953125, + "particle_position_z": 0.049484700428793646, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 2.7069590910941717e-13, + "particle_weight": 1.8893241882324217 + }, + "pos4": { + "particle_opticalDepthQSR": 79817.78242989839, + "particle_position_x": 0.019832326822916666, + "particle_position_y": 0.019875475260416667, + "particle_position_z": 0.019791898437499997, + "particle_momentum_x": 6.229925285542024e-13, + "particle_momentum_y": 6.229925285542024e-13, + "particle_momentum_z": 6.229925285542024e-13, + "particle_weight": 0.7561206817626952 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_3d_reduced_diags_load_balance_costs_timers_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_reduced_diags_load_balance_costs_timers_picmi.json new file mode 100644 index 00000000000..a77d93b9621 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_reduced_diags_load_balance_costs_timers_picmi.json @@ -0,0 +1,22 @@ +{ + "electrons": { + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_position_x": 262144.0, + "particle_position_y": 262144.0, + "particle_position_z": 65536.0, + "particle_weight": 1600000000000000.0 + }, + "lev=0": { + "Bx": 0.0, + "By": 0.0, + "Bz": 0.0, + "Ex": 0.0, + "Ey": 0.0, + "Ez": 0.0, + "jx": 0.0, + "jy": 0.0, + "jz": 0.0 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_rz_scraping.json b/Regression/Checksum/benchmarks_json/test_rz_scraping.json index 8f85b956b26..3a97a2dc651 100644 --- a/Regression/Checksum/benchmarks_json/test_rz_scraping.json +++ b/Regression/Checksum/benchmarks_json/test_rz_scraping.json @@ -4,5 +4,14 @@ }, "lev=1": { "Er": 0.0 + }, + "electron": { + "particle_momentum_x": 8.802233511708275e-20, + "particle_momentum_y": 8.865573181381068e-20, + "particle_momentum_z": 0.0, + "particle_position_x": 52.1624916491251, + "particle_position_y": 128.0, + "particle_theta": 776.9665451756912, + "particle_weight": 4.841626861764053e+18 } } diff --git a/Regression/Checksum/benchmarks_json/test_rz_scraping_filter.json b/Regression/Checksum/benchmarks_json/test_rz_scraping_filter.json new file mode 100644 index 00000000000..3a97a2dc651 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_rz_scraping_filter.json @@ -0,0 +1,17 @@ +{ + "lev=0": { + "Er": 0.0 + }, + "lev=1": { + "Er": 0.0 + }, + "electron": { + "particle_momentum_x": 8.802233511708275e-20, + "particle_momentum_y": 8.865573181381068e-20, + "particle_momentum_z": 0.0, + "particle_position_x": 52.1624916491251, + "particle_position_y": 128.0, + "particle_theta": 776.9665451756912, + "particle_weight": 4.841626861764053e+18 + } +} From fbc1a56ac5d1cf40572f3acda0213a6eeea7f1d5 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 2 Jan 2025 09:00:09 -0800 Subject: [PATCH 128/278] Doc: Wave Attenuation Numerics (#5532) Add Yin's latest paper. --- Docs/source/highlights.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst index 7f613625c55..300d94149f8 100644 --- a/Docs/source/highlights.rst +++ b/Docs/source/highlights.rst @@ -188,6 +188,14 @@ Scientific works in High-Performance Computing, applied mathematics and numerics Please see :ref:`this section `. 
+Related works using WarpX:
+
+#. Yan Y., Du F., Tang J., Yu D. and Zhao Y.,
+   **Numerical study on wave attenuation via 1D fully kinetic electromagnetic particle-in-cell simulations**.
+   Plasma Sources Sci. Technol. **33** 115013, 2024
+   `DOI:10.1088/1361-6595/ad8c7c `__
+
+
 Nuclear Fusion and Plasma Confinement
 *************************************

From 98889d69b2166aa7a610cdd6dc8abcd1baafee2a Mon Sep 17 00:00:00 2001
From: Remi Lehe
Date: Thu, 2 Jan 2025 09:24:33 -0800
Subject: [PATCH 129/278] Injection from EB: do not create particles outside of user-specified bounds (#5521)

When using the injection from embedded boundaries along with `zmin`, `zmax`, `xmin`, `xmax`, etc., we were creating a large number of macroparticles along the EB surface, and then removing the particles that are outside of `zmin`, `zmax`, `xmin`, `xmax`, etc. by setting their ID to an invalid value.

In cases where the area defined by `zmin`, `zmax`, `xmin`, `xmax`, etc. is a small fraction of the EB surface, it is much more efficient not to create these particles in the first place. In addition, it avoids having to create a large number of particle IDs, which are eventually not used in the simulation. (In some use cases, this unnecessarily led to a particle ID overflow, i.e. WarpX ended up having to generate particle IDs that were outside the maximum possible range given by the number of bits.)

This PR fixes this issue.
---
 Source/Particles/PhysicalParticleContainer.cpp | 13 +++++--------
 1 file changed, 5 insertions(+), 8 deletions(-)

diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp
index c973e9afafa..baac138dd38 100644
--- a/Source/Particles/PhysicalParticleContainer.cpp
+++ b/Source/Particles/PhysicalParticleContainer.cpp
@@ -1483,17 +1483,14 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector,
             if (eb_flag_arr(i,j,k).isRegular() || eb_flag_arr(i,j,k).isCovered()) { return; }
             // Scale by the (normalized) area of the EB surface in this cell
             num_ppc_real_in_this_cell *= eb_bnd_area_arr(i,j,k);
-        } else
 #else
         amrex::Real const num_ppc_real_in_this_cell = num_ppc_real; // user input: number of macroparticles per cell
 #endif
-        {
-            // Injection from a plane
-            auto lo = getCellCoords(overlap_corner, dx, {0._rt, 0._rt, 0._rt}, iv);
-            auto hi = getCellCoords(overlap_corner, dx, {1._rt, 1._rt, 1._rt}, iv);
-            // Skip cells that do not overlap with the plane
-            if (!flux_pos->overlapsWith(lo, hi)) { return; }
-        }
+        // Skip cells that do not overlap with the bounds specified by the user (xmin/xmax, ymin/ymax, zmin/zmax)
+        auto lo = getCellCoords(overlap_corner, dx, {0._rt, 0._rt, 0._rt}, iv);
+        auto hi = getCellCoords(overlap_corner, dx, {1._rt, 1._rt, 1._rt}, iv);
+        if (!flux_pos->overlapsWith(lo, hi)) { return; }

         auto index = overlap_box.index(iv);
         // Take into account refined injection region

From f8b97eb82b0798fd6821b3d7358caa2f748eb083 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Thu, 2 Jan 2025 15:07:25 -0800
Subject: [PATCH 130/278] [pre-commit.ci] pre-commit autoupdate (#5529)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

updates:
- [github.com/astral-sh/ruff-pre-commit: v0.8.3 → v0.8.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.3...v0.8.4)

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 .pre-commit-config.yaml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 7c396c95b1d..55d880c8866 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -69,7 +69,7 @@ repos:
 # Python: Ruff linter & formatter
 #   https://docs.astral.sh/ruff/
 - repo: https://github.com/astral-sh/ruff-pre-commit
-  rev: v0.8.3
+  rev: v0.8.4
   hooks:
     # Run the linter
     - id: ruff

From bbf9e0d2d3eae080ded84e15481a975efbed454b Mon Sep 17 00:00:00 2001
From: Axel Huebl
Date: Fri, 3 Jan 2025 14:02:45 -0800
Subject: [PATCH 131/278] PEC: Fix Uninit Var Warning (GCC) (#5536)

GCC 13 throws a compile-time warning in 3D that, in some cases, `is_normal_to_boundary` may be used uninitialized. There are a lot of `if`s in its usage, so I simply changed the initial value to `false` to silence it.
---
 Source/BoundaryConditions/PEC_Insulator.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Source/BoundaryConditions/PEC_Insulator.cpp b/Source/BoundaryConditions/PEC_Insulator.cpp
index b9926e4bf22..b6b6bcf08ea 100644
--- a/Source/BoundaryConditions/PEC_Insulator.cpp
+++ b/Source/BoundaryConditions/PEC_Insulator.cpp
@@ -69,7 +69,7 @@ namespace
         bool GuardCell = false;
         bool isInsulatorBoundary = false;
         amrex::Real sign = +1._rt;
-        bool is_normal_to_boundary;
+        bool is_normal_to_boundary = false;
         amrex::Real field_value = 0._rt;
         bool set_field = false;
         // Loop over all dimensions

From 97aa294586b0268ba0ca3f9f46680d5d52b02736 Mon Sep 17 00:00:00 2001
From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com>
Date: Mon, 6 Jan 2025 16:03:58 -0800
Subject: [PATCH 132/278] Use same MLMG parameters in MS solver as in ES solver (#5517)

Fixes #5508.

~~We might not want to set the solver precision with the same values as used for the ES solver but that can be debated in this PR.~~

Note this is a patch fix until we can refactor the MS solver to separate it from the `WarpX` class.

---------

Signed-off-by: roelof-groenewald
---
 .../MagnetostaticSolver/MagnetostaticSolver.cpp | 16 ++++++++--------
 Source/WarpX.H                                  |  1 +
 Source/WarpX.cpp                                |  1 +
 3 files changed, 10 insertions(+), 8 deletions(-)

diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp
index 96e92b80359..ce39265e720 100644
--- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp
+++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp
@@ -128,22 +128,22 @@ WarpX::AddMagnetostaticFieldLabFrame()
     // const amrex::Real magnetostatic_absolute_tolerance = self_fields_absolute_tolerance*PhysConst::c;
     // temporary fix!!!
- const amrex::Real magnetostatic_absolute_tolerance = 0.0; - amrex::Real self_fields_required_precision; + const amrex::Real absolute_tolerance = 0.0; + amrex::Real required_precision; if constexpr (std::is_same::value) { - self_fields_required_precision = 1e-5; + required_precision = 1e-5; } else { - self_fields_required_precision = 1e-11; + required_precision = 1e-11; } - const int self_fields_max_iters = 200; - const int self_fields_verbosity = 2; + const int verbosity = 2; computeVectorPotential( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), m_fields.get_mr_levels_alldirs(FieldType::vector_potential_fp_nodal, finest_level), - self_fields_required_precision, magnetostatic_absolute_tolerance, self_fields_max_iters, - self_fields_verbosity); + required_precision, absolute_tolerance, magnetostatic_solver_max_iters, + verbosity + ); } /* Compute the vector potential `A` by solving the Poisson equation with `J` as diff --git a/Source/WarpX.H b/Source/WarpX.H index 73998d6faf2..56d4f879de8 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -902,6 +902,7 @@ public: // Magnetostatic Solver Interface MagnetostaticSolver::VectorPoissonBoundaryHandler m_vector_poisson_boundary_handler; + int magnetostatic_solver_max_iters = 200; void ComputeMagnetostaticField (); void AddMagnetostaticFieldLabFrame (); void computeVectorPotential (ablastr::fields::MultiLevelVectorField const& curr, diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 965235e1078..75aa964da3a 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -684,6 +684,7 @@ WarpX::ReadParameters () poisson_solver_id!=PoissonSolverAlgo::IntegratedGreenFunction, "To use the FFT Poisson solver, compile with WARPX_USE_FFT=ON."); #endif + utils::parser::queryWithParser(pp_warpx, "self_fields_max_iters", magnetostatic_solver_max_iters); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( ( From c13764504103c556bab87288e851ace44c1d2c4d Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 7 Jan 2025 02:01:07 +0000 Subject: [PATCH 133/278] [pre-commit.ci] pre-commit autoupdate (#5539) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.4 → v0.8.6](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.4...v0.8.6) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 55d880c8866..c07ad07f74a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.4 + rev: v0.8.6 hooks: # Run the linter - id: ruff From 1bff387be15deb55585e0ee529581bd7160b3cb9 Mon Sep 17 00:00:00 2001 From: Marco Garten Date: Wed, 8 Jan 2025 09:29:35 -0800 Subject: [PATCH 134/278] Update CMake version for Perlmutter AY25 (#5535) Update CMake as per NERSC automated message upon loading cmake/3.24.3: _"This module is deprecated and scheduled for removal at the end of AY24 (Jan 14, 2025). 
Please move to cmake/3.30.2."_

---------

Co-authored-by: Axel Huebl
---
 .../perlmutter-nersc/perlmutter_cpu_warpx.profile.example | 2 +-
 .../perlmutter-nersc/perlmutter_gpu_warpx.profile.example | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example b/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example
index 488d53c6af9..a7493ecd4bc 100644
--- a/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example
+++ b/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example
@@ -7,7 +7,7 @@ if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in yo

 # required dependencies
 module load cpu
-module load cmake/3.24.3
+module load cmake/3.30.2
 module load cray-fftw/3.3.10.6

 # optional: for QED support with detailed tables

diff --git a/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example b/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example
index 7e76d1366a3..5d413db71e1 100644
--- a/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example
+++ b/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example
@@ -12,7 +12,7 @@ module load craype
 module load craype-x86-milan
 module load craype-accel-nvidia80
 module load cudatoolkit
-module load cmake/3.24.3
+module load cmake/3.30.2

 # optional: for QED support with detailed tables
 export BOOST_ROOT=/global/common/software/spackecp/perlmutter/e4s-23.08/default/spack/opt/spack/linux-sles15-zen3/gcc-12.3.0/boost-1.83.0-nxqk3hnci5g3wqv75wvsmuke3w74mzxi

From 1813753fa362a22df41fbed3ccbe6e38e0914be4 Mon Sep 17 00:00:00 2001
From: Luca Fedeli
Date: Wed, 8 Jan 2025 18:30:04 +0100
Subject: [PATCH 135/278] Remove an unnecessary call to WarpX::GetInstance() (#5540)

This PR removes an unnecessary call to `WarpX::GetInstance()` from a member function of the `WarpX` class.
---
 Source/Parallelization/WarpXRegrid.cpp | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/Source/Parallelization/WarpXRegrid.cpp b/Source/Parallelization/WarpXRegrid.cpp
index a0a2d4929df..7adc00ed523 100644
--- a/Source/Parallelization/WarpXRegrid.cpp
+++ b/Source/Parallelization/WarpXRegrid.cpp
@@ -320,7 +320,7 @@ WarpX::ComputeCostsHeuristic (amrex::Vector Date: Wed, 8 Jan 2025 09:30:49 -0800
Subject: [PATCH 136/278] CI: ignore all `.rst` files in the repository (#5523)

Following up on #5387, I think we should also ignore all `.rst` files in the repository when we decide whether or not to run the CI workflows.

GitHub Actions syntax taken from the examples [here](https://docs.github.com/en/actions/writing-workflows/workflow-syntax-for-github-actions#patterns-to-match-file-paths) (see `'**.js'` example).

Azure syntax to be tested.

If we merge this before #5522, we can test it (i.e., test that CI is skipped) in #5522 after rebasing there.
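For reference, a minimal sketch of the trigger block each workflow ends up with (same patterns as in the diffs below; `'**.rst'` is the GitHub Actions glob for matching a file suffix at any depth, including the repository root):

```yaml
on:
  pull_request:
    paths-ignore:
      - "Docs/**"  # anything under the Docs/ directory
      - "**.rst"   # any reStructuredText file, anywhere in the repository
```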
--- .azure-pipelines.yml | 1 + .github/workflows/clang_sanitizers.yml | 1 + .github/workflows/clang_tidy.yml | 1 + .github/workflows/cuda.yml | 1 + .github/workflows/hip.yml | 1 + .github/workflows/insitu.yml | 1 + .github/workflows/intel.yml | 1 + .github/workflows/macos.yml | 1 + .github/workflows/ubuntu.yml | 1 + .github/workflows/windows.yml | 1 + 10 files changed, 10 insertions(+) diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index d22097a208f..28c4e03d102 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -13,6 +13,7 @@ pr: paths: exclude: - Docs + - '**/*.rst' jobs: - job: diff --git a/.github/workflows/clang_sanitizers.yml b/.github/workflows/clang_sanitizers.yml index d63a329bf64..15dbb00756a 100644 --- a/.github/workflows/clang_sanitizers.yml +++ b/.github/workflows/clang_sanitizers.yml @@ -7,6 +7,7 @@ on: pull_request: paths-ignore: - "Docs/**" + - "**.rst" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-clangsanitizers diff --git a/.github/workflows/clang_tidy.yml b/.github/workflows/clang_tidy.yml index edb3e8b1988..6e83b07000f 100644 --- a/.github/workflows/clang_tidy.yml +++ b/.github/workflows/clang_tidy.yml @@ -7,6 +7,7 @@ on: pull_request: paths-ignore: - "Docs/**" + - "**.rst" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-clangtidy diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index e4967bea790..404f53a3295 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -7,6 +7,7 @@ on: pull_request: paths-ignore: - "Docs/**" + - "**.rst" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-cuda diff --git a/.github/workflows/hip.yml b/.github/workflows/hip.yml index 6ab4e4a8401..f61c8fe1313 100644 --- a/.github/workflows/hip.yml +++ b/.github/workflows/hip.yml @@ -7,6 +7,7 @@ on: pull_request: paths-ignore: - "Docs/**" + - "**.rst" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-hip diff --git a/.github/workflows/insitu.yml b/.github/workflows/insitu.yml index 50b482d28d3..3d3942174a7 100644 --- a/.github/workflows/insitu.yml +++ b/.github/workflows/insitu.yml @@ -7,6 +7,7 @@ on: pull_request: paths-ignore: - "Docs/**" + - "**.rst" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-insituvis diff --git a/.github/workflows/intel.yml b/.github/workflows/intel.yml index 9b98c6e5990..25819e188e3 100644 --- a/.github/workflows/intel.yml +++ b/.github/workflows/intel.yml @@ -7,6 +7,7 @@ on: pull_request: paths-ignore: - "Docs/**" + - "**.rst" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-intel diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 0ddfcf38b41..87482cc6166 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -7,6 +7,7 @@ on: pull_request: paths-ignore: - "Docs/**" + - "**.rst" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-macos diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index bbe20679781..d657daf5793 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -7,6 +7,7 @@ on: pull_request: paths-ignore: - "Docs/**" + - "**.rst" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-ubuntu diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index ae4843e0536..7f964239a02 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -7,6 +7,7 @@ on: pull_request: paths-ignore: - "Docs/**" + - "**.rst" concurrency: group: ${{ github.ref }}-${{ github.head_ref }}-windows From 
9fdc6ecab5462d21a17ae943e7f9e0f319daead5 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Wed, 8 Jan 2025 09:31:47 -0800 Subject: [PATCH 137/278] Docs: fix bugs (broken links, missing examples) (#5522) Just fixing a few errors and warnings I found while working on another documentation PR. Mostly broken links and missing examples. Merge #5523 first, see https://github.com/ECP-WarpX/WarpX/pull/5522#issuecomment-2555975093 below. --- Docs/source/developers/checksum.rst | 5 ----- Docs/source/developers/fields.rst | 4 ++-- Docs/source/developers/particles.rst | 2 +- Docs/source/install/hpc/dane.rst | 6 +++--- Docs/source/install/hpc/lawrencium.rst | 2 +- Docs/source/refs.bib | 1 + Docs/source/theory/multiphysics/collisions.rst | 2 +- Docs/source/usage/examples.rst | 8 -------- Docs/source/usage/examples/thomson_parabola_spectrometer | 1 + .../thomson_parabola_spectrometer/README.rst | 2 +- 10 files changed, 11 insertions(+), 22 deletions(-) create mode 120000 Docs/source/usage/examples/thomson_parabola_spectrometer diff --git a/Docs/source/developers/checksum.rst b/Docs/source/developers/checksum.rst index ccbea3408ef..1e71ee3ddae 100644 --- a/Docs/source/developers/checksum.rst +++ b/Docs/source/developers/checksum.rst @@ -22,11 +22,6 @@ This relies on the function ``evaluate_checksum``: .. autofunction:: checksumAPI.evaluate_checksum -Here's an example: - -.. literalinclude:: ../../../Examples/Tests/embedded_circle/analysis.py - :language: python - This can also be included as part of an existing analysis script. How to evaluate checksums from the command line diff --git a/Docs/source/developers/fields.rst b/Docs/source/developers/fields.rst index bd6a886ae2a..ee782570bad 100644 --- a/Docs/source/developers/fields.rst +++ b/Docs/source/developers/fields.rst @@ -119,9 +119,9 @@ Bilinear filter The multi-pass bilinear filter (applied on the current density) is implemented in ``Source/Filter/``, and class ``WarpX`` holds an instance of this class in member variable ``WarpX::bilinear_filter``. For performance reasons (to avoid creating too many guard cells), this filter is directly applied in communication routines, see ``WarpX::AddCurrentFromFineLevelandSumBoundary`` above and -.. doxygenfunction:: WarpX::ApplyFilterMF(const amrex::Vector, 3>> &mfvec, int lev, int idim) +.. doxygenfunction:: WarpX::ApplyFilterMF(const ablastr::fields::MultiLevelVectorField &mfvec, int lev, int idim) -.. doxygenfunction:: WarpX::SumBoundaryJ(const amrex::Vector, 3>> ¤t, int lev, int idim, const amrex::Periodicity &period) +.. doxygenfunction:: WarpX::SumBoundaryJ(const ablastr::fields::MultiLevelVectorField ¤t, int lev, int idim, const amrex::Periodicity &period) Godfrey's anti-NCI filter for FDTD simulations ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/Docs/source/developers/particles.rst b/Docs/source/developers/particles.rst index 1f1e2eab606..45a92107ae9 100644 --- a/Docs/source/developers/particles.rst +++ b/Docs/source/developers/particles.rst @@ -83,7 +83,7 @@ Main functions .. doxygenfunction:: PhysicalParticleContainer::PushPX -.. doxygenfunction:: WarpXParticleContainer::DepositCurrent(amrex::Vector, 3>> &J, amrex::Real dt, amrex::Real relative_time) +.. doxygenfunction:: WarpXParticleContainer::DepositCurrent(ablastr::fields::MultiLevelVectorField const &J, amrex::Real dt, amrex::Real relative_time) .. 
note:: The current deposition is used both by ``PhysicalParticleContainer`` and ``LaserParticleContainer``, so it is in the parent class ``WarpXParticleContainer``. diff --git a/Docs/source/install/hpc/dane.rst b/Docs/source/install/hpc/dane.rst index 2e0efc99391..9c3c9077df5 100644 --- a/Docs/source/install/hpc/dane.rst +++ b/Docs/source/install/hpc/dane.rst @@ -3,7 +3,7 @@ Dane (LLNL) ============= -The `Dane Intel CPU cluster `_ is located at LLNL. +The `Dane Intel CPU cluster `__ is located at LLNL. Introduction @@ -11,9 +11,9 @@ Introduction If you are new to this system, **please see the following resources**: -* `LLNL user account `__ (login required) * `Jupyter service `__ (`documentation `__, login required) -* `Production directories `_: +* `Production directories `__: * ``/p/lustre1/$(whoami)`` and ``/p/lustre2/$(whoami)``: personal directory on the parallel filesystem * Note that the ``$HOME`` directory and the ``/usr/workspace/$(whoami)`` space are NFS mounted and *not* suitable for production quality data generation. diff --git a/Docs/source/install/hpc/lawrencium.rst b/Docs/source/install/hpc/lawrencium.rst index 2217c5a31ce..f163531a29a 100644 --- a/Docs/source/install/hpc/lawrencium.rst +++ b/Docs/source/install/hpc/lawrencium.rst @@ -69,7 +69,7 @@ And since Lawrencium does not yet provide a module for them, install ADIOS2, BLA cmake -S src/lapackpp -B src/lapackpp-v100-build -DCMAKE_CXX_STANDARD=17 -Dgpu_backend=cuda -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=$HOME/sw/v100/lapackpp-master -Duse_cmake_find_lapack=ON -DBLAS_LIBRARIES=${LAPACK_DIR}/lib/libblas.a -DLAPACK_LIBRARIES=${LAPACK_DIR}/lib/liblapack.a cmake --build src/lapackpp-v100-build --target install --parallel 12 -Optionally, download and install Python packages for :ref:`PICMI ` or dynamic ensemble optimizations (:ref:`libEnsemble `): +Optionally, download and install Python packages for :ref:`PICMI ` or dynamic ensemble optimizations (`libEnsemble `__): .. code-block:: bash diff --git a/Docs/source/refs.bib b/Docs/source/refs.bib index 02251c433d5..d6c81c34404 100644 --- a/Docs/source/refs.bib +++ b/Docs/source/refs.bib @@ -458,6 +458,7 @@ @misc{Fallahi2020 @article{VayFELA2009, title = {FULL ELECTROMAGNETIC SIMULATION OF FREE-ELECTRON LASER AMPLIFIER PHYSICS VIA THE LORENTZ-BOOSTED FRAME APPROACH}, author = {Fawley, William M and Vay, Jean-Luc}, + journal = {}, abstractNote = {Numerical simulation of some systems containing charged particles with highly relativistic directed motion can by speeded up by orders of magnitude by choice of the proper Lorentz-boosted frame[1]. A particularly good example is that of short wavelength free-electron lasers (FELs) in which a high energy electron beam interacts with a static magnetic undulator. In the optimal boost frame with Lorentz factor gamma_F , the red-shifted FEL radiation and blue shifted undulator have identical wavelengths and the number of required time-steps (presuming the Courant condition applies) decreases by a factor of 2(gamma_F)**2 for fully electromagnetic simulation. 
We have adapted the WARP code [2]to apply this method to several FEL problems involving coherent spontaneous emission (CSE) from pre-bunched ebeams, including that in a biharmonic undulator.},
url = {https://www.osti.gov/biblio/964405},
place = {United States},

diff --git a/Docs/source/theory/multiphysics/collisions.rst b/Docs/source/theory/multiphysics/collisions.rst
index 08485345a13..a2b11bf42a2 100644
--- a/Docs/source/theory/multiphysics/collisions.rst
+++ b/Docs/source/theory/multiphysics/collisions.rst
@@ -131,7 +131,7 @@ The process is also the same as for elastic scattering except the excitation energy

 Benchmarks
 ----------

-See the :ref:`MCC example ` for a benchmark of the MCC
+See the :ref:`MCC example ` for a benchmark of the MCC
 implementation against literature results.

 Particle cooling due to elastic collisions

diff --git a/Docs/source/usage/examples.rst b/Docs/source/usage/examples.rst
index fa3e674edd3..4ac80a8bab0 100644
--- a/Docs/source/usage/examples.rst
+++ b/Docs/source/usage/examples.rst
@@ -65,14 +65,6 @@ Microelectronics
 * `ARTEMIS manual `__


-Nuclear Fusion
---------------
-
-.. note::
-
-   TODO
-
-
 Fundamental Plasma Physics
 --------------------------

diff --git a/Docs/source/usage/examples/thomson_parabola_spectrometer b/Docs/source/usage/examples/thomson_parabola_spectrometer
new file mode 120000
index 00000000000..8e72fba4100
--- /dev/null
+++ b/Docs/source/usage/examples/thomson_parabola_spectrometer
@@ -0,0 +1 @@
+../../../../Examples/Physics_applications/thomson_parabola_spectrometer
\ No newline at end of file

diff --git a/Examples/Physics_applications/thomson_parabola_spectrometer/README.rst b/Examples/Physics_applications/thomson_parabola_spectrometer/README.rst
index b033ee8c1dd..10009008714 100644
--- a/Examples/Physics_applications/thomson_parabola_spectrometer/README.rst
+++ b/Examples/Physics_applications/thomson_parabola_spectrometer/README.rst
@@ -28,7 +28,7 @@ The PICMI input file is not available for this example yet.

 For `MPI-parallel `__ runs, prefix these lines with ``mpiexec -n 4 ...`` or ``srun -n 4 ...``, depending on the system.

-.. literalinclude:: inputs
+.. literalinclude:: inputs_test_3d_thomson_parabola_spectrometer
    :language: ini
    :caption: You can copy this file from ``Examples/Physics_applications/thomson_parabola_spectrometer/inputs_test_3d_thomson_parabola_spectrometer``.

From 499fcb07944cadd6d11358bac3de7142d23b590d Mon Sep 17 00:00:00 2001
From: Axel Huebl
Date: Wed, 8 Jan 2025 11:52:56 -0800
Subject: [PATCH 138/278] Python 3.13 Support, 3.8 EOL (#5361)

Add support for Python 3.13. Remove 3.8 because it is EOL as of Oct 2024.

Bump to pybind11 2.13.0+, which adds Python 3.13 support in CI.
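For reference, a short sketch of how a pre-installed pybind11 can be used instead of the internal download (configure flags as in the diffs below; the build commands themselves are illustrative):

```bash
# assumes a system pybind11 >= 2.13.0 that CMake can discover
cmake -S . -B build -DWarpX_PYTHON=ON -DWarpX_pybind11_internal=OFF
cmake --build build -j 4
```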
--- CMakeLists.txt | 2 +- Docs/source/developers/gnumake/python.rst | 2 +- Docs/source/install/dependencies.rst | 2 +- Python/setup.py | 2 +- cmake/dependencies/pybind11.cmake | 4 ++-- setup.py | 5 +++-- 6 files changed, 9 insertions(+), 8 deletions(-) diff --git a/CMakeLists.txt b/CMakeLists.txt index c7a889633da..ff5d156fb8a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -480,7 +480,7 @@ foreach(D IN LISTS WarpX_DIMS) warpx_enable_IPO(pyWarpX_${SD}) else() # conditionally defined target in pybind11 - # https://github.com/pybind/pybind11/blob/v2.12.0/tools/pybind11Common.cmake#L397-L403 + # https://github.com/pybind/pybind11/blob/v2.13.0/tools/pybind11Common.cmake#L407-L413 target_link_libraries(pyWarpX_${SD} PRIVATE pybind11::lto) endif() endif() diff --git a/Docs/source/developers/gnumake/python.rst b/Docs/source/developers/gnumake/python.rst index 543b80d5ddd..06dbd5ac737 100644 --- a/Docs/source/developers/gnumake/python.rst +++ b/Docs/source/developers/gnumake/python.rst @@ -3,7 +3,7 @@ Installing WarpX as a Python package ==================================== -A full Python installation of WarpX can be done, which includes a build of all of the C++ code, or a pure Python version can be made which only installs the Python scripts. WarpX requires Python version 3.8 or newer. +A full Python installation of WarpX can be done, which includes a build of all of the C++ code, or a pure Python version can be made which only installs the Python scripts. WarpX requires Python version 3.9 or newer. For a full Python installation of WarpX --------------------------------------- diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index 13e2377d568..200677807d7 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -37,7 +37,7 @@ Optional dependencies include: - `SENSEI 4.0.0+ `__: for in situ analysis and visualization - `CCache `__: to speed up rebuilds (For CUDA support, needs version 3.7.9+ and 4.2+ is recommended) - `Ninja `__: for faster parallel compiles -- `Python 3.8+ `__ +- `Python 3.9+ `__ - `mpi4py `__ - `numpy `__ diff --git a/Python/setup.py b/Python/setup.py index fa38e14e7ce..c119917631e 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -71,6 +71,6 @@ description="""Wrapper of WarpX""", package_data=package_data, install_requires=["numpy", "picmistandard==0.33.0", "periodictable"], - python_requires=">=3.8", + python_requires=">=3.8", # left for CI, truly ">=3.9" zip_safe=False, ) diff --git a/cmake/dependencies/pybind11.cmake b/cmake/dependencies/pybind11.cmake index 50b00013f7a..e90b56b2d38 100644 --- a/cmake/dependencies/pybind11.cmake +++ b/cmake/dependencies/pybind11.cmake @@ -37,7 +37,7 @@ function(find_pybind11) mark_as_advanced(FETCHCONTENT_UPDATES_DISCONNECTED_FETCHEDpybind11) endif() else() - find_package(pybind11 2.12.0 CONFIG REQUIRED) + find_package(pybind11 2.13.0 CONFIG REQUIRED) message(STATUS "pybind11: Found version '${pybind11_VERSION}'") endif() endfunction() @@ -52,7 +52,7 @@ option(WarpX_pybind11_internal "Download & build pybind11" ON) set(WarpX_pybind11_repo "https://github.com/pybind/pybind11.git" CACHE STRING "Repository URI to pull and build pybind11 from if(WarpX_pybind11_internal)") -set(WarpX_pybind11_branch "v2.12.0" +set(WarpX_pybind11_branch "v2.13.6" CACHE STRING "Repository branch for WarpX_pybind11_repo if(WarpX_pybind11_internal)") diff --git a/setup.py b/setup.py index 0feb0a710d4..cb98d6371f5 100644 --- a/setup.py +++ b/setup.py @@ -307,7 +307,7 @@ def 
build_extension(self, ext): cmdclass=cmdclass, # scripts=['warpx_1d', 'warpx_2d', 'warpx_rz', 'warpx_3d'], zip_safe=False, - python_requires=">=3.8", + python_requires=">=3.8", # left for CI, truly ">=3.9" # tests_require=['pytest'], install_requires=install_requires, # see: src/bindings/python/cli @@ -336,10 +336,11 @@ def build_extension(self, ext): "Topic :: Scientific/Engineering :: Physics", "Programming Language :: C++", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.10", "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", ( "License :: OSI Approved :: " "BSD License" ), # TODO: use real SPDX: BSD-3-Clause-LBNL From a56ca8d91e359ce2bad970b7017813e83a51fb02 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 8 Jan 2025 21:52:47 -0800 Subject: [PATCH 139/278] Release 25.01 (#5544) Prepare the January release of WarpX: ```bash # update dependencies ./Tools/Release/updateAMReX.py ./Tools/Release/updatePICSAR.py # no changes, still 24.09 ./Tools/Release/updatepyAMReX.py # bump version number ./Tools/Release/newVersion.sh ``` Following this workflow: https://warpx.readthedocs.io/en/latest/maintenance/release.html --- .github/workflows/cuda.yml | 2 +- CMakeLists.txt | 2 +- Docs/source/conf.py | 4 +- Docs/source/maintenance/release.rst | 5 +- Python/setup.py | 2 +- Tools/Release/releasePR.py | 200 ++++++++++++++++++++++++++++ cmake/dependencies/AMReX.cmake | 4 +- cmake/dependencies/pyAMReX.cmake | 4 +- setup.py | 2 +- 9 files changed, 214 insertions(+), 11 deletions(-) create mode 100755 Tools/Release/releasePR.py diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 404f53a3295..aa0daa2a718 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -127,7 +127,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach b3f67385e62f387b548389222840486c0fffca57 && cd - + cd ../amrex && git checkout --detach 25.01 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/CMakeLists.txt b/CMakeLists.txt index ff5d156fb8a..90771cbbb29 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ # Preamble #################################################################### # cmake_minimum_required(VERSION 3.24.0) -project(WarpX VERSION 24.12) +project(WarpX VERSION 25.01) include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake) diff --git a/Docs/source/conf.py b/Docs/source/conf.py index e54a6cc23ba..247e11faa4f 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -107,9 +107,9 @@ def __init__(self, *args, **kwargs): # built documents. # # The short X.Y version. -version = "24.12" +version = "25.01" # The full version, including alpha/beta/rc tags. -release = "24.12" +release = "25.01" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/Docs/source/maintenance/release.rst b/Docs/source/maintenance/release.rst index f25b8c313e4..9c6dbcc3f82 100644 --- a/Docs/source/maintenance/release.rst +++ b/Docs/source/maintenance/release.rst @@ -28,7 +28,7 @@ In order to create a GitHub release, you need to: 1. Create a new branch from ``development`` and update the version number in all source files. 
We usually wait for the AMReX release to be tagged first, then we also point to its tag. - There is a script for updating core dependencies of WarpX and the WarpX version: + There are scripts for updating core dependencies of WarpX and the WarpX version: .. code-block:: sh @@ -42,6 +42,9 @@ In order to create a GitHub release, you need to: Then open a PR, wait for tests to pass and then merge. + The maintainer script ``Tools/Release/releasePR.py`` automates the steps above. + Please read through the instructions in the script before running. + 2. **Local Commit** (Optional): at the moment, ``@ax3l`` is managing releases and signs tags (naming: ``YY.MM``) locally with his GPG key before uploading them to GitHub. **Publish**: On the `GitHub Release page `__, create a new release via ``Draft a new release``. diff --git a/Python/setup.py b/Python/setup.py index c119917631e..a50b467c070 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -65,7 +65,7 @@ setup( name="pywarpx", - version="24.12", + version="25.01", packages=["pywarpx"], package_dir={"pywarpx": "pywarpx"}, description="""Wrapper of WarpX""", diff --git a/Tools/Release/releasePR.py b/Tools/Release/releasePR.py new file mode 100755 index 00000000000..3fd1b016efd --- /dev/null +++ b/Tools/Release/releasePR.py @@ -0,0 +1,200 @@ +#!/usr/bin/env python3 +# +# Copyright 2025 The WarpX Community +# +# This file is part of WarpX. +# +# Authors: Axel Huebl +# + +# This file is a maintainer tool to open a release PR for WarpX. +# It is highly automated and does a few assumptions, e.g., that you +# are releasing for the current month. +# +# You also need to have git and the GitHub CLI tool "gh" installed and properly +# configured for it to work: +# https://cli.github.com/ +# +import subprocess +import sys +from datetime import datetime +from pathlib import Path + +# Maintainer Inputs ########################################################### + +print("""Hi there, this is a WarpX maintainer tool to ...\n. +For it to work, you need write access on the source directory and +you should be working in a clean git branch without ongoing +rebase/merge/conflict resolves and without unstaged changes.""") + +# check source dir +REPO_DIR = Path(__file__).parent.parent.parent.absolute() +print(f"\nYour current source directory is: {REPO_DIR}") + +REPLY = input("Are you sure you want to continue? [y/N] ") +print() +if REPLY not in ["Y", "y"]: + print("You did not confirm with 'y', aborting.") + sys.exit(1) + +release_repo = input("What is the name of your git remote? (e.g., ax3l) ") +commit_sign = input("How to sign the commit? 
(e.g., -sS) ") + + +# Helpers ##################################################################### + + +def concat_answers(answers): + return "\n".join(answers) + "\n" + + +# Stash current work ########################################################## + +subprocess.run(["git", "stash"], capture_output=True, text=True) + + +# Git Branch ################################################################## + +WarpX_version_yr = f"{datetime.now().strftime('%y')}" +WarpX_version_mn = f"{datetime.now().strftime('%m')}" +WarpX_version = f"{WarpX_version_yr}.{WarpX_version_mn}" +release_branch = f"release-{WarpX_version}" +subprocess.run(["git", "checkout", "development"], capture_output=True, text=True) +subprocess.run(["git", "fetch"], capture_output=True, text=True) +subprocess.run(["git", "pull", "--ff-only"], capture_output=True, text=True) +subprocess.run(["git", "branch", "-D", release_branch], capture_output=True, text=True) +subprocess.run( + ["git", "checkout", "-b", release_branch], capture_output=True, text=True +) + + +# AMReX New Version ########################################################### + +AMReX_version = f"{datetime.now().strftime('%y')}.{datetime.now().strftime('%m')}" +answers = concat_answers(["y", AMReX_version, AMReX_version, "y"]) + +process = subprocess.Popen( + [Path(REPO_DIR).joinpath("Tools/Release/updateAMReX.py")], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, +) + +process.communicate(answers) +del process + +# commit +subprocess.run(["git", "add", "-u"], capture_output=True, text=True) +subprocess.run( + ["git", "commit", commit_sign, "-m", f"AMReX: {AMReX_version}"], text=True +) + + +# PICSAR New Version ########################################################## + +PICSAR_version = "24.09" +answers = concat_answers(["y", PICSAR_version, PICSAR_version, "y"]) + +process = subprocess.Popen( + [Path(REPO_DIR).joinpath("Tools/Release/updatePICSAR.py")], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, +) + +process.communicate(answers) +del process + +# commit +subprocess.run(["git", "add", "-u"], capture_output=True, text=True) +subprocess.run( + ["git", "commit", commit_sign, "-m", f"PICSAR: {PICSAR_version}"], text=True +) + + +# pyAMReX New Version ######################################################### + +pyAMReX_version = f"{datetime.now().strftime('%y')}.{datetime.now().strftime('%m')}" +answers = concat_answers(["y", pyAMReX_version, pyAMReX_version, "y"]) + +process = subprocess.Popen( + [Path(REPO_DIR).joinpath("Tools/Release/updatepyAMReX.py")], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, +) + +process.communicate(answers) +del process + +# commit +subprocess.run(["git", "add", "-u"], capture_output=True, text=True) +subprocess.run( + ["git", "commit", commit_sign, "-m", f"pyAMReX: {pyAMReX_version}"], text=True +) + + +# WarpX New Version ########################################################### + +answers = concat_answers(["y", WarpX_version_yr, WarpX_version_mn, "", "", "y"]) + +process = subprocess.Popen( + [Path(REPO_DIR).joinpath("Tools/Release/newVersion.sh")], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, +) + +process.communicate(answers) +del process + +# commit +subprocess.run(["git", "add", "-u"], capture_output=True, text=True) +subprocess.run( + ["git", "commit", commit_sign, "-m", f"WarpX: {WarpX_version}"], text=True +) + + +# GitHub PR 
################################################################### + +subprocess.run(["git", "push", "-u", release_repo, release_branch], text=True) + +subprocess.run( + [ + "gh", + "pr", + "create", + "--title", + f"Release {WarpX_version}", + "--body", + f"""Prepare the {datetime.now().strftime('%B')} release of WarpX: +```bash +# update dependencies +./Tools/Release/updateAMReX.py +./Tools/Release/updatePICSAR.py # no changes, still {PICSAR_version} +./Tools/Release/updatepyAMReX.py +# bump version number +./Tools/Release/newVersion.sh +``` + +Following this workflow: https://warpx.readthedocs.io/en/latest/maintenance/release.html +""", + "--label", + "component: documentation", + "--label", + "component: third party", + "--web", + ], + text=True, +) + + +# Epilogue #################################################################### + +print("""Done. Please check your source, e.g. via + git diff +now and commit the changes if no errors occurred.""") diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 0066a3103cd..88df1f82fe8 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -271,7 +271,7 @@ macro(find_amrex) endif() set(COMPONENT_PRECISION ${WarpX_PRECISION} P${WarpX_PARTICLE_PRECISION}) - find_package(AMReX 24.12 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} ${COMPONENT_FFT} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) + find_package(AMReX 25.01 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} ${COMPONENT_FFT} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) # note: TINYP skipped because user-configured and optional # AMReX CMake helper scripts @@ -294,7 +294,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "b3f67385e62f387b548389222840486c0fffca57" +set(WarpX_amrex_branch "25.01" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index 93c4cc63e5a..777b75e2ed3 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -59,7 +59,7 @@ function(find_pyamrex) endif() elseif(NOT WarpX_pyamrex_internal) # TODO: MPI control - find_package(pyAMReX 24.12 CONFIG REQUIRED) + find_package(pyAMReX 25.01 CONFIG REQUIRED) message(STATUS "pyAMReX: Found version '${pyAMReX_VERSION}'") endif() endfunction() @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "cba1ca5098fd4edc83b2ae630c0391140fac55f4" +set(WarpX_pyamrex_branch "25.01" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") diff --git a/setup.py b/setup.py index cb98d6371f5..c3f2e730726 100644 --- a/setup.py +++ b/setup.py @@ -280,7 +280,7 @@ def build_extension(self, ext): setup( name="pywarpx", # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version="24.12", + version="25.01", packages=["pywarpx"], package_dir={"pywarpx": "Python/pywarpx"}, author="Jean-Luc Vay, David P. 
Grote, Maxence Thévenet, Rémi Lehe, Andrew Myers, Weiqun Zhang, Axel Huebl, et al.", From 09ab371649e35fd8f17c2f1c6f59613e035c5d5b Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Thu, 9 Jan 2025 18:52:19 +0100 Subject: [PATCH 140/278] Make `do_subcycling` a private variable of the WarpX class (#5546) This PR makes `do_subcycling` (renamed `m_do_subcycling`) a private member variable of the `WarpX` class. This is part of the effort towards making WarpX class less static. --- Source/Evolve/WarpXComputeDt.cpp | 2 +- Source/Evolve/WarpXEvolve.cpp | 6 +++--- Source/Initialization/WarpXInitData.cpp | 2 +- Source/WarpX.H | 4 +++- Source/WarpX.cpp | 11 +++++------ 5 files changed, 13 insertions(+), 12 deletions(-) diff --git a/Source/Evolve/WarpXComputeDt.cpp b/Source/Evolve/WarpXComputeDt.cpp index b82cb6aff26..9645f7edbe2 100644 --- a/Source/Evolve/WarpXComputeDt.cpp +++ b/Source/Evolve/WarpXComputeDt.cpp @@ -94,7 +94,7 @@ WarpX::ComputeDt () dt.resize(0); dt.resize(max_level+1,deltat); - if (do_subcycling) { + if (m_do_subcycling) { for (int lev = max_level-1; lev >= 0; --lev) { dt[lev] = dt[lev+1] * refRatio(lev)[0]; } diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 163138ca572..c9e363879a6 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -187,7 +187,7 @@ WarpX::Evolve (int numsteps) OneStep_multiJ(cur_time); } // Electromagnetic case: no subcycling or no mesh refinement - else if ( !do_subcycling || (finest_level == 0)) + else if ( !m_do_subcycling || (finest_level == 0)) { OneStep_nosub(cur_time); // E: guard cells are up-to-date @@ -195,14 +195,14 @@ WarpX::Evolve (int numsteps) // F: guard cells are NOT up-to-date } // Electromagnetic case: subcycling with one level of mesh refinement - else if (do_subcycling && (finest_level == 1)) + else if (m_do_subcycling && (finest_level == 1)) { OneStep_sub1(cur_time); } else { WARPX_ABORT_WITH_MESSAGE( - "do_subcycling = " + std::to_string(do_subcycling) + "do_subcycling = " + std::to_string(m_do_subcycling) + " is an unsupported do_subcycling type."); } diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index daecfac8bed..71c773c0669 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -850,7 +850,7 @@ WarpX::computeMaxStepBoostAccelerator() { const Real interaction_time_boost = (len_plasma_boost-zmin_domain_boost_step_0)/ (moving_window_v-v_plasma_boost); // Divide by dt, and update value of max_step. - const auto computed_max_step = (do_subcycling)? + const auto computed_max_step = (m_do_subcycling)? static_cast(interaction_time_boost/dt[0]): static_cast(interaction_time_boost/dt[maxLevel()]); max_step = computed_max_step; diff --git a/Source/WarpX.H b/Source/WarpX.H index 56d4f879de8..2c949cb514a 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -391,7 +391,6 @@ public: //! Specifies the type of grid used for the above sorting, i.e. 
cell-centered, nodal, or mixed static amrex::IntVect sort_idx_type; - static bool do_subcycling; static bool do_multi_J; static int do_multi_J_n_depositions; @@ -1454,6 +1453,9 @@ private: std::optional m_const_dt; std::optional m_max_dt; + // whether to use subcycling + bool m_do_subcycling = false; + // Macroscopic properties std::unique_ptr m_macroscopic_properties; diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 75aa964da3a..3f53decbb83 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -179,7 +179,6 @@ amrex::IntVect WarpX::sort_idx_type(AMREX_D_DECL(0,0,0)); bool WarpX::do_dynamic_scheduling = true; -bool WarpX::do_subcycling = false; bool WarpX::do_multi_J = false; int WarpX::do_multi_J_n_depositions; bool WarpX::safe_guard_cells = false; @@ -448,7 +447,7 @@ WarpX::WarpX () // (e.g., use_fdtd_nci_corr) if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { AMREX_ALWAYS_ASSERT(use_fdtd_nci_corr == 0); - AMREX_ALWAYS_ASSERT(do_subcycling == 0); + AMREX_ALWAYS_ASSERT(m_do_subcycling == 0); } if (WarpX::current_deposition_algo != CurrentDepositionAlgo::Esirkepov) { @@ -622,7 +621,7 @@ WarpX::ReadParameters () utils::parser::queryWithParser(pp_warpx, "cfl", cfl); pp_warpx.query("verbose", verbose); utils::parser::queryWithParser(pp_warpx, "regrid_int", regrid_int); - pp_warpx.query("do_subcycling", do_subcycling); + pp_warpx.query("do_subcycling", m_do_subcycling); pp_warpx.query("do_multi_J", do_multi_J); if (do_multi_J) { @@ -636,7 +635,7 @@ WarpX::ReadParameters () override_sync_intervals = utils::parser::IntervalsParser(override_sync_intervals_string_vec); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(do_subcycling != 1 || max_level <= 1, + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_do_subcycling != 1 || max_level <= 1, "Subcycling method 1 only works for 2 levels."); ReadBoostedFrameParameters(gamma_boost, beta_boost, boost_direction); @@ -2045,7 +2044,7 @@ WarpX::AllocLevelData (int lev, const BoxArray& ba, const DistributionMapping& d guard_cells.Init( dt[lev], dx, - do_subcycling, + m_do_subcycling, WarpX::use_fdtd_nci_corr, grid_type, do_moving_window, @@ -2408,7 +2407,7 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm ncomps, ngPhi, 0.0_rt ); } - if (do_subcycling && lev == 0) + if (m_do_subcycling && lev == 0) { m_fields.alloc_init(FieldType::current_store, Direction{0}, lev, amrex::convert(ba,jx_nodal_flag), dm, ncomps, ngJ, 0.0_rt); m_fields.alloc_init(FieldType::current_store, Direction{1}, lev, amrex::convert(ba,jy_nodal_flag), dm, ncomps, ngJ, 0.0_rt); From 17b0c3d1667493f724e3923c3acadd0c47bf08e5 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 9 Jan 2025 13:53:04 -0800 Subject: [PATCH 141/278] CMake: ABLASTR FFT AMReX (#5548) Complete the control for AMReX_FFT through pure ABLASTR super-builds. 
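A minimal sketch of what this enables for a downstream superbuild (the downstream project layout is hypothetical; only the `ABLASTR_FFT` option and its effect on `AMReX_FFT` come from this patch):

```cmake
# hypothetical CMakeLists.txt of a code that consumes ABLASTR, not the WarpX app
set(WarpX_APP OFF CACHE BOOL "" FORCE)   # build the ABLASTR library only
set(ABLASTR_FFT ON CACHE BOOL "" FORCE)  # request ABLASTR's FFT-based features
add_subdirectory(warpx)                  # find_amrex() now also enables AMReX_FFT
```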
---
 cmake/dependencies/AMReX.cmake | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake
index 0066a3103cd..88df1f82fe8 100644
--- a/cmake/dependencies/AMReX.cmake
+++ b/cmake/dependencies/AMReX.cmake
@@ -51,7 +51,7 @@ macro(find_amrex)
         set(AMReX_OMP OFF CACHE INTERNAL "")
     endif()

-    if(WarpX_FFT)
+    if(WarpX_FFT OR ABLASTR_FFT)
         set(AMReX_FFT ON CACHE INTERNAL "")
     else()
         set(AMReX_FFT OFF CACHE INTERNAL "")

From 16347e6f9aaa95fc28211c21ece913e2d0b7e0a1 Mon Sep 17 00:00:00 2001
From: Revathi Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com>
Date: Fri, 10 Jan 2025 00:04:40 -0800
Subject: [PATCH 142/278] python callback at restart (#5549)

This PR adds a Python callback option after initialization at restart, enabling users to read user-defined checkpoint variables that could have been written with the already existing `afterdiagnostics` Python callback.

---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 Python/pywarpx/callbacks.py             | 11 +++++++++++
 Source/Initialization/WarpXInitData.cpp |  3 +++
 2 files changed, 14 insertions(+)

diff --git a/Python/pywarpx/callbacks.py b/Python/pywarpx/callbacks.py
index 45c9b63a082..d12293e699b 100644
--- a/Python/pywarpx/callbacks.py
+++ b/Python/pywarpx/callbacks.py
@@ -280,6 +280,7 @@ def callfuncsinlist(self, *args, **kw):
     "loadExternalFields": {},
     "beforeInitEsolve": {},
     "afterInitEsolve": {},
+    "afterInitatRestart": {},
     "afterinit": {},
     "beforecollisions": {},
     "aftercollisions": {},
@@ -406,6 +407,16 @@ def installafterInitEsolve(f):
     installcallback("afterInitEsolve", f)


+# ----------------------------------------------------------------------------
+def callfromafterInitatRestart(f):
+    installcallback("afterInitatRestart", f)
+    return f
+
+
+def installafterInitatRestart(f):
+    installcallback("afterInitatRestart", f)
+
+
 # ----------------------------------------------------------------------------
 def callfromafterinit(f):
     installcallback("afterinit", f)

diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp
index 71c773c0669..5de8912be6a 100644
--- a/Source/Initialization/WarpXInitData.cpp
+++ b/Source/Initialization/WarpXInitData.cpp
@@ -610,6 +610,9 @@ WarpX::InitData ()
             AddExternalFields(lev);
         }
     }
+    else {
+        ExecutePythonCallback("afterInitatRestart");
+    }

     if (restart_chkfile.empty() || write_diagnostics_on_restart) {
         // Write full diagnostics before the first iteration.

From 555f4351a0c53b2b36ebdce641d8f3c52c71c509 Mon Sep 17 00:00:00 2001
From: Thomas Marks
Date: Fri, 10 Jan 2025 19:42:58 -0500
Subject: [PATCH 143/278] Python: useful error when `initialize_warpx` not called before creating `ParticleContainerWrapper` (#5412)

Currently, WarpX is initialized when `sim.step` is called, or when the user calls `initialize_warpx`.
However, if the user tries to create a `ParticleContainerWrapper` before this point, they get an error along the following lines: ``` File "/home/marksta/projects/warpx-ionization/picmi.py", line 185, in sim.run() File "/home/marksta/projects/warpx-ionization/picmi.py", line 179, in run self.elec_wrapper = particle_containers.ParticleContainerWrapper(self.electrons.name) File "/home/marksta/.local/lib/python3.10/site-packages/pywarpx/particle_containers.py", line 29, in __init__ mypc = libwarpx.warpx.multi_particle_container() File "/home/marksta/.local/lib/python3.10/site-packages/pywarpx/_libwarpx.py", line 46, in __getattr__ return self.__getattribute__(attribute) AttributeError: 'LibWarpX' object has no attribute 'warpx' ``` This is confusing. When I got this, I assumed I had maybe installed WarpX wrong. I added a catch for this exception that re-raises it with some additional context that should help direct the user to call `initialize_warpx` --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- Python/pywarpx/particle_containers.py | 30 +++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/Python/pywarpx/particle_containers.py b/Python/pywarpx/particle_containers.py index bc6b2d74106..db5dfda883e 100644 --- a/Python/pywarpx/particle_containers.py +++ b/Python/pywarpx/particle_containers.py @@ -24,10 +24,21 @@ class ParticleContainerWrapper(object): def __init__(self, species_name): self.name = species_name + self._particle_container = None + + @property + def particle_container(self): + if self._particle_container is None: + try: + mypc = libwarpx.warpx.multi_particle_container() + self._particle_container = mypc.get_particle_container_from_name( + self.name + ) + except AttributeError as e: + msg = "This is likely caused by attempting to access a ParticleContainerWrapper before initialize_warpx has been called" + raise AttributeError(msg) from e - # grab the desired particle container - mypc = libwarpx.warpx.multi_particle_container() - self.particle_container = mypc.get_particle_container_from_name(self.name) + return self._particle_container def add_particles( self, @@ -758,7 +769,18 @@ class ParticleBoundaryBufferWrapper(object): """ def __init__(self): - self.particle_buffer = libwarpx.warpx.get_particle_boundary_buffer() + self._particle_buffer = None + + @property + def particle_buffer(self): + if self._particle_buffer is None: + try: + self._particle_buffer = libwarpx.warpx.get_particle_boundary_buffer() + except AttributeError as e: + msg = "This is likely caused by attempting to access a ParticleBoundaryBufferWrapper before initialize_warpx has been called" + raise AttributeError(msg) from e + + return self._particle_buffer def get_particle_boundary_buffer_size(self, species_name, boundary, local=False): """ From 2ec6c1364986718c239a6cfe8a2b5ed313c70d88 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 14 Jan 2025 00:58:14 +0000 Subject: [PATCH 144/278] [pre-commit.ci] pre-commit autoupdate (#5553) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.8.6 → v0.9.1](https://github.com/astral-sh/ruff-pre-commit/compare/v0.8.6...v0.9.1) --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- .../ml_materials/run_warpx_training.py | 2 +- 
.../usage/workflows/ml_materials/train.py | 2 +- .../inputs_base_1d_picmi.py | 2 +- .../laser_acceleration/analysis_openpmd_rz.py | 24 +++++----- .../Physics_applications/laser_ion/plot_2d.py | 4 +- .../spacecraft_charging/analysis.py | 6 +-- .../Tests/accelerator_lattice/analysis.py | 6 ++- Examples/Tests/boundaries/analysis.py | 12 ++--- .../analysis.py | 2 +- ...effective_potential_electrostatic_picmi.py | 6 +-- ...test_2d_ohm_solver_landau_damping_picmi.py | 2 +- ...nputs_test_1d_ohm_solver_ion_beam_picmi.py | 2 +- ..._ohm_solver_magnetic_reconnection_picmi.py | 14 +++--- .../particle_boundary_interaction/analysis.py | 6 +-- .../Tests/pass_mpi_communicator/analysis.py | 6 +-- Examples/Tests/plasma_lens/analysis.py | 12 +++-- Python/pywarpx/fields.py | 4 +- Python/pywarpx/particle_containers.py | 48 +++++++++---------- Python/pywarpx/picmi.py | 16 +++---- Regression/Checksum/checksum.py | 3 +- .../post_processing_utils.py | 2 +- Tools/Algorithms/psatd.ipynb | 6 +-- Tools/Algorithms/stencil.py | 4 +- Tools/Release/releasePR.py | 2 +- setup.py | 2 +- 26 files changed, 100 insertions(+), 97 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c07ad07f74a..9279bcd038d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.8.6 + rev: v0.9.1 hooks: # Run the linter - id: ruff diff --git a/Docs/source/usage/workflows/ml_materials/run_warpx_training.py b/Docs/source/usage/workflows/ml_materials/run_warpx_training.py index 9e6b5682ec7..9a246de1cc2 100644 --- a/Docs/source/usage/workflows/ml_materials/run_warpx_training.py +++ b/Docs/source/usage/workflows/ml_materials/run_warpx_training.py @@ -260,7 +260,7 @@ def get_laser(antenna_z, profile_t_peak, fill_in=True): diag_particle_list = ["weighting", "position", "momentum"] coarse_btd_end = int((L_plasma_bulk + 0.001 + stage_spacing * (N_stage - 1)) * 100000) stage_end_snapshots = [ - f"{int((L_plasma_bulk+stage_spacing*ii)*100000)}:{int((L_plasma_bulk+stage_spacing*ii)*100000+50)}:5" + f"{int((L_plasma_bulk + stage_spacing * ii) * 100000)}:{int((L_plasma_bulk + stage_spacing * ii) * 100000 + 50)}:5" for ii in range(1) ] btd_particle_diag = picmi.LabFrameParticleDiagnostic( diff --git a/Docs/source/usage/workflows/ml_materials/train.py b/Docs/source/usage/workflows/ml_materials/train.py index 957a652e0c4..35b02c5cd44 100644 --- a/Docs/source/usage/workflows/ml_materials/train.py +++ b/Docs/source/usage/workflows/ml_materials/train.py @@ -180,7 +180,7 @@ def test_dataset(model, test_source, test_target, loss_fun): ) # Manual: Training loop END t4 = time.time() -print(f"total training time: {t4-t3:.3f}s") +print(f"total training time: {t4 - t3:.3f}s") ######### save model ######### diff --git a/Examples/Physics_applications/capacitive_discharge/inputs_base_1d_picmi.py b/Examples/Physics_applications/capacitive_discharge/inputs_base_1d_picmi.py index 3de88f3b3cb..a03cf1954ad 100644 --- a/Examples/Physics_applications/capacitive_discharge/inputs_base_1d_picmi.py +++ b/Examples/Physics_applications/capacitive_discharge/inputs_base_1d_picmi.py @@ -423,7 +423,7 @@ def run_sim(self): assert hasattr(self.solver, "phi") if libwarpx.amr.ParallelDescriptor.MyProc() == 0: - np.save(f"ion_density_case_{self.n+1}.npy", self.ion_density_array) + np.save(f"ion_density_case_{self.n + 1}.npy", self.ion_density_array) # query the particle z-coordinates if this is run during CI 
testing # to cover that functionality diff --git a/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py b/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py index f136ffeb1d4..1449e54d8ee 100755 --- a/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py +++ b/Examples/Physics_applications/laser_acceleration/analysis_openpmd_rz.py @@ -20,15 +20,15 @@ # this is in C (Python) order; r is the fastest varying index (Nm, Nz, Nr) = jt.shape -assert ( - Nm == 3 -), "Wrong number of angular modes stored or possible incorrect ordering when flushed" -assert ( - Nr == 64 -), "Wrong number of radial points stored or possible incorrect ordering when flushed" -assert ( - Nz == 512 -), "Wrong number of z points stored or possible incorrect ordering when flushed" +assert Nm == 3, ( + "Wrong number of angular modes stored or possible incorrect ordering when flushed" +) +assert Nr == 64, ( + "Wrong number of radial points stored or possible incorrect ordering when flushed" +) +assert Nz == 512, ( + "Wrong number of z points stored or possible incorrect ordering when flushed" +) assert ii.meshes["part_per_grid"][io.Mesh_Record_Component.SCALAR].shape == [ 512, @@ -60,6 +60,6 @@ electron_meanz = np.sum(np.dot(zlist, rhoe0)) / np.sum(rhoe0) beam_meanz = np.sum(np.dot(zlist, rhob0)) / np.sum(rhob0) -assert ( - (electron_meanz > 0) and (beam_meanz < 0) -), "problem with openPMD+RZ. Maybe openPMD+RZ mixed up the order of rho_ diagnostics?" +assert (electron_meanz > 0) and (beam_meanz < 0), ( + "problem with openPMD+RZ. Maybe openPMD+RZ mixed up the order of rho_ diagnostics?" +) diff --git a/Examples/Physics_applications/laser_ion/plot_2d.py b/Examples/Physics_applications/laser_ion/plot_2d.py index b3aefb80606..87b2d76c8f7 100644 --- a/Examples/Physics_applications/laser_ion/plot_2d.py +++ b/Examples/Physics_applications/laser_ion/plot_2d.py @@ -120,7 +120,7 @@ def visualize_density_iteration(ts, iteration, out_dir): for ax in axs[:-1]: ax.set_xticklabels([]) axs[2].set_xlabel(r"$z$ ($\mu$m)") - fig.suptitle(f"Iteration: {it}, Time: {time/1e-15:.1f} fs") + fig.suptitle(f"Iteration: {it}, Time: {time / 1e-15:.1f} fs") plt.tight_layout() @@ -190,7 +190,7 @@ def visualize_field_iteration(ts, iteration, out_dir): for ax in axs[:-1]: ax.set_xticklabels([]) axs[2].set_xlabel(r"$z$ ($\mu$m)") - fig.suptitle(f"Iteration: {it}, Time: {time/1e-15:.1f} fs") + fig.suptitle(f"Iteration: {it}, Time: {time / 1e-15:.1f} fs") plt.tight_layout() diff --git a/Examples/Physics_applications/spacecraft_charging/analysis.py b/Examples/Physics_applications/spacecraft_charging/analysis.py index 8e13657b62e..9e4b9e8219f 100755 --- a/Examples/Physics_applications/spacecraft_charging/analysis.py +++ b/Examples/Physics_applications/spacecraft_charging/analysis.py @@ -68,6 +68,6 @@ def func(x, v0, tau): print("percentage error for v0 = " + str(diff_v0 * 100) + "%") print("percentage error for tau = " + str(diff_tau * 100) + "%") -assert (diff_v0 < tolerance_v0) and ( - diff_tau < tolerance_tau -), "Test spacecraft_charging did not pass" +assert (diff_v0 < tolerance_v0) and (diff_tau < tolerance_tau), ( + "Test spacecraft_charging did not pass" +) diff --git a/Examples/Tests/accelerator_lattice/analysis.py b/Examples/Tests/accelerator_lattice/analysis.py index f53d54cbe12..331c5322e03 100755 --- a/Examples/Tests/accelerator_lattice/analysis.py +++ b/Examples/Tests/accelerator_lattice/analysis.py @@ -118,9 +118,11 @@ def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): xx = 
xx + dt * vx # Compare the analytic to the simulated final values -print(f"Error in x position is {abs(np.abs((xx - xx_sim)/xx))}, which should be < 0.01") print( - f"Error in x velocity is {abs(np.abs((ux - ux_sim)/ux))}, which should be < 0.002" + f"Error in x position is {abs(np.abs((xx - xx_sim) / xx))}, which should be < 0.01" +) +print( + f"Error in x velocity is {abs(np.abs((ux - ux_sim) / ux))}, which should be < 0.002" ) assert abs(np.abs((xx - xx_sim) / xx)) < 0.01, Exception("error in x particle position") diff --git a/Examples/Tests/boundaries/analysis.py b/Examples/Tests/boundaries/analysis.py index 9630c07d0ab..29de2bb37cb 100755 --- a/Examples/Tests/boundaries/analysis.py +++ b/Examples/Tests/boundaries/analysis.py @@ -101,9 +101,9 @@ def do_periodic(x): assert len(a_id) == 1, "Absorbing particles not absorbed" assert np.all(vx == -vx0), "Reflecting particle velocity not correct" assert np.all(vz == +vz0), "Periodic particle velocity not correct" -assert np.all( - np.abs((xx - xxa) / xx) < 1.0e-15 -), "Reflecting particle position not correct" -assert np.all( - np.abs((zz - zza) / zz) < 1.0e-15 -), "Periodic particle position not correct" +assert np.all(np.abs((xx - xxa) / xx) < 1.0e-15), ( + "Reflecting particle position not correct" +) +assert np.all(np.abs((zz - zza) / zz) < 1.0e-15), ( + "Periodic particle position not correct" +) diff --git a/Examples/Tests/effective_potential_electrostatic/analysis.py b/Examples/Tests/effective_potential_electrostatic/analysis.py index b51cd129252..20998e0a066 100755 --- a/Examples/Tests/effective_potential_electrostatic/analysis.py +++ b/Examples/Tests/effective_potential_electrostatic/analysis.py @@ -66,7 +66,7 @@ def get_radial_function(field, info): ) plt.plot(r_grid, n_e_analytic, "k--", alpha=0.6) - plt.plot(r_grid, n_e, label=f"t = {ts.t[ii]*1e6:.2f} $\mu$s") + plt.plot(r_grid, n_e, label=f"t = {ts.t[ii] * 1e6:.2f} $\mu$s") print("RMS error (%) in density: ", rms_errors) assert np.all(rms_errors < 0.05) diff --git a/Examples/Tests/effective_potential_electrostatic/inputs_test_3d_effective_potential_electrostatic_picmi.py b/Examples/Tests/effective_potential_electrostatic/inputs_test_3d_effective_potential_electrostatic_picmi.py index 27b1728b7b2..018739aa682 100644 --- a/Examples/Tests/effective_potential_electrostatic/inputs_test_3d_effective_potential_electrostatic_picmi.py +++ b/Examples/Tests/effective_potential_electrostatic/inputs_test_3d_effective_potential_electrostatic_picmi.py @@ -102,13 +102,13 @@ print( f"Plasma parameters:\n" f"\tlambda_e = {lambda_e:.1e} m\n" - f"\tt_pe = {1.0/f_pe:.1e} s\n" + f"\tt_pe = {1.0 / f_pe:.1e} s\n" f"\tv_ti = {v_ti:.1e} m/s\n" ) print( f"Numerical parameters:\n" - f"\tdz/lambda_e = {dz/lambda_e:.2f}\n" - f"\tdt*w_pe = {dt*f_pe*2.0*np.pi:.2f}\n" + f"\tdz/lambda_e = {dz / lambda_e:.2f}\n" + f"\tdt*w_pe = {dt * f_pe * 2.0 * np.pi:.2f}\n" f"\tdiag steps = {diag_steps:d}\n" f"\ttotal steps = {total_steps:d}\n" ) diff --git a/Examples/Tests/ohm_solver_ion_Landau_damping/inputs_test_2d_ohm_solver_landau_damping_picmi.py b/Examples/Tests/ohm_solver_ion_Landau_damping/inputs_test_2d_ohm_solver_landau_damping_picmi.py index 7c1709d059f..320d36785db 100644 --- a/Examples/Tests/ohm_solver_ion_Landau_damping/inputs_test_2d_ohm_solver_landau_damping_picmi.py +++ b/Examples/Tests/ohm_solver_ion_Landau_damping/inputs_test_2d_ohm_solver_landau_damping_picmi.py @@ -93,7 +93,7 @@ def __init__(self, test, dim, m, T_ratio, verbose): if comm.rank == 0: print( f"Initializing simulation with input 
parameters:\n" - f"\tT = {self.T_plasma*1e-3:.1f} keV\n" + f"\tT = {self.T_plasma * 1e-3:.1f} keV\n" f"\tn = {self.n_plasma:.1e} m^-3\n" f"\tB0 = {self.B0:.2f} T\n" f"\tM/m = {self.m_ion:.0f}\n" diff --git a/Examples/Tests/ohm_solver_ion_beam_instability/inputs_test_1d_ohm_solver_ion_beam_picmi.py b/Examples/Tests/ohm_solver_ion_beam_instability/inputs_test_1d_ohm_solver_ion_beam_picmi.py index 19569a04e5b..52160038831 100644 --- a/Examples/Tests/ohm_solver_ion_beam_instability/inputs_test_1d_ohm_solver_ion_beam_picmi.py +++ b/Examples/Tests/ohm_solver_ion_beam_instability/inputs_test_1d_ohm_solver_ion_beam_picmi.py @@ -110,7 +110,7 @@ def __init__(self, test, dim, resonant, verbose): if comm.rank == 0: print( f"Initializing simulation with input parameters:\n" - f"\tT = {self.T_plasma*1e-3:.1f} keV\n" + f"\tT = {self.T_plasma * 1e-3:.1f} keV\n" f"\tn = {self.n_plasma:.1e} m^-3\n" f"\tB0 = {self.B0:.2f} T\n" f"\tM/m = {self.m_ion:.0f}\n" diff --git a/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py b/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py index f074c81cbb3..2ddbd0df93d 100644 --- a/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py +++ b/Examples/Tests/ohm_solver_magnetic_reconnection/inputs_test_2d_ohm_solver_magnetic_reconnection_picmi.py @@ -83,14 +83,14 @@ def __init__(self, test, verbose): self.Bg *= self.B0 self.dB *= self.B0 self.Bx = ( - f"{self.B0}*tanh(z*{1.0/self.l_i})" - f"+{-self.dB*self.Lx/(2.0*self.Lz)}*cos({2.0*np.pi/self.Lx}*x)" - f"*sin({np.pi/self.Lz}*z)" + f"{self.B0}*tanh(z*{1.0 / self.l_i})" + f"+{-self.dB * self.Lx / (2.0 * self.Lz)}*cos({2.0 * np.pi / self.Lx}*x)" + f"*sin({np.pi / self.Lz}*z)" ) self.By = ( - f"sqrt({self.Bg**2 + self.B0**2}-" f"({self.B0}*tanh(z*{1.0/self.l_i}))**2)" + f"sqrt({self.Bg**2 + self.B0**2}-({self.B0}*tanh(z*{1.0 / self.l_i}))**2)" ) - self.Bz = f"{self.dB}*sin({2.0*np.pi/self.Lx}*x)*cos({np.pi/self.Lz}*z)" + self.Bz = f"{self.dB}*sin({2.0 * np.pi / self.Lx}*x)*cos({np.pi / self.Lz}*z)" self.J0 = self.B0 / constants.mu0 / self.l_i @@ -103,7 +103,7 @@ def __init__(self, test, verbose): if comm.rank == 0: print( f"Initializing simulation with input parameters:\n" - f"\tTi = {self.Ti*1e-3:.1f} keV\n" + f"\tTi = {self.Ti * 1e-3:.1f} keV\n" f"\tn0 = {self.n_plasma:.1e} m^-3\n" f"\tB0 = {self.B0:.2f} T\n" f"\tM/m = {self.m_ion:.0f}\n" @@ -117,7 +117,7 @@ def __init__(self, test, verbose): ) print( f"Numerical parameters:\n" - f"\tdz = {self.Lz/self.NZ:.1e} m\n" + f"\tdz = {self.Lz / self.NZ:.1e} m\n" f"\tdt = {self.dt:.1e} s\n" f"\tdiag steps = {self.diag_steps:d}\n" f"\ttotal steps = {self.total_steps:d}\n" diff --git a/Examples/Tests/particle_boundary_interaction/analysis.py b/Examples/Tests/particle_boundary_interaction/analysis.py index 062569d5553..edf9d463f98 100755 --- a/Examples/Tests/particle_boundary_interaction/analysis.py +++ b/Examples/Tests/particle_boundary_interaction/analysis.py @@ -43,6 +43,6 @@ print("percentage error for x = %5.4f %%" % (diff_x * 100)) print("percentage error for z = %5.4f %%" % (diff_z * 100)) -assert ( - (diff_x < tolerance) and (y[0] < 1e-8) and (diff_z < tolerance) -), "Test particle_boundary_interaction did not pass" +assert (diff_x < tolerance) and (y[0] < 1e-8) and (diff_z < tolerance), ( + "Test particle_boundary_interaction did not pass" +) diff --git a/Examples/Tests/pass_mpi_communicator/analysis.py 
b/Examples/Tests/pass_mpi_communicator/analysis.py index 041687c4775..cfac572c1b9 100755 --- a/Examples/Tests/pass_mpi_communicator/analysis.py +++ b/Examples/Tests/pass_mpi_communicator/analysis.py @@ -37,7 +37,7 @@ # Dictionaries have same outer keys (levels, species)? if checksum1.data.keys() != checksum2.data.keys(): - print("ERROR: plotfile 1 and plotfile 2 checksums " "have different outer keys:") + print("ERROR: plotfile 1 and plotfile 2 checksums have different outer keys:") print("Plot1: %s" % checksum1.data.keys()) print("Plot2: %s" % checksum2.data.keys()) sys.exit(1) @@ -45,9 +45,7 @@ # Dictionaries have same inner keys (field and particle quantities)? for key1 in checksum1.data.keys(): if checksum1.data[key1].keys() != checksum2.data[key1].keys(): - print( - "ERROR: plotfile 1 and plotfile 2 checksums have " "different inner keys:" - ) + print("ERROR: plotfile 1 and plotfile 2 checksums have different inner keys:") print("Common outer keys: %s" % checksum2.data.keys()) print("Plotfile 1 inner keys in %s: %s" % (key1, checksum1.data[key1].keys())) print("Plotfile 2 inner keys in %s: %s" % (key1, checksum2.data[key1].keys())) diff --git a/Examples/Tests/plasma_lens/analysis.py b/Examples/Tests/plasma_lens/analysis.py index 44671eea791..ed7ee653af6 100755 --- a/Examples/Tests/plasma_lens/analysis.py +++ b/Examples/Tests/plasma_lens/analysis.py @@ -160,13 +160,17 @@ def applylens(x0, vx0, vz0, gamma, lens_length, lens_strength): xx = xx + dt0 * vx yy = yy + dt1 * vy -print(f"Error in x position is {abs(np.abs((xx - xx_sim)/xx))}, which should be < 0.02") -print(f"Error in y position is {abs(np.abs((yy - yy_sim)/yy))}, which should be < 0.02") print( - f"Error in x velocity is {abs(np.abs((ux - ux_sim)/ux))}, which should be < 0.002" + f"Error in x position is {abs(np.abs((xx - xx_sim) / xx))}, which should be < 0.02" ) print( - f"Error in y velocity is {abs(np.abs((uy - uy_sim)/uy))}, which should be < 0.002" + f"Error in y position is {abs(np.abs((yy - yy_sim) / yy))}, which should be < 0.02" +) +print( + f"Error in x velocity is {abs(np.abs((ux - ux_sim) / ux))}, which should be < 0.002" +) +print( + f"Error in y velocity is {abs(np.abs((uy - uy_sim) / uy))}, which should be < 0.002" ) if plasma_lens_lengths[0] < 0.01: diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index 5d3b892b543..9beef1de5c8 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -284,10 +284,10 @@ def _find_start_stop(self, ii, imin, imax, d): iistart = ii iistop = ii + 1 assert imin <= iistart <= imax, Exception( - f"Dimension {d+1} lower index is out of bounds" + f"Dimension {d + 1} lower index is out of bounds" ) assert imin <= iistop <= imax, Exception( - f"Dimension {d+1} upper index is out of bounds" + f"Dimension {d + 1} upper index is out of bounds" ) return iistart, iistop diff --git a/Python/pywarpx/particle_containers.py b/Python/pywarpx/particle_containers.py index db5dfda883e..9a4d7257a69 100644 --- a/Python/pywarpx/particle_containers.py +++ b/Python/pywarpx/particle_containers.py @@ -107,31 +107,31 @@ def add_particles( maxlen = max(maxlen, lenw) # --- Make sure that the lengths of the input parameters are consistent - assert ( - x is None or lenx == maxlen or lenx == 1 - ), "Length of x doesn't match len of others" - assert ( - y is None or leny == maxlen or leny == 1 - ), "Length of y doesn't match len of others" - assert ( - z is None or lenz == maxlen or lenz == 1 - ), "Length of z doesn't match len of others" - assert ( - ux is None or lenux == maxlen or 
lenux == 1 - ), "Length of ux doesn't match len of others" - assert ( - uy is None or lenuy == maxlen or lenuy == 1 - ), "Length of uy doesn't match len of others" - assert ( - uz is None or lenuz == maxlen or lenuz == 1 - ), "Length of uz doesn't match len of others" - assert ( - w is None or lenw == maxlen or lenw == 1 - ), "Length of w doesn't match len of others" + assert x is None or lenx == maxlen or lenx == 1, ( + "Length of x doesn't match len of others" + ) + assert y is None or leny == maxlen or leny == 1, ( + "Length of y doesn't match len of others" + ) + assert z is None or lenz == maxlen or lenz == 1, ( + "Length of z doesn't match len of others" + ) + assert ux is None or lenux == maxlen or lenux == 1, ( + "Length of ux doesn't match len of others" + ) + assert uy is None or lenuy == maxlen or lenuy == 1, ( + "Length of uy doesn't match len of others" + ) + assert uz is None or lenuz == maxlen or lenuz == 1, ( + "Length of uz doesn't match len of others" + ) + assert w is None or lenw == maxlen or lenw == 1, ( + "Length of w doesn't match len of others" + ) for key, val in kwargs.items(): - assert ( - np.size(val) == 1 or len(val) == maxlen - ), f"Length of {key} doesn't match len of others" + assert np.size(val) == 1 or len(val) == maxlen, ( + f"Length of {key} doesn't match len of others" + ) # --- Broadcast scalars into appropriate length arrays # --- If the parameter was not supplied, use the default value diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index d464c44726f..f8261cd7847 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -2556,7 +2556,7 @@ def __init__( **kw, ): assert stl_file is None or implicit_function is None, Exception( - "Only one between implicit_function and " "stl_file can be specified" + "Only one between implicit_function and stl_file can be specified" ) self.implicit_function = implicit_function @@ -2666,9 +2666,9 @@ def __init__( self.strengths_E = strengths_E self.strengths_B = strengths_B - assert (self.strengths_E is not None) or ( - self.strengths_B is not None - ), Exception("One of strengths_E or strengths_B must be supplied") + assert (self.strengths_E is not None) or (self.strengths_B is not None), ( + Exception("One of strengths_E or strengths_B must be supplied") + ) self.handle_init(kw) @@ -3011,9 +3011,9 @@ def initialize_inputs(self): particle_shape = self.particle_shape for s in self.species: if s.particle_shape is not None: - assert ( - particle_shape is None or particle_shape == s.particle_shape - ), Exception("WarpX only supports one particle shape for all species") + assert particle_shape is None or particle_shape == s.particle_shape, ( + Exception("WarpX only supports one particle shape for all species") + ) # --- If this was set for any species, use that value. particle_shape = s.particle_shape @@ -4102,7 +4102,7 @@ def __init__( kw = self._handle_charge_on_eb(**kw) else: raise RuntimeError( - f"{self.type} reduced diagnostic is not yet supported " "in pywarpx." + f"{self.type} reduced diagnostic is not yet supported in pywarpx." ) self.handle_init(kw) diff --git a/Regression/Checksum/checksum.py b/Regression/Checksum/checksum.py index b2f327e36e3..8c93f4ea6ea 100644 --- a/Regression/Checksum/checksum.py +++ b/Regression/Checksum/checksum.py @@ -238,8 +238,7 @@ def evaluate(self, rtol=1.0e-9, atol=1.0e-40): # Dictionaries have same outer keys (levels, species)? 
if self.data.keys() != ref_benchmark.data.keys(): print( - "ERROR: Benchmark and output file checksum " - "have different outer keys:" + "ERROR: Benchmark and output file checksum have different outer keys:" ) print("Benchmark: %s" % ref_benchmark.data.keys()) print("Test file: %s" % self.data.keys()) diff --git a/Regression/PostProcessingUtils/post_processing_utils.py b/Regression/PostProcessingUtils/post_processing_utils.py index 55bc357c28b..cbd55c433d3 100644 --- a/Regression/PostProcessingUtils/post_processing_utils.py +++ b/Regression/PostProcessingUtils/post_processing_utils.py @@ -164,6 +164,6 @@ def check_random_filter(fn, filtered_fn, random_fraction, dim, species_name): ## Dirty trick to find particles with the same ID + same CPU (does not work with more than 10 ## MPI ranks) random_filter_expression = ( - "np.isin(ids + 0.1*cpus," "ids_filtered_warpx + 0.1*cpus_filtered_warpx)" + "np.isin(ids + 0.1*cpus,ids_filtered_warpx + 0.1*cpus_filtered_warpx)" ) check_particle_filter(fn, filtered_fn, random_filter_expression, dim, species_name) diff --git a/Tools/Algorithms/psatd.ipynb b/Tools/Algorithms/psatd.ipynb index 3e8b2a82d2e..6153b904968 100644 --- a/Tools/Algorithms/psatd.ipynb +++ b/Tools/Algorithms/psatd.ipynb @@ -64,9 +64,9 @@ " Wd[i, j] = Wd[i, j].expand().simplify()\n", " diff = W[i, j] - Wd[i, j]\n", " diff = diff.expand().simplify()\n", - " assert (\n", - " diff == 0\n", - " ), f\"Diagonalization failed: W[{i},{j}] - Wd[{i},{j}] = {diff} is not zero\"\n", + " assert diff == 0, (\n", + " f\"Diagonalization failed: W[{i},{j}] - Wd[{i},{j}] = {diff} is not zero\"\n", + " )\n", "\n", "\n", "def simple_mat(W):\n", diff --git a/Tools/Algorithms/stencil.py b/Tools/Algorithms/stencil.py index 2fe67d1c681..1fafd3837a7 100644 --- a/Tools/Algorithms/stencil.py +++ b/Tools/Algorithms/stencil.py @@ -361,10 +361,10 @@ def run_main( print("\nCell size:") print(f"- dx = {dx_boosted}") if dims > 1: - print(f"- dx[1:]/dx[0] = {dx_boosted[1:]/dx_boosted[0]}") + print(f"- dx[1:]/dx[0] = {dx_boosted[1:] / dx_boosted[0]}") print("\nTime step:") print(f"- dt = {dt}") - print(f"- c*dt/dx = {c*dt/dx_boosted}") + print(f"- c*dt/dx = {c * dt / dx_boosted}") print("\nSpectral order:") print(f"- order = {psatd_order}") print("\nLorentz boost, Galilean velocity:") diff --git a/Tools/Release/releasePR.py b/Tools/Release/releasePR.py index 3fd1b016efd..9dfa178e5b4 100755 --- a/Tools/Release/releasePR.py +++ b/Tools/Release/releasePR.py @@ -171,7 +171,7 @@ def concat_answers(answers): "--title", f"Release {WarpX_version}", "--body", - f"""Prepare the {datetime.now().strftime('%B')} release of WarpX: + f"""Prepare the {datetime.now().strftime("%B")} release of WarpX: ```bash # update dependencies ./Tools/Release/updateAMReX.py diff --git a/setup.py b/setup.py index c3f2e730726..ad5501371c5 100644 --- a/setup.py +++ b/setup.py @@ -342,7 +342,7 @@ def build_extension(self, ext): "Programming Language :: Python :: 3.12", "Programming Language :: Python :: 3.13", ( - "License :: OSI Approved :: " "BSD License" + "License :: OSI Approved :: BSD License" ), # TODO: use real SPDX: BSD-3-Clause-LBNL ], # new PEP 639 format From 188e401c9b944c6d87b33447e7a841998f2f3abc Mon Sep 17 00:00:00 2001 From: Arianna Formenti Date: Tue, 14 Jan 2025 09:57:09 -0800 Subject: [PATCH 145/278] Add doxygen documentation regarding `is_igf_2d_slices` (#5556) This is a boolean flag that lets the user select between two types of FFT-based Poisson solver: * fully 3D: solves Poisson equation in 3D geometry * quasi-3D: solves many 2D 
Poisson equations, one for each `z` slice, independently of one another --- Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H | 3 ++- Source/ablastr/fields/IntegratedGreenFunctionSolver.H | 1 + Source/ablastr/fields/PoissonSolver.H | 1 + 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H index f57cfff6080..0a0b0e2be48 100755 --- a/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H +++ b/Source/FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H @@ -76,14 +76,15 @@ public: * \f[ * \vec{\nabla}^2 r \phi - (\vec{\beta}\cdot\vec{\nabla})^2 r \phi = -\frac{r \rho}{\epsilon_0} * \f] - * \param[out] phi The potential to be computed by this function * \param[in] rho The charge density for a given species (relativistic solver) * or total charge density (labframe solver) + * \param[out] phi The potential to be computed by this function * \param[in] beta Represents the velocity of the source of `phi` * \param[in] required_precision The relative convergence threshold for the MLMG solver * \param[in] absolute_tolerance The absolute convergence threshold for the MLMG solver * \param[in] max_iters The maximum number of iterations allowed for the MLMG solver * \param[in] verbosity The verbosity setting for the MLMG solver + * \param[in] is_igf_2d_slices boolean to select between fully 3D Poisson solver and quasi-3D, i.e. one 2D Poisson solve on every z slice (default: false) */ void computePhi ( ablastr::fields::MultiLevelScalarField const& rho, diff --git a/Source/ablastr/fields/IntegratedGreenFunctionSolver.H b/Source/ablastr/fields/IntegratedGreenFunctionSolver.H index 9492cff885e..b34678055a8 100755 --- a/Source/ablastr/fields/IntegratedGreenFunctionSolver.H +++ b/Source/ablastr/fields/IntegratedGreenFunctionSolver.H @@ -124,6 +124,7 @@ namespace ablastr::fields * @param[out] phi the electrostatic potential amrex::MultiFab * @param[in] cell_size an arreay of 3 reals dx dy dz * @param[in] ba amrex::BoxArray with the grid of a given level + * @param[in] is_igf_2d boolean to select between fully 3D Poisson solver and quasi-3D, i.e. one 2D Poisson solve on every z slice (default: false) */ void computePhiIGF (amrex::MultiFab const & rho, diff --git a/Source/ablastr/fields/PoissonSolver.H b/Source/ablastr/fields/PoissonSolver.H index 1cc7d39e9b0..c79736e0d1b 100755 --- a/Source/ablastr/fields/PoissonSolver.H +++ b/Source/ablastr/fields/PoissonSolver.H @@ -179,6 +179,7 @@ inline void interpolatePhiBetweenLevels ( * \param[in] grids the grids per level (e.g., from AmrMesh) * \param[in] grid_type Integer that corresponds to the type of grid used in the simulation (collocated, staggered, hybrid) * \param[in] is_solver_igf_on_lev0 boolean to select the Poisson solver: 1 for FFT on level 0 & Multigrid on other levels, 0 for Multigrid on all levels + * \param[in] is_igf_2d boolean to select between fully 3D Poisson solver and quasi-3D, i.e.
one 2D Poisson solve on every z slice (default: false) * \param[in] eb_enabled solve with embedded boundaries * \param[in] do_single_precision_comms perform communications in single precision * \param[in] rel_ref_ratio mesh refinement ratio between levels (default: 1) From a20365520d143814f3d18bb9fa97198a750a810a Mon Sep 17 00:00:00 2001 From: Arianna Formenti Date: Tue, 14 Jan 2025 10:15:02 -0800 Subject: [PATCH 146/278] Update differential luminosity test (#5555) Fixes the species in the test `test_3d_diff_lumi_diag_photons` from `electron` and `positron` to `photons`. Updates the checksums accordingly. (Off Topic: it's 5555!) --- .../inputs_test_3d_diff_lumi_diag_photons | 6 +++-- .../test_3d_diff_lumi_diag_photons.json | 25 ++++++++++--------- 2 files changed, 17 insertions(+), 14 deletions(-) diff --git a/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_photons b/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_photons index f0ef254d911..90d66938a4a 100644 --- a/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_photons +++ b/Examples/Tests/diff_lumi_diag/inputs_test_3d_diff_lumi_diag_photons @@ -3,7 +3,9 @@ FILE = inputs_base_3d # Test with electrons/positrons: use parse_density_function -beam1.species_type = electron +particles.photon_species = beam1 beam2 + +beam1.species_type = photon beam1.injection_style = "NUniformPerCell" beam1.num_particles_per_cell_each_dim = 1 1 1 beam1.profile = parse_density_function @@ -15,7 +17,7 @@ beam1.ymax = 4*sigmay beam1.zmin =-muz-4*sigmaz beam1.zmax =-muz+4*sigmaz -beam2.species_type = positron +beam2.species_type = photon beam2.injection_style = "NUniformPerCell" beam2.num_particles_per_cell_each_dim = 1 1 1 beam2.profile = parse_density_function diff --git a/Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag_photons.json b/Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag_photons.json index 09b2031cdd2..86659bafd79 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag_photons.json +++ b/Regression/Checksum/benchmarks_json/test_3d_diff_lumi_diag_photons.json @@ -1,24 +1,25 @@ { "lev=0": { - "rho_beam1": 656097367.2335038, - "rho_beam2": 656097367.2335038 + "rho_beam1": 0.0, + "rho_beam2": 0.0 }, - "beam1": { + "beam2": { "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, - "particle_momentum_z": 1.7512476113279403e-11, + "particle_momentum_z": 1.7511853009715152e-11, "particle_position_x": 0.2621440000000001, - "particle_position_y": 0.005242880000000001, - "particle_position_z": 314572.79999473685, - "particle_weight": 11997744756.90957 + "particle_position_y": 0.005242880000000004, + "particle_position_z": 314572.8000000002, + "particle_weight": 11997744756.909575 }, - "beam2": { + "beam1": { "particle_momentum_x": 0.0, "particle_momentum_y": 0.0, - "particle_momentum_z": 1.7513431895752007e-11, + "particle_momentum_z": 1.75121641230803e-11, "particle_position_x": 0.2621440000000001, - "particle_position_y": 0.005242880000000001, - "particle_position_z": 314572.79999472946, + "particle_position_y": 0.005242880000000004, + "particle_position_z": 314572.8000000004, "particle_weight": 11997744756.909573 } -} \ No newline at end of file +} + From c666e75508fef6adb2a25a756546cf11b1e290d8 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Tue, 14 Jan 2025 20:18:08 +0100 Subject: [PATCH 147/278] Pass `gamma_boost` as argument to `AcceleratorLattice`, `LatticeElementFinder` (#5541) This PR is a small step towards the goal of reducing the usage of static variables in the WarpX class.
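In sketch form, this is ordinary dependency injection: the caller passes `gamma_boost` down the call chain instead of the callee reading the static `WarpX::gamma_boost`. A minimal, hedged illustration of the pattern follows (generic code, not the actual WarpX classes):

```cpp
#include <cmath>

struct ElementFinder {
    double m_gamma_boost{1.0};
    double m_uz_boost{0.0};

    // The caller supplies gamma_boost explicitly; previously the analogous
    // WarpX code read the static WarpX::gamma_boost inside this function.
    void init (double gamma_boost) {
        m_gamma_boost = gamma_boost;
        // derived quantity, analogous to m_uz_boost in the hunks below (in units of c)
        m_uz_boost = std::sqrt(m_gamma_boost*m_gamma_boost - 1.0);
    }
};

int main () {
    ElementFinder finder;
    finder.init(2.0);  // explicit argument instead of a read of global state
    return 0;
}
```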
--------- Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> --- Source/AcceleratorLattice/AcceleratorLattice.H | 7 ++++++- Source/AcceleratorLattice/AcceleratorLattice.cpp | 6 ++++-- Source/AcceleratorLattice/LatticeElementFinder.H | 4 +++- Source/AcceleratorLattice/LatticeElementFinder.cpp | 7 ++++--- Source/Parallelization/WarpXRegrid.cpp | 2 +- Source/WarpX.cpp | 2 +- 6 files changed, 19 insertions(+), 9 deletions(-) diff --git a/Source/AcceleratorLattice/AcceleratorLattice.H b/Source/AcceleratorLattice/AcceleratorLattice.H index 4b3eff46094..e8acc2c8743 100644 --- a/Source/AcceleratorLattice/AcceleratorLattice.H +++ b/Source/AcceleratorLattice/AcceleratorLattice.H @@ -42,10 +42,15 @@ public: * \brief Initialize the element finder instance at the given level of refinement * * @param[in] lev the level of refinement + * @param[in] gamma_boost the Lorentz factor of the boosted frame * @param[in] ba the box array at the level of refinement * @param[in] dm the distribution map at the level of refinement */ - void InitElementFinder (int lev, amrex::BoxArray const & ba, amrex::DistributionMapping const & dm); + void InitElementFinder ( + int lev, + amrex::Real gamma_boost, + amrex::BoxArray const & ba, + amrex::DistributionMapping const & dm); /** * \brief Update the element finder, needed when the simulation frame has moved relative to the lab frame diff --git a/Source/AcceleratorLattice/AcceleratorLattice.cpp b/Source/AcceleratorLattice/AcceleratorLattice.cpp index edccae9374a..b0513f767a0 100644 --- a/Source/AcceleratorLattice/AcceleratorLattice.cpp +++ b/Source/AcceleratorLattice/AcceleratorLattice.cpp @@ -76,13 +76,15 @@ AcceleratorLattice::ReadLattice (std::string const & root_name, amrex::ParticleR } void -AcceleratorLattice::InitElementFinder (int const lev, amrex::BoxArray const & ba, amrex::DistributionMapping const & dm) +AcceleratorLattice::InitElementFinder ( + int const lev, amrex::Real const gamma_boost, + amrex::BoxArray const & ba, amrex::DistributionMapping const & dm) { if (m_lattice_defined) { m_element_finder = std::make_unique>(ba, dm); for (amrex::MFIter mfi(*m_element_finder); mfi.isValid(); ++mfi) { - (*m_element_finder)[mfi].InitElementFinder(lev, mfi, *this); + (*m_element_finder)[mfi].InitElementFinder(lev, gamma_boost, mfi, *this); } } } diff --git a/Source/AcceleratorLattice/LatticeElementFinder.H b/Source/AcceleratorLattice/LatticeElementFinder.H index 6773ed56a65..f7eb5c66531 100644 --- a/Source/AcceleratorLattice/LatticeElementFinder.H +++ b/Source/AcceleratorLattice/LatticeElementFinder.H @@ -30,10 +30,12 @@ struct LatticeElementFinder * \brief Initialize the element finder at the level and grid * * @param[in] lev the refinement level + * @param[in] gamma_boost the Lorentz factor of the boosted frame * @param[in] a_mfi specifies the grid where the finder is defined * @param[in] accelerator_lattice a reference to the accelerator lattice at the refinement level */ - void InitElementFinder (int lev, amrex::MFIter const& a_mfi, + void InitElementFinder (int lev, amrex::Real gamma_boost, + amrex::MFIter const& a_mfi, AcceleratorLattice const& accelerator_lattice); /** diff --git a/Source/AcceleratorLattice/LatticeElementFinder.cpp b/Source/AcceleratorLattice/LatticeElementFinder.cpp index ec784049760..64e593aee30 100644 --- a/Source/AcceleratorLattice/LatticeElementFinder.cpp +++ b/Source/AcceleratorLattice/LatticeElementFinder.cpp @@ -15,7 +15,8 @@ using namespace amrex::literals; void -LatticeElementFinder::InitElementFinder (int const lev, 
amrex::MFIter const& a_mfi, +LatticeElementFinder::InitElementFinder (int const lev, const amrex::Real gamma_boost, + amrex::MFIter const& a_mfi, AcceleratorLattice const& accelerator_lattice) { @@ -26,8 +27,8 @@ LatticeElementFinder::InitElementFinder (int const lev, amrex::MFIter const& a_m m_dz = WarpX::CellSize(lev)[2]; - m_gamma_boost = WarpX::gamma_boost; - m_uz_boost = std::sqrt(WarpX::gamma_boost*WarpX::gamma_boost - 1._prt)*PhysConst::c; + m_gamma_boost = gamma_boost; + m_uz_boost = std::sqrt(m_gamma_boost*m_gamma_boost - 1._prt)*PhysConst::c; AllocateIndices(accelerator_lattice); diff --git a/Source/Parallelization/WarpXRegrid.cpp b/Source/Parallelization/WarpXRegrid.cpp index 7adc00ed523..81cbb55c2c7 100644 --- a/Source/Parallelization/WarpXRegrid.cpp +++ b/Source/Parallelization/WarpXRegrid.cpp @@ -285,7 +285,7 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi } // Re-initialize the lattice element finder with the new ba and dm. - m_accelerator_lattice[lev]->InitElementFinder(lev, ba, dm); + m_accelerator_lattice[lev]->InitElementFinder(lev, gamma_boost, ba, dm); if (costs[lev] != nullptr) { diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 3f53decbb83..96335e10c5e 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -2100,7 +2100,7 @@ WarpX::AllocLevelData (int lev, const BoxArray& ba, const DistributionMapping& d guard_cells.ng_alloc_Rho, guard_cells.ng_alloc_F, guard_cells.ng_alloc_G, aux_is_nodal); m_accelerator_lattice[lev] = std::make_unique(); - m_accelerator_lattice[lev]->InitElementFinder(lev, ba, dm); + m_accelerator_lattice[lev]->InitElementFinder(lev, gamma_boost, ba, dm); } From 57703f8fc25a4c05ed887fd75ea68306f694a99f Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 14 Jan 2025 12:29:37 -0800 Subject: [PATCH 148/278] Use `amrex::getParticleCell` More (#5557) One leftover occurrence in `ParticleReductionFunctor`. Follow-up to #5118 --- .../ParticleReductionFunctor.cpp | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.cpp b/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.cpp index d6cd27f7cc0..abc3fb2c8cf 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.cpp +++ b/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.cpp @@ -69,6 +69,9 @@ ParticleReductionFunctor::operator() (amrex::MultiFab& mf_dst, const int dcomp, get_particle_position(p, xw, yw, zw); // Get position in AMReX convention to calculate corresponding index. + // Ideally this will be replaced with the AMReX NGP interpolator + // Always do x direction. No RZ case because it's not implemented, and code + // will have aborted const auto [ii, jj, kk] = amrex::getParticleCell(p, plo, dxi).dim3(); // Fix dimensions since parser assumes u = gamma * v / c @@ -97,20 +100,8 @@ ParticleReductionFunctor::operator() (amrex::MultiFab& mf_dst, const int dcomp, // Ideally this will be replaced with the AMReX NGP interpolator // Always do x direction.
No RZ case because it's not implemented, and code // will have aborted - int ii = 0, jj = 0, kk = 0; - const amrex::ParticleReal x = p.pos(0); - const amrex::Real lx = (x - plo[0]) * dxi[0]; - ii = static_cast(amrex::Math::floor(lx)); -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_3D) - const amrex::ParticleReal y = p.pos(1); - const amrex::Real ly = (y - plo[1]) * dxi[1]; - jj = static_cast(amrex::Math::floor(ly)); -#endif -#if defined(WARPX_DIM_3D) - const amrex::ParticleReal z = p.pos(2); - const amrex::Real lz = (z - plo[2]) * dxi[2]; - kk = static_cast(amrex::Math::floor(lz)); -#endif + const auto [ii, jj, kk] = amrex::getParticleCell(p, plo, dxi).dim3(); + // Fix dimensions since parser assumes u = gamma * v / c const amrex::ParticleReal ux = p.rdata(PIdx::ux) / PhysConst::c; const amrex::ParticleReal uy = p.rdata(PIdx::uy) / PhysConst::c; From ce487f93832919bfc34979e04a31f3c0020d7a22 Mon Sep 17 00:00:00 2001 From: U Sinha <46874465+sinha-r@users.noreply.github.com> Date: Wed, 15 Jan 2025 14:39:49 -0800 Subject: [PATCH 149/278] Containerfile (#5141) Containerfile to run WarpX at NERSC # To Do - [x] GPU-aware MPI is needed: we need to use `shifter` instead of podman - [x] heFFTe has been disabled as of now due to compilation errors (this can be skipped for now and added in a follow-up PR later on, we do not use heFFTe extensively yet) --------- Co-authored-by: Axel Huebl --- Tools/machines/perlmutter-nersc/Containerfile | 167 ++++++++++++++++++ 1 file changed, 167 insertions(+) create mode 100644 Tools/machines/perlmutter-nersc/Containerfile diff --git a/Tools/machines/perlmutter-nersc/Containerfile b/Tools/machines/perlmutter-nersc/Containerfile new file mode 100644 index 00000000000..5a8553c2619 --- /dev/null +++ b/Tools/machines/perlmutter-nersc/Containerfile @@ -0,0 +1,167 @@ +# Build this container from its current directory: +# podman build --build-arg NJOBS=6 -t warpx-perlmutter-dev . +# Adjust NJOBS to the number of processes to use for parallel compilation. +# +# Run from the via WarpX source directory +# podman run -v ${PWD}:/opt/warpx -it warpx-perlmutter-dev +# then +# cd /opt/warpx +# and compile. + +# Base System and Essential Tools Installation +FROM nvidia/cuda:12.6.0-devel-ubuntu22.04 AS base + +# parallel builds +ARG NJOBS +ENV NJOBS=$NJOBS + +# Set up environment variables +ENV DEBIAN_FRONTEND=noninteractive \ + SW_DIR=/opt/software \ + FORCE_UNSAFE_CONFIGURE=1 + +# Perlmutter A100 compilation optimization +ENV AMREX_CUDA_ARCH=8.0 \ + CUDAARCHS=80 \ + CXXFLAGS="-march=znver3" \ + CFLAGS="-march=znver3" + +# Install essential system dependencies including MPI libraries +RUN apt-get update && \ + apt-get install -y --no-install-recommends \ + autoconf \ + build-essential \ + ca-certificates \ + coreutils \ + curl \ + environment-modules \ + gfortran \ + git \ + openssh-server \ + python3 \ + python3-pip \ + python3-dev \ + python3-venv \ + unzip \ + vim \ + libmpich-dev \ + cmake \ + libblas-dev \ + liblapack-dev \ + g++ \ + pkg-config \ + libbz2-dev \ + zlib1g-dev \ + libpng-dev \ + libzstd-dev \ + && rm -rf /var/lib/apt/lists/* + +# Install c-blosc from source +FROM base AS c-blosc + +RUN git clone -b v1.21.1 https://github.com/Blosc/c-blosc.git /tmp/c-blosc && \ + cmake \ + -S /tmp/c-blosc \ + -B /tmp/c-blosc-build \ + -DCMAKE_INSTALL_PREFIX=/usr .. 
&& \ + cmake --build /tmp/c-blosc-build \ + --target install \ + -j${NJOBS} && \ + rm -rf /tmp/c-blosc* + +# Install ADIOS2 from source +FROM base AS adios2 + +# Ensure c-blosc is installed before ADIOS2 +COPY --from=c-blosc /usr /usr + +# Verify the location of Blosc library +RUN find /usr -name 'libblosc*' + +# Ensure Blosc library paths are correctly configured +ENV BLOSC_LIBRARY=/usr/lib/libblosc.so.1.21.1 +ENV BLOSC_INCLUDE_DIR=/usr/include + +# Install ADIOS2 +RUN git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git /tmp/adios2 && \ + cd /tmp/adios2 && \ + cmake -S . -B build \ + -DADIOS2_USE_Blosc=ON \ + -DBLOSC_LIBRARY=${BLOSC_LIBRARY} \ + -DBLOSC_INCLUDE_DIR=${BLOSC_INCLUDE_DIR} \ + -DADIOS2_USE_Fortran=OFF \ + -DADIOS2_USE_Python=OFF \ + -DADIOS2_USE_ZeroMQ=OFF \ + -DADIOS2_USE_BZip2=ON \ + -DADIOS2_USE_ZFP=OFF \ + -DADIOS2_USE_SZ=OFF \ + -DADIOS2_USE_MGARD=OFF \ + -DADIOS2_USE_PNG=ON \ + -DCMAKE_INSTALL_PREFIX=/usr .. && \ + cmake --build build --target install -j${NJOBS} && \ + rm -rf /tmp/adios2 + +# Install BLAS++ and LAPACK++ +FROM base AS blaspp_lapackpp + +RUN git clone -b v2024.05.31 https://github.com/icl-utk-edu/blaspp.git /tmp/blaspp && \ + cd /tmp/blaspp && \ + cmake -S . -B build \ + -Duse_openmp=OFF \ + -Dgpu_backend=cuda \ + -DCMAKE_CXX_STANDARD=17 \ + -DCMAKE_INSTALL_PREFIX=/usr \ + -DBLAS_LIBRARIES=/usr/lib/x86_64-linux-gnu/libblas.so \ + -DLAPACK_LIBRARIES=/usr/lib/x86_64-linux-gnu/liblapack.so .. && \ + cmake --build build \ + --target install \ + -j${NJOBS} && \ + rm -rf /tmp/blaspp + +RUN git clone -b v2024.05.31 https://github.com/icl-utk-edu/lapackpp.git /tmp/lapackpp && \ + cd /tmp/lapackpp && \ + cmake -S . -B build \ + -DCMAKE_CXX_STANDARD=17 \ + -Dbuild_tests=OFF \ + -DCMAKE_INSTALL_PREFIX=/usr \ + -DLAPACK_LIBRARIES=/usr/lib/x86_64-linux-gnu/liblapack.so .. && \ + cmake --build build \ + --target install -j${NJOBS} && \ + rm -rf /tmp/lapackpp + +# Final Image +FROM base AS final + +# Copy installed software from previous stages +COPY --from=c-blosc /usr /usr +COPY --from=adios2 /usr /usr +COPY --from=blaspp_lapackpp /usr /usr + +# Create and activate Python virtual environment +RUN python3 -m venv /opt/venv && \ + /opt/venv/bin/pip install --no-cache-dir \ + wheel \ + numpy \ + pandas \ + scipy \ + matplotlib \ + jupyter \ + scikit-learn \ + openpmd-api \ + yt \ + cupy-cuda12x \ + torch \ + optimas[all] \ + cython \ + packaging \ + build \ + setuptools + +# Set up the environment for the virtual environment +ENV PATH="/opt/venv/bin:${PATH}" + +# Set up entrypoint +ENTRYPOINT ["/bin/bash", "-c"] + +# Default command +CMD ["/bin/bash"] From a053457086a82092e85b6afcf501236f1a32ff25 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 16 Jan 2025 08:01:31 -0800 Subject: [PATCH 150/278] AMReX/pyAMReX/PICSAR: Weekly Update (#5564) Weekly update to latest AMReX. Weekly update to latest pyAMReX. Weekly update to latest PICSAR (no changes). ```console ./Tools/Release/updateAMReX.py ./Tools/Release/updatepyAMReX.py ./Tools/Release/updatePICSAR.py ``` --------- Signed-off-by: Axel Huebl --- .github/workflows/cuda.yml | 2 +- cmake/dependencies/AMReX.cmake | 2 +- cmake/dependencies/pyAMReX.cmake | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index aa0daa2a718..1309dc4bb81 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -127,7 +127,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 25.01 && cd - + cd ../amrex && git checkout --detach e761abff95afbfa442cbe108027094bbddef5b11 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 82d56d98a5f..6bde3785176 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -294,7 +294,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "25.01" +set(WarpX_amrex_branch "e761abff95afbfa442cbe108027094bbddef5b11" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index 777b75e2ed3..c9ee2732b62 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "25.01" +set(WarpX_pyamrex_branch "47331d7891bda9b02e75cf452d2c55fe76c77d06" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") From 1ec42fb554a454c37ebeebd7ebe1e2bd3046f3bd Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 16 Jan 2025 18:59:49 -0800 Subject: [PATCH 151/278] Doc: Update Frontier (#5570) Touch up modules to new system defaults, mitigate increased Cython 3+ support in the Python data science ecosystem. Close #5561. - [x] compile tested - [x] runtime tested --- .../frontier_warpx.profile.example | 8 +++---- .../frontier-olcf/install_dependencies.sh | 21 ++++++++++--------- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/Tools/machines/frontier-olcf/frontier_warpx.profile.example b/Tools/machines/frontier-olcf/frontier_warpx.profile.example index 5ca6e1e1622..ad78ab1acaf 100644 --- a/Tools/machines/frontier-olcf/frontier_warpx.profile.example +++ b/Tools/machines/frontier-olcf/frontier_warpx.profile.example @@ -6,12 +6,12 @@ export MY_PROFILE=$(cd $(dirname $BASH_SOURCE) && pwd)"/"$(basename $BASH_SOURCE if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in your $MY_PROFILE file! 
Please edit its line 2 to continue!"; return; fi # required dependencies -module switch Core Core/24.07 module load cmake/3.27.9 module load craype-accel-amd-gfx90a module load rocm/5.7.1 module load cray-mpich/8.1.28 module load cce/17.0.0 # must be loaded after rocm +# https://docs.olcf.ornl.gov/systems/frontier_user_guide.html#compatible-compiler-rocm-toolchain-versions # optional: faster builds module load ccache @@ -27,11 +27,11 @@ export LD_LIBRARY_PATH=${HOME}/sw/frontier/gpu/blaspp-2024.05.31/lib64:$LD_LIBRA export LD_LIBRARY_PATH=${HOME}/sw/frontier/gpu/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH # optional: for QED lookup table generation support -module load boost/1.79.0 +module load boost/1.85.0 # optional: for openPMD support -module load adios2/2.8.3-mpi -module load hdf5/1.12.1-mpi +module load adios2/2.10.0-mpi +module load hdf5/1.14.3-mpi # optional: for Python bindings or libEnsemble module load cray-python/3.11.5 diff --git a/Tools/machines/frontier-olcf/install_dependencies.sh b/Tools/machines/frontier-olcf/install_dependencies.sh index fd1d28e76b5..8e8565788bc 100755 --- a/Tools/machines/frontier-olcf/install_dependencies.sh +++ b/Tools/machines/frontier-olcf/install_dependencies.sh @@ -87,11 +87,20 @@ python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel python3 -m pip install --upgrade setuptools -# cupy and h5py need an older Cython +# cupy needs an older Cython # https://github.com/cupy/cupy/issues/4610 -# https://github.com/h5py/h5py/issues/2268 python3 -m pip install --upgrade "cython<3.0" +# cupy for ROCm +# https://docs.cupy.dev/en/stable/install.html#building-cupy-for-rocm-from-source +# https://github.com/cupy/cupy/issues/7830 +CC=cc CXX=CC \ +CUPY_INSTALL_USE_HIP=1 \ +ROCM_HOME=${ROCM_PATH} \ +HCC_AMDGPU_TARGET=${AMREX_AMD_ARCH} \ + python3 -m pip install -v cupy +python3 -m pip install --upgrade "cython>=3.0" # for latest mpi4py and everything else python3 -m pip install --upgrade numpy +python3 -m pip install --upgrade h5py python3 -m pip install --upgrade pandas python3 -m pip install --upgrade scipy MPICC="cc -shared" python3 -m pip install --upgrade mpi4py --no-cache-dir --no-build-isolation --no-binary mpi4py @@ -100,14 +109,6 @@ python3 -m pip install --upgrade matplotlib python3 -m pip install --upgrade yt # install or update WarpX dependencies such as picmistandard python3 -m pip install --upgrade -r $HOME/src/warpx/requirements.txt -# cupy for ROCm -# https://docs.cupy.dev/en/stable/install.html#building-cupy-for-rocm-from-source -# https://github.com/cupy/cupy/issues/7830 -CC=cc CXX=CC \ -CUPY_INSTALL_USE_HIP=1 \ -ROCM_HOME=${ROCM_PATH} \ -HCC_AMDGPU_TARGET=${AMREX_AMD_ARCH} \ - python3 -m pip install -v cupy # optional: for optimas (based on libEnsemble & ax->botorch->gpytorch->pytorch) #python3 -m pip install --upgrade torch --index-url https://download.pytorch.org/whl/rocm5.4.2 #python3 -m pip install -r $HOME/src/warpx/Tools/optimas/requirements.txt From e4bdcae730440f7727be560fc2c072849b1e5773 Mon Sep 17 00:00:00 2001 From: Andrew Myers Date: Fri, 17 Jan 2025 11:38:50 -0800 Subject: [PATCH 152/278] Fix restart for implicit simulations (#5489) Fix #5482. Also, with this PR the temporary implicit particle attributes are not written to the checkpoint file, and also are not communicated when doing a redistribute of the particles. Note that this requires AMReX PR #4264. A CI test of this restart is included in PR #5475. 
--------- Co-authored-by: David Grote --- .../FlushFormats/FlushFormatCheckpoint.cpp | 36 ++++++++++++++----- Source/Diagnostics/WarpXIO.cpp | 8 +++++ .../ImplicitSolvers/ImplicitSolver.H | 2 ++ .../ImplicitSolvers/ImplicitSolver.cpp | 22 ++++++++++++ Source/Initialization/WarpXInitData.cpp | 16 +-------- 5 files changed, 60 insertions(+), 24 deletions(-) diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp index 788e040b0ee..a3a348d90ee 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp @@ -188,31 +188,49 @@ FlushFormatCheckpoint::CheckpointParticles ( Vector real_names; Vector int_names; + Vector write_real_comps; + Vector write_int_comps; // note: positions skipped here, since we reconstruct a plotfile SoA from them - real_names.push_back("weight"); - real_names.push_back("momentum_x"); - real_names.push_back("momentum_y"); - real_names.push_back("momentum_z"); - + std::vector const fixed_names = {"weight", + "momentum_x", + "momentum_y", + "momentum_z" #ifdef WARPX_DIM_RZ - real_names.push_back("theta"); + ,"theta" #endif + }; + + for (auto const& name : fixed_names) { + real_names.push_back(name); + write_real_comps.push_back(1); + } + + int const compile_time_comps = static_cast(real_names.size()); // get the names of the real comps // note: skips the mandatory AMREX_SPACEDIM positions for pure SoA real_names.resize(pc->NumRealComps() - AMREX_SPACEDIM); + write_real_comps.resize(pc->NumRealComps() - AMREX_SPACEDIM); auto runtime_rnames = pc->getParticleRuntimeComps(); for (auto const& x : runtime_rnames) { - real_names[x.second + PIdx::nattribs - AMREX_SPACEDIM] = x.first; + int const i = x.second + PIdx::nattribs - AMREX_SPACEDIM; + real_names[i] = x.first; + write_real_comps[i] = pc->h_redistribute_real_comp[i + compile_time_comps]; } // and the int comps int_names.resize(pc->NumIntComps()); + write_int_comps.resize(pc->NumIntComps()); auto runtime_inames = pc->getParticleRuntimeiComps(); - for (auto const& x : runtime_inames) { int_names[x.second+0] = x.first; } + for (auto const& x : runtime_inames) { + int const i = x.second + 0; + int_names[i] = x.first; + write_int_comps[i] = pc->h_redistribute_int_comp[i+AMREX_SPACEDIM]; + } - pc->Checkpoint(dir, part_diag.getSpeciesName(), true, + pc->Checkpoint(dir, part_diag.getSpeciesName(), + write_real_comps, write_int_comps, real_names, int_names); } } diff --git a/Source/Diagnostics/WarpXIO.cpp b/Source/Diagnostics/WarpXIO.cpp index 43415daf151..f2921f820fd 100644 --- a/Source/Diagnostics/WarpXIO.cpp +++ b/Source/Diagnostics/WarpXIO.cpp @@ -404,4 +404,12 @@ WarpX::InitFromCheckpoint () mypc->AllocData(); mypc->Restart(restart_chkfile); + if (m_implicit_solver) { + + m_implicit_solver->Define(this); + m_implicit_solver->GetParticleSolverParams( max_particle_its_in_implicit_scheme, + particle_tol_in_implicit_scheme ); + m_implicit_solver->CreateParticleAttributes(); + } + } diff --git a/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.H b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.H index f8f0390e17a..0d2083793ac 100644 --- a/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.H +++ b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.H @@ -57,6 +57,8 @@ public: a_particle_tol = m_particle_tolerance; } + void CreateParticleAttributes () const; + /** * \brief Advance fields and particles by one time step using the specified implicit algorithm */ diff --git 
a/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp index a6cbdfd307d..da60bc62c46 100644 --- a/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp +++ b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp @@ -1,8 +1,30 @@ #include "ImplicitSolver.H" #include "WarpX.H" +#include "Particles/MultiParticleContainer.H" using namespace amrex; +void ImplicitSolver::CreateParticleAttributes () const +{ + // Set comm to false to that the attributes are not communicated + // nor written to the checkpoint files + int const comm = 0; + + // Add space to save the positions and velocities at the start of the time steps + for (auto const& pc : m_WarpX->GetPartContainer()) { +#if (AMREX_SPACEDIM >= 2) + pc->NewRealComp("x_n", comm); +#endif +#if defined(WARPX_DIM_3D) || defined(WARPX_DIM_RZ) + pc->NewRealComp("y_n", comm); +#endif + pc->NewRealComp("z_n", comm); + pc->NewRealComp("ux_n", comm); + pc->NewRealComp("uy_n", comm); + pc->NewRealComp("uz_n", comm); + } +} + const Geometry& ImplicitSolver::GetGeometry (const int a_lvl) const { AMREX_ASSERT((a_lvl >= 0) && (a_lvl < m_num_amr_levels)); diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 5de8912be6a..5aca5ad89da 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -691,21 +691,7 @@ WarpX::InitFromScratch () m_implicit_solver->Define(this); m_implicit_solver->GetParticleSolverParams( max_particle_its_in_implicit_scheme, particle_tol_in_implicit_scheme ); - - // Add space to save the positions and velocities at the start of the time steps - for (auto const& pc : *mypc) { -#if (AMREX_SPACEDIM >= 2) - pc->NewRealComp("x_n"); -#endif -#if defined(WARPX_DIM_3D) || defined(WARPX_DIM_RZ) - pc->NewRealComp("y_n"); -#endif - pc->NewRealComp("z_n"); - pc->NewRealComp("ux_n"); - pc->NewRealComp("uy_n"); - pc->NewRealComp("uz_n"); - } - + m_implicit_solver->CreateParticleAttributes(); } mypc->AllocData(); From 1c8d21ec491dd8a72d0544c2234dc0394153c436 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 17 Jan 2025 17:42:46 -0800 Subject: [PATCH 153/278] CI: install clang dependencies directly from LLVM (#5575) I think this was a suggestion by @WeiqunZhang in the context of debugging the clang sanitizer issue currently addressed in #5492. I'm extracting all related changes from #5492 to implement and test the LLVM installation separately here. This effectively unifies the CI scripts to install clang dependencies into one single script that reads the clang version number from the command line. I think all CI checks should pass here as a prerequisite for debugging the clang sanitizer issue further in #5492. 
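For reference, the unified script now takes the clang version number as its first command-line argument, so the workflow steps reduce to calls such as (paths as in the hunks below):

```bash
# clang-tidy job (previously dependencies/clang15.sh)
.github/workflows/dependencies/clang.sh 15

# sanitizer jobs (previously dependencies/clang17.sh)
.github/workflows/dependencies/clang.sh 17
```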
--- .github/workflows/clang_sanitizers.yml | 4 +- .github/workflows/clang_tidy.yml | 2 +- .../dependencies/{clang17.sh => clang.sh} | 51 +++++++++-------- .github/workflows/dependencies/clang15.sh | 56 ------------------- 4 files changed, 31 insertions(+), 82 deletions(-) rename .github/workflows/dependencies/{clang17.sh => clang.sh} (60%) delete mode 100755 .github/workflows/dependencies/clang15.sh diff --git a/.github/workflows/clang_sanitizers.yml b/.github/workflows/clang_sanitizers.yml index 15dbb00756a..e0947916d3f 100644 --- a/.github/workflows/clang_sanitizers.yml +++ b/.github/workflows/clang_sanitizers.yml @@ -27,7 +27,7 @@ jobs: - uses: actions/checkout@v4 - name: install dependencies run: | - .github/workflows/dependencies/clang17.sh + .github/workflows/dependencies/clang.sh 17 - name: CCache Cache uses: actions/cache@v4 with: @@ -89,7 +89,7 @@ jobs: - uses: actions/checkout@v4 - name: install dependencies run: | - .github/workflows/dependencies/clang17.sh + .github/workflows/dependencies/clang.sh 17 - name: CCache Cache uses: actions/cache@v4 with: diff --git a/.github/workflows/clang_tidy.yml b/.github/workflows/clang_tidy.yml index 6e83b07000f..83d5b86c96b 100644 --- a/.github/workflows/clang_tidy.yml +++ b/.github/workflows/clang_tidy.yml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@v4 - name: install dependencies run: | - .github/workflows/dependencies/clang15.sh + .github/workflows/dependencies/clang.sh 15 - name: set up cache uses: actions/cache@v4 with: diff --git a/.github/workflows/dependencies/clang17.sh b/.github/workflows/dependencies/clang.sh similarity index 60% rename from .github/workflows/dependencies/clang17.sh rename to .github/workflows/dependencies/clang.sh index fb04e2a5914..3ffe6dbc675 100755 --- a/.github/workflows/dependencies/clang17.sh +++ b/.github/workflows/dependencies/clang.sh @@ -1,45 +1,50 @@ #!/usr/bin/env bash # -# Copyright 2024 The WarpX Community +# Copyright 2025 The WarpX Community # # License: BSD-3-Clause-LBNL -# Authors: Luca Fedeli set -eu -o pipefail -# `man apt.conf`: -# Number of retries to perform. If this is non-zero APT will retry -# failed files the given number of times. +# `man apt.conf`: number of retries to perform (if non-zero, +# APT will retry failed files the given number of times). echo 'Acquire::Retries "3";' | sudo tee /etc/apt/apt.conf.d/80-retries -# This dependency file is currently used within a docker container, -# which does not come (among others) with wget, xz-utils, curl, git, -# ccache, and pkg-config pre-installed. 
-sudo apt-get -qqq update +sudo apt-get update sudo apt-get install -y \ cmake \ - clang-17 \ - clang-tidy-17 \ libblas-dev \ - libc++-17-dev \ libboost-math-dev \ libfftw3-dev \ libfftw3-mpi-dev \ libhdf5-openmpi-dev \ liblapack-dev \ libopenmpi-dev \ - libomp-17-dev \ - ninja-build \ - wget \ - xz-utils \ - curl \ - git \ - ccache \ - pkg-config + ninja-build -# Use clang 17 -export CXX=$(which clang++-17) -export CC=$(which clang-17) +# parse clang version number from command line +version_number=${1} + +# add LLVM repository and install clang tools +wget https://apt.llvm.org/llvm.sh +chmod +x llvm.sh +sudo ./llvm.sh ${version_number} + +# install clang, clang-tidy, and +# LLVM implementations of C++ standard library and OpenMP +sudo apt-get update +sudo apt-get install -y \ + clang-${version_number} \ + clang-tidy-${version_number} \ + libc++-${version_number}-dev \ + libomp-${version_number}-dev + +# export compiler flags +export CXX=$(which clang++-${version_number}) +export CC=$(which clang-${version_number}) + +# ccache +$(dirname "$0")/ccache.sh # cmake-easyinstall # diff --git a/.github/workflows/dependencies/clang15.sh b/.github/workflows/dependencies/clang15.sh deleted file mode 100755 index 63d5d70956f..00000000000 --- a/.github/workflows/dependencies/clang15.sh +++ /dev/null @@ -1,56 +0,0 @@ -#!/usr/bin/env bash -# -# Copyright 2023 The WarpX Community -# -# License: BSD-3-Clause-LBNL -# Authors: Luca Fedeli - -set -eu -o pipefail - -# `man apt.conf`: -# Number of retries to perform. If this is non-zero APT will retry -# failed files the given number of times. -echo 'Acquire::Retries "3";' | sudo tee /etc/apt/apt.conf.d/80-retries - -sudo apt-get -qqq update -sudo apt-get install -y \ - cmake \ - clang-15 \ - clang-tidy-15 \ - libblas-dev \ - libc++-15-dev \ - libboost-math-dev \ - libfftw3-dev \ - libfftw3-mpi-dev \ - libhdf5-openmpi-dev \ - liblapack-dev \ - libopenmpi-dev \ - libomp-15-dev \ - ninja-build - -# ccache -$(dirname "$0")/ccache.sh - -# cmake-easyinstall -# -sudo curl -L -o /usr/local/bin/cmake-easyinstall https://raw.githubusercontent.com/ax3l/cmake-easyinstall/main/cmake-easyinstall -sudo chmod a+x /usr/local/bin/cmake-easyinstall -export CEI_SUDO="sudo" -export CEI_TMP="/tmp/cei" - -# BLAS++ & LAPACK++ -cmake-easyinstall \ - --prefix=/usr/local \ - git+https://github.com/icl-utk-edu/blaspp.git \ - -Duse_openmp=OFF \ - -Dbuild_tests=OFF \ - -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ - -DCMAKE_VERBOSE_MAKEFILE=ON - -cmake-easyinstall \ - --prefix=/usr/local \ - git+https://github.com/icl-utk-edu/lapackpp.git \ - -Duse_cmake_find_lapack=ON \ - -Dbuild_tests=OFF \ - -DCMAKE_CXX_COMPILER_LAUNCHER=$(which ccache) \ - -DCMAKE_VERBOSE_MAKEFILE=ON From 3f762336e71f59b078cdfe3b85d6f1413e65f11b Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Sun, 19 Jan 2025 16:02:09 +0100 Subject: [PATCH 154/278] Remove unused code from WarpX class constructor (#5572) This PR removes some unused code. The final goal is to make the WarpX class more readable. 
--- Source/WarpX.cpp | 6 ------ 1 file changed, 6 deletions(-) diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 96335e10c5e..bd193f11bdc 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -272,12 +272,6 @@ WarpX::WarpX () istep.resize(nlevs_max, 0); nsubsteps.resize(nlevs_max, 1); -#if 0 - // no subcycling yet - for (int lev = 1; lev < nlevs_max; ++lev) { - nsubsteps[lev] = MaxRefRatio(lev-1); - } -#endif t_new.resize(nlevs_max, 0.0); t_old.resize(nlevs_max, std::numeric_limits::lowest()); From c8e6418ab5c50fde0ae890ac7d811566b6382d14 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 21 Jan 2025 12:20:43 +0100 Subject: [PATCH 155/278] [pre-commit.ci] pre-commit autoupdate (#5580) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.1 → v0.9.2](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.1...v0.9.2) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 9279bcd038d..fc28ca58869 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.1 + rev: v0.9.2 hooks: # Run the linter - id: ruff From 54333c77d838a76e7d64f7aa5ce85c214ec41ff4 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Tue, 21 Jan 2025 15:42:02 +0100 Subject: [PATCH 156/278] storePhiOnParticles: fix misplaced pragma omp parallel if (#5577) This PR moves `#pragma omp parallel if` from an outer loop on levels down to the loop on tiles. It seems to me that threads should be used for tiles rather than for levels here. --- Source/Diagnostics/ParticleIO.cpp | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Source/Diagnostics/ParticleIO.cpp b/Source/Diagnostics/ParticleIO.cpp index 05c44f5f594..d7a26326e52 100644 --- a/Source/Diagnostics/ParticleIO.cpp +++ b/Source/Diagnostics/ParticleIO.cpp @@ -261,15 +261,15 @@ storePhiOnParticles ( PinnedMemoryParticleContainer& tmp, tmp.NewRealComp("phi"); int const phi_index = tmp.getParticleComps().at("phi"); auto& warpx = WarpX::GetInstance(); -#ifdef AMREX_USE_OMP -#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) -#endif for (int lev=0; lev<=warpx.finestLevel(); lev++) { const amrex::Geometry& geom = warpx.Geom(lev); auto plo = geom.ProbLoArray(); auto dxi = geom.InvCellSizeArray(); amrex::MultiFab const& phi = *warpx.m_fields.get(FieldType::phi_fp, lev); +#ifdef AMREX_USE_OMP + #pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif for (PinnedParIter pti(tmp, lev); pti.isValid(); ++pti) { auto phi_grid = phi[pti].array(); From e3378b06ead93c81712cfd1a8cbe0466f989c600 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Tue, 21 Jan 2025 09:22:28 -0800 Subject: [PATCH 157/278] New embedded boundary data structures in preparation for #5534 (#5574) When updating `E` (and when initializing the field values for `E` and `B` with a parser), we used to check somewhat complicated conditions that relied on the MultiFabs `edge_lengths` and `face_areas`. This PR introduces separate arrays (`m_eb_update_E` and `m_eb_update_B`, which are `iMultiFab` instead of `MultiFab` and thus take up much less memory). 
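With these flags, each field-update kernel reduces to a single per-gridpoint
lookup. A sketch of the pattern introduced below (shown for the Ex push;
`update_Ex_arr` is the `Array4<int>` view of `m_eb_update_E` for the
x-component on the current tile):

    // m_eb_update_E is 0 on gridpoints covered by the embedded boundary: skip the push there
    if (update_Ex_arr && update_Ex_arr(i, j, k) == 0) { return; }
    // otherwise, apply the regular curl update
    Ex(i, j, k) += c2 * dt * ( - T_Algo::DownwardDz(By, coefs_z, n_coefs_z, i, j, k)
                               + T_Algo::DownwardDy(Bz, coefs_y, n_coefs_y, i, j, k)
                               - PhysConst::mu0 * jx(i, j, k) );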
These arrays contain flags that keep track of where to update the fields
(in the figure attached to the PR, these are the gridpoints marked with
black crosses). These arrays are initialized in separate functions which
keep the same EB definition. (PR #5534 will then change this definition.)

The code for the field pusher and field initialization uses these arrays
(instead of `edge_lengths` and `face_areas`). It is thus significantly
simpler and avoids duplicating complicated if conditions.

The above changes have not yet been implemented for the hybrid solver.
This will instead be done in a separate PR:
https://github.com/ECP-WarpX/WarpX/pull/5558. Once this is complete, the
MultiFab `edge_lengths` and `face_areas` will not be needed anymore
(except for the ECT solver), and we will thus only allocate them for the
ECT solver. This should save a significant amount of memory.

---------

Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com>
---
 .../embedded_boundary_cube/inputs_base_3d | 4 +-
 Source/EmbeddedBoundary/WarpXInitEB.cpp | 219 +++++++++++++++++-
 .../FiniteDifferenceSolver/EvolveE.cpp | 93 +++-----
 .../FiniteDifferenceSolver.H | 9 +-
 .../HybridPICModel/HybridPICModel.cpp | 10 +-
 .../MacroscopicEvolveE.cpp | 48 ++--
 .../ImplicitSolvers/WarpXImplicitOps.cpp | 2 +
 Source/FieldSolver/WarpXPushFieldsEM.cpp | 4 +-
 Source/Initialization/WarpXInitData.cpp | 113 +++------
 Source/Parallelization/WarpXRegrid.cpp | 10 +
 Source/WarpX.H | 65 +++++-
 Source/WarpX.cpp | 17 ++
 12 files changed, 409 insertions(+), 185 deletions(-)

diff --git a/Examples/Tests/embedded_boundary_cube/inputs_base_3d b/Examples/Tests/embedded_boundary_cube/inputs_base_3d
index 9710701d871..90ae2996635 100644
--- a/Examples/Tests/embedded_boundary_cube/inputs_base_3d
+++ b/Examples/Tests/embedded_boundary_cube/inputs_base_3d
@@ -13,8 +13,8 @@ boundary.field_lo = pec pec pec
 boundary.field_hi = pec pec pec

 eb2.geom_type = box
-eb2.box_lo = -0.5 -0.5 -0.5
-eb2.box_hi = 0.5 0.5 0.5
+eb2.box_lo = -0.5 -0.5 -0.5 # Ensures that the stair-case EB is exactly at -0.5
+eb2.box_hi = 0.5 0.5 0.5 # Ensures that the stair-case EB is exactly at 0.5
 eb2.box_has_fluid_inside = true

 # Alternatively one could use parser to build EB
 # Note that for amrex EB implicit function, >0 is covered, =0 is boundary and <0 is regular.
diff --git a/Source/EmbeddedBoundary/WarpXInitEB.cpp b/Source/EmbeddedBoundary/WarpXInitEB.cpp
index edbc97a8efe..6ff9a9ca2e0 100644
--- a/Source/EmbeddedBoundary/WarpXInitEB.cpp
+++ b/Source/EmbeddedBoundary/WarpXInitEB.cpp
@@ -291,9 +291,224 @@ WarpX::ScaleAreas (ablastr::fields::VectorField& face_areas,
     }
 }

+void
+WarpX::MarkUpdateCellsStairCase (
+    std::array< std::unique_ptr<amrex::iMultiFab>,3> & eb_update,
+    ablastr::fields::VectorField const& field,
+    amrex::EBFArrayBoxFactory const & eb_fact )
+{
+
+    using ablastr::fields::Direction;
+    using warpx::fields::FieldType;
+
+    // Extract structures for embedded boundaries
+    amrex::FabArray<amrex::EBCellFlagFab> const& eb_flag = eb_fact.getMultiEBCellFlagFab();
+
+    for (int idim = 0; idim < 3; ++idim) {
+
+#ifdef AMREX_USE_OMP
+#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
+#endif
+        for (amrex::MFIter mfi(*field[idim]); mfi.isValid(); ++mfi) {
+
+            const amrex::Box& box = mfi.tilebox();
+            amrex::Array4<int> const & eb_update_arr = eb_update[idim]->array(mfi);
+
+            // Check if the box (including one layer of guard cells) contains a mix of covered and regular cells
+            const amrex::Box& eb_info_box = mfi.tilebox(amrex::IntVect::TheCellVector()).grow(1);
+            amrex::FabType const fab_type = eb_flag[mfi].getType( eb_info_box );
+
+            if (fab_type == amrex::FabType::regular) { // All cells in the box are regular
+
+                // Every cell in box is all regular: update field in every cell
+                amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                    eb_update_arr(i, j, k) = 1;
+                });
+
+            } else if (fab_type == amrex::FabType::covered) { // All cells in the box are covered
+
+                // Every cell in box is all covered: do not update field
+                amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                    eb_update_arr(i, j, k) = 0;
+                });
+
+            } else { // The box contains a mix of covered and regular cells
+
+                auto const & flag = eb_flag[mfi].array();
+                auto index_type = field[idim]->ixType();
+
+                amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+
+                    // Stair-case approximation: If neighboring cells of this gridpoint
+                    // are either partially or fully covered: do not update field
+
+                    // The number of cells that we need to check depends on the index type
+                    // of the `eb_update_arr` in each direction.
+                    // If `eb_update_arr` is nodal in a given direction, we need to check the cells
+                    // to the left and right of this nodal gridpoint.
+                    // For instance, if `eb_update_arr` is nodal in the first dimension, we need
+                    // to check the cells at index i-1 and at index i, since, with AMReX indexing conventions,
+                    // these are the neighboring cells for the nodal gridpoint at index i.
+                    // If `eb_update_arr` is cell-centered in a given direction, we only need to check
+                    // the cell at the same position (e.g., in the first dimension: the cell at index i).
+                    int const i_start = ( index_type.nodeCentered(0) )? i-1 : i;
+#if AMREX_SPACEDIM > 1
+                    int const j_start = ( index_type.nodeCentered(1) )? j-1 : j;
+#else
+                    int const j_start = j;
+#endif
+#if AMREX_SPACEDIM > 2
+                    int const k_start = ( index_type.nodeCentered(2) )? k-1 : k;
+#else
+                    int const k_start = k;
+#endif
+                    // Loop over neighboring cells
+                    int eb_update = 1;
+                    for (int i_cell = i_start; i_cell <= i; ++i_cell) {
+                        for (int j_cell = j_start; j_cell <= j; ++j_cell) {
+                            for (int k_cell = k_start; k_cell <= k; ++k_cell) {
+                                // If one of the neighboring cells is either partially or fully covered
+                                // (i.e. if they are not regular cells), do not update field
+                                // (Note that `flag` is a cell-centered object, and `isRegular`
+                                // returns `false` if the cell is either partially or fully covered.)
+                                if ( !flag(i_cell, j_cell, k_cell).isRegular() ) {
+                                    eb_update = 0;
+                                }
+                            }
+                        }
+                    }
+                    eb_update_arr(i, j, k) = eb_update;
+                });
+
+            }
+
+        }
+
+    }
+
+}
+
+void
+WarpX::MarkUpdateECellsECT (
+    std::array< std::unique_ptr<amrex::iMultiFab>,3> & eb_update_E,
+    ablastr::fields::VectorField const& edge_lengths )
+{
+
+#ifdef AMREX_USE_OMP
+#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
+#endif
+    for ( amrex::MFIter mfi(*eb_update_E[0], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) {
+
+        const amrex::Box& tbx = mfi.tilebox( eb_update_E[0]->ixType().toIntVect(), eb_update_E[0]->nGrowVect() );
+        const amrex::Box& tby = mfi.tilebox( eb_update_E[1]->ixType().toIntVect(), eb_update_E[1]->nGrowVect() );
+        const amrex::Box& tbz = mfi.tilebox( eb_update_E[2]->ixType().toIntVect(), eb_update_E[2]->nGrowVect() );
+
+        amrex::Array4<int> const & eb_update_Ex_arr = eb_update_E[0]->array(mfi);
+        amrex::Array4<int> const & eb_update_Ey_arr = eb_update_E[1]->array(mfi);
+        amrex::Array4<int> const & eb_update_Ez_arr = eb_update_E[2]->array(mfi);
+
+        amrex::Array4<amrex::Real> const & lx_arr = edge_lengths[0]->array(mfi);
+        amrex::Array4<amrex::Real> const & lz_arr = edge_lengths[2]->array(mfi);
+#if defined(WARPX_DIM_3D)
+        amrex::Array4<amrex::Real> const & ly_arr = edge_lengths[1]->array(mfi);
+#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
+        amrex::Dim3 const lx_lo = amrex::lbound(lx_arr);
+        amrex::Dim3 const lx_hi = amrex::ubound(lx_arr);
+        amrex::Dim3 const lz_lo = amrex::lbound(lz_arr);
+        amrex::Dim3 const lz_hi = amrex::ubound(lz_arr);
+#endif
+
+        amrex::ParallelFor (tbx, tby, tbz,
+            [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                // Do not update Ex if the edge on which it lives is fully covered
+                eb_update_Ex_arr(i, j, k) = (lx_arr(i, j, k) == 0)? 0 : 1;
+            },
+            [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+#ifdef WARPX_DIM_3D
+                // In 3D: Do not update Ey if the edge on which it lives is fully covered
+                eb_update_Ey_arr(i, j, k) = (ly_arr(i, j, k) == 0)? 0 : 1;
+#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
+                // In XZ and RZ: Ey is associated with a mesh node,
+                // so we need to check if the mesh node is covered
+                if((lx_arr(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)==0)
+                 ||(lx_arr(std::max(i-1, lx_lo.x), std::min(j , lx_hi.y), k)==0)
+                 ||(lz_arr(std::min(i , lz_hi.x), std::min(j , lz_hi.y), k)==0)
+                 ||(lz_arr(std::min(i , lz_hi.x), std::max(j-1, lz_lo.y), k)==0)) {
+                    eb_update_Ey_arr(i, j, k) = 0;
+                } else {
+                    eb_update_Ey_arr(i, j, k) = 1;
+                }
+#endif
+            },
+            [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                // Do not update Ez if the edge on which it lives is fully covered
+                eb_update_Ez_arr(i, j, k) = (lz_arr(i, j, k) == 0)? 0 : 1;
+            }
+        );
+
+    }
+}
+
+void
+WarpX::MarkUpdateBCellsECT (
+    std::array< std::unique_ptr<amrex::iMultiFab>,3> & eb_update_B,
+    ablastr::fields::VectorField const& face_areas,
+    ablastr::fields::VectorField const& edge_lengths )
+{
+
+#ifdef AMREX_USE_OMP
+#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
+#endif
+    for ( amrex::MFIter mfi(*eb_update_B[0], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) {
+
+        const amrex::Box& tbx = mfi.tilebox( eb_update_B[0]->ixType().toIntVect(), eb_update_B[0]->nGrowVect() );
+        const amrex::Box& tby = mfi.tilebox( eb_update_B[1]->ixType().toIntVect(), eb_update_B[1]->nGrowVect() );
+        const amrex::Box& tbz = mfi.tilebox( eb_update_B[2]->ixType().toIntVect(), eb_update_B[2]->nGrowVect() );
+
+        amrex::Array4<int> const & eb_update_Bx_arr = eb_update_B[0]->array(mfi);
+        amrex::Array4<int> const & eb_update_By_arr = eb_update_B[1]->array(mfi);
+        amrex::Array4<int> const & eb_update_Bz_arr = eb_update_B[2]->array(mfi);
+
+#ifdef WARPX_DIM_3D
+        amrex::Array4<amrex::Real> const & Sx_arr = face_areas[0]->array(mfi);
+        amrex::Array4<amrex::Real> const & Sy_arr = face_areas[1]->array(mfi);
+        amrex::Array4<amrex::Real> const & Sz_arr = face_areas[2]->array(mfi);
+        amrex::ignore_unused(edge_lengths);
+#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
+        amrex::Array4<amrex::Real> const & Sy_arr = face_areas[1]->array(mfi);
+        amrex::Array4<amrex::Real> const & lx_arr = edge_lengths[0]->array(mfi);
+        amrex::Array4<amrex::Real> const & lz_arr = edge_lengths[2]->array(mfi);
+#endif
+        amrex::ParallelFor (tbx, tby, tbz,
+            [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+#ifdef WARPX_DIM_3D
+                // In 3D: do not update Bx if the face on which it lives is fully covered
+                eb_update_Bx_arr(i, j, k) = (Sx_arr(i, j, k) == 0)? 0 : 1;
+#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
+                //In XZ and RZ, Bx lives on a z-edge ; do not update if fully covered
+                eb_update_Bx_arr(i, j, k) = (lz_arr(i, j, k) == 0)? 0 : 1;
+#endif
+            },
+            [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+                // Do not update By if the face on which it lives is fully covered
+                eb_update_By_arr(i, j, k) = (Sy_arr(i, j, k) == 0)? 0 : 1;
+            },
+            [=] AMREX_GPU_DEVICE (int i, int j, int k) {
+#ifdef WARPX_DIM_3D
+                // In 3D: do not update Bz if the face on which it lives is fully covered
+                eb_update_Bz_arr(i, j, k) = (Sz_arr(i, j, k) == 0)? 0 : 1;
+#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
+                //In XZ and RZ, Bz lives on a x-edge ; do not update if fully covered
+                eb_update_Bz_arr(i, j, k) = (lx_arr(i, j, k) == 0)?
0 : 1; +#endif + } + ); + + } +} void -WarpX::MarkCells () +WarpX::MarkExtensionCells () { using ablastr::fields::Direction; using warpx::fields::FieldType; @@ -302,7 +517,7 @@ WarpX::MarkCells () auto const &cell_size = CellSize(maxLevel()); #if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) - WARPX_ABORT_WITH_MESSAGE("MarkCells only implemented in 2D and 3D"); + WARPX_ABORT_WITH_MESSAGE("MarkExtensionCells only implemented in 2D and 3D"); #endif for (int idim = 0; idim < 3; ++idim) { diff --git a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp index 03a9866fb98..926f52aa8ee 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/EvolveE.cpp @@ -55,6 +55,7 @@ void FiniteDifferenceSolver::EvolveE ( int lev, PatchType patch_type, ablastr::fields::VectorField const& Efield, + std::array< std::unique_ptr,3 > const& eb_update_E, amrex::Real const dt ) { @@ -72,40 +73,23 @@ void FiniteDifferenceSolver::EvolveE ( fields.get(FieldType::F_fp, lev) : fields.get(FieldType::F_cp, lev); } - ablastr::fields::VectorField edge_lengths; - if (fields.has_vector(FieldType::edge_lengths, lev)) { - edge_lengths = fields.get_alldirs(FieldType::edge_lengths, lev); - } - ablastr::fields::VectorField face_areas; - if (fields.has_vector(FieldType::face_areas, lev)) { - face_areas = fields.get_alldirs(FieldType::face_areas, lev); - } - ablastr::fields::VectorField area_mod; - if (fields.has_vector(FieldType::area_mod, lev)) { - area_mod = fields.get_alldirs(FieldType::area_mod, lev); - } - ablastr::fields::VectorField ECTRhofield; - if (fields.has_vector(FieldType::ECTRhofield, lev)) { - ECTRhofield = fields.get_alldirs(FieldType::ECTRhofield, lev); - } - // Select algorithm (The choice of algorithm is a runtime option, // but we compile code for each algorithm, using templates) #ifdef WARPX_DIM_RZ if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee){ - EvolveECylindrical ( Efield, Bfield, Jfield, edge_lengths, Ffield, lev, dt ); + EvolveECylindrical ( Efield, Bfield, Jfield, eb_update_E, Ffield, lev, dt ); #else if (m_grid_type == GridType::Collocated) { - EvolveECartesian ( Efield, Bfield, Jfield, edge_lengths, Ffield, lev, dt ); + EvolveECartesian ( Efield, Bfield, Jfield, eb_update_E, Ffield, lev, dt ); } else if (m_fdtd_algo == ElectromagneticSolverAlgo::Yee || m_fdtd_algo == ElectromagneticSolverAlgo::ECT) { - EvolveECartesian ( Efield, Bfield, Jfield, edge_lengths, Ffield, lev, dt ); + EvolveECartesian ( Efield, Bfield, Jfield, eb_update_E, Ffield, lev, dt ); } else if (m_fdtd_algo == ElectromagneticSolverAlgo::CKC) { - EvolveECartesian ( Efield, Bfield, Jfield, edge_lengths, Ffield, lev, dt ); + EvolveECartesian ( Efield, Bfield, Jfield, eb_update_E, Ffield, lev, dt ); #endif } else { @@ -122,14 +106,10 @@ void FiniteDifferenceSolver::EvolveECartesian ( ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Bfield, ablastr::fields::VectorField const& Jfield, - VectorField const& edge_lengths, + std::array< std::unique_ptr,3> const& eb_update_E, amrex::MultiFab const* Ffield, int lev, amrex::Real const dt ) { -#ifndef AMREX_USE_EB - amrex::ignore_unused(edge_lengths); -#endif - amrex::LayoutData* cost = WarpX::getCosts(lev); Real constexpr c2 = PhysConst::c * PhysConst::c; @@ -155,11 +135,12 @@ void FiniteDifferenceSolver::EvolveECartesian ( Array4 const& jy = Jfield[1]->array(mfi); Array4 const& jz = Jfield[2]->array(mfi); - amrex::Array4 lx, ly, lz; + // Extract 
structures indicating whether the E field should be updated + amrex::Array4 update_Ex_arr, update_Ey_arr, update_Ez_arr; if (EB::enabled()) { - lx = edge_lengths[0]->array(mfi); - ly = edge_lengths[1]->array(mfi); - lz = edge_lengths[2]->array(mfi); + update_Ex_arr = eb_update_E[0]->array(mfi); + update_Ey_arr = eb_update_E[1]->array(mfi); + update_Ez_arr = eb_update_E[2]->array(mfi); } // Extract stencil coefficients @@ -179,8 +160,9 @@ void FiniteDifferenceSolver::EvolveECartesian ( amrex::ParallelFor(tex, tey, tez, [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // Skip field push if this cell is fully covered by embedded boundaries - if (lx && lx(i, j, k) <= 0) { return; } + + // Skip field push in the embedded boundaries + if (update_Ex_arr && update_Ex_arr(i, j, k) == 0) { return; } Ex(i, j, k) += c2 * dt * ( - T_Algo::DownwardDz(By, coefs_z, n_coefs_z, i, j, k) @@ -189,14 +171,9 @@ void FiniteDifferenceSolver::EvolveECartesian ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // Skip field push if this cell is fully covered by embedded boundaries -#ifdef WARPX_DIM_3D - if (ly && ly(i,j,k) <= 0) { return; } -#elif defined(WARPX_DIM_XZ) - //In XZ Ey is associated with a mesh node, so we need to check if the mesh node is covered - amrex::ignore_unused(ly); - if (lx && (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0)) { return; } -#endif + + // Skip field push in the embedded boundaries + if (update_Ey_arr && update_Ey_arr(i, j, k) == 0) { return; } Ey(i, j, k) += c2 * dt * ( - T_Algo::DownwardDx(Bz, coefs_x, n_coefs_x, i, j, k) @@ -205,8 +182,10 @@ void FiniteDifferenceSolver::EvolveECartesian ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // Skip field push if this cell is fully covered by embedded boundaries - if (lz && lz(i,j,k) <= 0) { return; } + + // Skip field push in the embedded boundaries + if (update_Ez_arr && update_Ez_arr(i, j, k) == 0) { return; } + Ez(i, j, k) += c2 * dt * ( - T_Algo::DownwardDy(Bx, coefs_y, n_coefs_y, i, j, k) + T_Algo::DownwardDx(By, coefs_x, n_coefs_x, i, j, k) @@ -256,14 +235,10 @@ void FiniteDifferenceSolver::EvolveECylindrical ( ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Bfield, ablastr::fields::VectorField const& Jfield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 > const& eb_update_E, amrex::MultiFab const* Ffield, int lev, amrex::Real const dt ) { -#ifndef AMREX_USE_EB - amrex::ignore_unused(edge_lengths); -#endif - amrex::LayoutData* cost = WarpX::getCosts(lev); // Loop through the grids, and over the tiles within each grid @@ -288,10 +263,12 @@ void FiniteDifferenceSolver::EvolveECylindrical ( Array4 const& jt = Jfield[1]->array(mfi); Array4 const& jz = Jfield[2]->array(mfi); - amrex::Array4 lr, lz; + // Extract structures indicating whether the E field should be updated + amrex::Array4 update_Er_arr, update_Et_arr, update_Ez_arr; if (EB::enabled()) { - lr = edge_lengths[0]->array(mfi); - lz = edge_lengths[2]->array(mfi); + update_Er_arr = eb_update_E[0]->array(mfi); + update_Et_arr = eb_update_E[1]->array(mfi); + update_Ez_arr = eb_update_E[2]->array(mfi); } // Extract stencil coefficients @@ -316,8 +293,9 @@ void FiniteDifferenceSolver::EvolveECylindrical ( amrex::ParallelFor(ter, tet, tez, [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ - // Skip field push if this cell is fully covered by embedded boundaries - if (lr && lr(i, j, 0) <= 0) { return; } + + // Skip field push in the embedded boundaries + if (update_Er_arr && 
update_Er_arr(i, j, 0) == 0) { return; } Real const r = rmin + (i + 0.5_rt)*dr; // r on cell-centered point (Er is cell-centered in r) Er(i, j, 0, 0) += c2 * dt*( @@ -336,9 +314,9 @@ void FiniteDifferenceSolver::EvolveECylindrical ( }, [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ - // Skip field push if this cell is fully covered by embedded boundaries - // The Et field is at a node, so we need to check if the node is covered - if (lr && (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0)) { return; } + + // Skip field push in the embedded boundaries + if (update_Et_arr && update_Et_arr(i, j, 0) == 0) { return; } Real const r = rmin + i*dr; // r on a nodal grid (Et is nodal in r) if (r != 0) { // Off-axis, regular Maxwell equations @@ -381,8 +359,9 @@ void FiniteDifferenceSolver::EvolveECylindrical ( }, [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ - // Skip field push if this cell is fully covered by embedded boundaries - if (lz && lz(i, j, 0) <= 0) { return; } + + // Skip field push in the embedded boundaries + if (update_Ez_arr && update_Ez_arr(i, j, 0) == 0) { return; } Real const r = rmin + i*dr; // r on a nodal grid (Ez is nodal in r) if (r != 0) { // Off-axis, regular Maxwell equations diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 45c06584fda..7726a2ed5bd 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -63,6 +63,7 @@ class FiniteDifferenceSolver int lev, PatchType patch_type, ablastr::fields::VectorField const& Efield, + std::array< std::unique_ptr,3 > const& eb_update_E, amrex::Real dt ); void EvolveF ( amrex::MultiFab* Ffield, @@ -110,7 +111,7 @@ class FiniteDifferenceSolver ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Bfield, ablastr::fields::VectorField const& Jfield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 > const& eb_update_E, amrex::Real dt, std::unique_ptr const& macroscopic_properties); @@ -215,7 +216,7 @@ class FiniteDifferenceSolver ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Bfield, ablastr::fields::VectorField const& Jfield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 > const& eb_update_E, amrex::MultiFab const* Ffield, int lev, amrex::Real dt ); @@ -267,7 +268,7 @@ class FiniteDifferenceSolver ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Bfield, ablastr::fields::VectorField const& Jfield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 > const& eb_update_E, amrex::MultiFab const* Ffield, int lev, amrex::Real dt ); @@ -312,7 +313,7 @@ class FiniteDifferenceSolver ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Bfield, ablastr::fields::VectorField const& Jfield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 > const& eb_update_E, amrex::Real dt, std::unique_ptr const& macroscopic_properties); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 20989cbeca9..abda59e40ba 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ 
b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -226,9 +226,8 @@ void HybridPICModel::InitData () m_J_external[0], m_J_external[1], m_J_external[2], - lev, PatchType::fine, 'e', - warpx.m_fields.get_alldirs(FieldType::edge_lengths, lev), - warpx.m_fields.get_alldirs(FieldType::face_areas, lev)); + lev, PatchType::fine, + warpx.GetEBUpdateEFlag()); } } @@ -244,9 +243,8 @@ void HybridPICModel::GetCurrentExternal () m_J_external[0], m_J_external[1], m_J_external[2], - lev, PatchType::fine, 'e', - warpx.m_fields.get_alldirs(FieldType::edge_lengths, lev), - warpx.m_fields.get_alldirs(FieldType::face_areas, lev)); + lev, PatchType::fine, + warpx.GetEBUpdateEFlag()); } } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp index 708728c4e5b..33d368925f7 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/MacroscopicEvolveE.cpp @@ -40,7 +40,7 @@ void FiniteDifferenceSolver::MacroscopicEvolveE ( ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Bfield, ablastr::fields::VectorField const& Jfield, - VectorField const& edge_lengths, + std::array< std::unique_ptr,3 > const& eb_update_E, amrex::Real const dt, std::unique_ptr const& macroscopic_properties) { @@ -48,7 +48,7 @@ void FiniteDifferenceSolver::MacroscopicEvolveE ( // Select algorithm (The choice of algorithm is a runtime option, // but we compile code for each algorithm, using templates) #ifdef WARPX_DIM_RZ - amrex::ignore_unused(Efield, Bfield, Jfield, edge_lengths, dt, macroscopic_properties); + amrex::ignore_unused(Efield, Bfield, Jfield, eb_update_E, dt, macroscopic_properties); WARPX_ABORT_WITH_MESSAGE("currently macro E-push does not work for RZ"); #else @@ -61,13 +61,13 @@ void FiniteDifferenceSolver::MacroscopicEvolveE ( if (WarpX::macroscopic_solver_algo == MacroscopicSolverAlgo::LaxWendroff) { MacroscopicEvolveECartesian - ( Efield, Bfield, Jfield, edge_lengths, dt, macroscopic_properties); + ( Efield, Bfield, Jfield, eb_update_E, dt, macroscopic_properties); } if (WarpX::macroscopic_solver_algo == MacroscopicSolverAlgo::BackwardEuler) { MacroscopicEvolveECartesian - ( Efield, Bfield, Jfield, edge_lengths, dt, macroscopic_properties); + ( Efield, Bfield, Jfield, eb_update_E, dt, macroscopic_properties); } @@ -78,12 +78,12 @@ void FiniteDifferenceSolver::MacroscopicEvolveE ( if (WarpX::macroscopic_solver_algo == MacroscopicSolverAlgo::LaxWendroff) { MacroscopicEvolveECartesian - ( Efield, Bfield, Jfield, edge_lengths, dt, macroscopic_properties); + ( Efield, Bfield, Jfield, eb_update_E, dt, macroscopic_properties); } else if (WarpX::macroscopic_solver_algo == MacroscopicSolverAlgo::BackwardEuler) { MacroscopicEvolveECartesian - ( Efield, Bfield, Jfield, edge_lengths, dt, macroscopic_properties); + ( Efield, Bfield, Jfield, eb_update_E, dt, macroscopic_properties); } @@ -103,14 +103,10 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( ablastr::fields::VectorField const& Efield, ablastr::fields::VectorField const& Bfield, ablastr::fields::VectorField const& Jfield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 > const& eb_update_E, amrex::Real const dt, std::unique_ptr const& macroscopic_properties) { -#ifndef AMREX_USE_EB - amrex::ignore_unused(edge_lengths); -#endif - amrex::MultiFab& sigma_mf = macroscopic_properties->getsigma_mf(); amrex::MultiFab& epsilon_mf 
= macroscopic_properties->getepsilon_mf(); amrex::MultiFab& mu_mf = macroscopic_properties->getmu_mf(); @@ -141,15 +137,12 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( Array4 const& jy = Jfield[1]->array(mfi); Array4 const& jz = Jfield[2]->array(mfi); - amrex::Array4 eb_lx, eb_ly, eb_lz; + amrex::Array4 update_Ex_arr, update_Ey_arr, update_Ez_arr; if (EB::enabled()) { - eb_lx = edge_lengths[0]->array(mfi); - eb_ly = edge_lengths[1]->array(mfi); - eb_lz = edge_lengths[2]->array(mfi); + update_Ex_arr = eb_update_E[0]->array(mfi); + update_Ey_arr = eb_update_E[1]->array(mfi); + update_Ez_arr = eb_update_E[2]->array(mfi); } -#ifdef WARPX_DIM_XZ - amrex::ignore_unused(eb_ly); -#endif // material prop // amrex::Array4 const& sigma_arr = sigma_mf.array(mfi); @@ -180,8 +173,9 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( // Loop over the cells and update the fields amrex::ParallelFor(tex, tey, tez, [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // Skip field push if this cell is fully covered by embedded boundaries - if (eb_lx && eb_lx(i, j, k) <= 0) { return; } + + // Skip field push in the embedded boundaries + if (update_Ex_arr && update_Ex_arr(i, j, k) == 0) { return; } // Interpolate conductivity, sigma, to Ex position on the grid amrex::Real const sigma_interp = ablastr::coarsen::sample::Interp(sigma_arr, sigma_stag, @@ -198,12 +192,9 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef WARPX_DIM_3D - if (eb_ly && eb_ly(i,j,k) <= 0) { return; } -#elif defined(WARPX_DIM_XZ) - //In XZ Ey is associated with a mesh node, so we need to check if the mesh node is covered - if (eb_lx && (eb_lx(i, j, k)<=0 || eb_lx(i-1, j, k)<=0 || eb_lz(i, j, k)<=0 || eb_lz(i, j-1, k)<=0)) { return; } -#endif + + // Skip field push in the embedded boundaries + if (update_Ey_arr && update_Ey_arr(i, j, k) == 0) { return; } // Interpolate conductivity, sigma, to Ey position on the grid amrex::Real const sigma_interp = ablastr::coarsen::sample::Interp(sigma_arr, sigma_stag, @@ -221,8 +212,9 @@ void FiniteDifferenceSolver::MacroscopicEvolveECartesian ( }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // Skip field push if this cell is fully covered by embedded boundaries - if (eb_lz && eb_lz(i, j, k) <= 0) { return; } + + // Skip field push in the embedded boundaries + if (update_Ez_arr && update_Ez_arr(i, j, k) == 0) { return; } // Interpolate conductivity, sigma, to Ez position on the grid amrex::Real const sigma_interp = ablastr::coarsen::sample::Interp(sigma_arr, sigma_stag, diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp index eaf96cf77ec..9b62bd91b0c 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp +++ b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp @@ -385,12 +385,14 @@ WarpX::ImplicitComputeRHSE (int lev, PatchType patch_type, amrex::Real a_dt, War lev, patch_type, a_Erhs_vec.getArrayVec()[lev], + m_eb_update_E[lev], a_dt ); } else { m_fdtd_solver_cp[lev]->EvolveE( m_fields, lev, patch_type, a_Erhs_vec.getArrayVec()[lev], + m_eb_update_E[lev], a_dt ); } diff --git a/Source/FieldSolver/WarpXPushFieldsEM.cpp b/Source/FieldSolver/WarpXPushFieldsEM.cpp index 24640fc63c7..139988b69a2 100644 --- a/Source/FieldSolver/WarpXPushFieldsEM.cpp +++ b/Source/FieldSolver/WarpXPushFieldsEM.cpp @@ -963,12 +963,14 @@ WarpX::EvolveE (int lev, PatchType patch_type, amrex::Real a_dt, amrex::Real sta lev, patch_type, 
m_fields.get_alldirs(FieldType::Efield_fp, lev), + m_eb_update_E[lev], a_dt ); } else { m_fdtd_solver_cp[lev]->EvolveE( m_fields, lev, patch_type, m_fields.get_alldirs(FieldType::Efield_cp, lev), + m_eb_update_E[lev], a_dt ); } @@ -1155,7 +1157,7 @@ WarpX::MacroscopicEvolveE (int lev, PatchType patch_type, amrex::Real a_dt, amre m_fields.get_alldirs(FieldType::Efield_fp, lev), m_fields.get_alldirs(FieldType::Bfield_fp, lev), m_fields.get_alldirs(FieldType::current_fp, lev), - m_fields.get_alldirs(FieldType::edge_lengths, lev), + m_eb_update_E[lev], a_dt, m_macroscopic_properties); if (do_pml && pml[lev]->ok()) { diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 5aca5ad89da..36b6b2d2254 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -965,23 +965,20 @@ WarpX::InitLevelData (int lev, Real /*time*/) // The default maxlevel_extEMfield_init value is the total number of levels in the simulation if ((m_p_ext_field_params->B_ext_grid_type == ExternalFieldType::parse_ext_grid_function) && (lev > 0) && (lev <= maxlevel_extEMfield_init)) { + ComputeExternalFieldOnGridUsingParser( FieldType::Bfield_aux, m_p_ext_field_params->Bxfield_parser->compile<4>(), m_p_ext_field_params->Byfield_parser->compile<4>(), m_p_ext_field_params->Bzfield_parser->compile<4>(), - lev, PatchType::fine, 'f', - m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev)); + lev, PatchType::fine, m_eb_update_B); ComputeExternalFieldOnGridUsingParser( FieldType::Bfield_cp, m_p_ext_field_params->Bxfield_parser->compile<4>(), m_p_ext_field_params->Byfield_parser->compile<4>(), m_p_ext_field_params->Bzfield_parser->compile<4>(), - lev, PatchType::coarse, 'f', - m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_mr_levels_alldirs(FieldType::face_areas, max_level)[lev]); + lev, PatchType::coarse, m_eb_update_B); } // if the input string for the E-field is "parse_e_ext_grid_function", @@ -1012,18 +1009,14 @@ WarpX::InitLevelData (int lev, Real /*time*/) m_p_ext_field_params->Exfield_parser->compile<4>(), m_p_ext_field_params->Eyfield_parser->compile<4>(), m_p_ext_field_params->Ezfield_parser->compile<4>(), - lev, PatchType::fine, 'e', - m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev)); + lev, PatchType::fine, m_eb_update_E); ComputeExternalFieldOnGridUsingParser( FieldType::Efield_cp, m_p_ext_field_params->Exfield_parser->compile<4>(), m_p_ext_field_params->Eyfield_parser->compile<4>(), m_p_ext_field_params->Ezfield_parser->compile<4>(), - lev, PatchType::coarse, 'e', - m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev)); + lev, PatchType::coarse, m_eb_update_E); #ifdef AMREX_USE_EB if (eb_enabled) { if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { @@ -1057,9 +1050,8 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( amrex::ParserExecutor<4> const& fx_parser, amrex::ParserExecutor<4> const& fy_parser, amrex::ParserExecutor<4> const& fz_parser, - int lev, PatchType patch_type, [[maybe_unused]] const char topology, - std::optional const& edge_lengths, - std::optional const& face_areas) + int lev, PatchType patch_type, + amrex::Vector,3 > > const& eb_update_field) { auto t = gett_new(lev); @@ -1082,8 +1074,6 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); const 
amrex::IntVect z_nodal_flag = mfz->ixType().toIntVect(); - const bool eb_enabled = EB::enabled(); - for ( MFIter mfi(*mfx, TilingIfNotGPU()); mfi.isValid(); ++mfi) { const amrex::Box& tbx = mfi.tilebox( x_nodal_flag, mfx->nGrowVect() ); const amrex::Box& tby = mfi.tilebox( y_nodal_flag, mfy->nGrowVect() ); @@ -1093,44 +1083,19 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( auto const& mfyfab = mfy->array(mfi); auto const& mfzfab = mfz->array(mfi); - amrex::Array4 lx, ly, lz, Sx, Sy, Sz; - if (eb_enabled) { - if (edge_lengths.has_value()) { - const auto& edge_lengths_array = edge_lengths.value(); - lx = edge_lengths_array[0]->array(mfi); - ly = edge_lengths_array[1]->array(mfi); - lz = edge_lengths_array[2]->array(mfi); - } - if (face_areas.has_value()) { - const auto& face_areas_array = face_areas.value(); - Sx = face_areas_array[0]->array(mfi); - Sy = face_areas_array[1]->array(mfi); - Sz = face_areas_array[2]->array(mfi); - } - } - -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::Dim3 lx_lo, lx_hi, lz_lo, lz_hi; -#endif - if (eb_enabled) { -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - lx_lo = amrex::lbound(lx); - lx_hi = amrex::ubound(lx); - lz_lo = amrex::lbound(lz); - lz_hi = amrex::ubound(lz); -#endif + amrex::Array4 update_fx_arr, update_fy_arr, update_fz_arr; + if (EB::enabled()) { + update_fx_arr = eb_update_field[lev][0]->array(mfi); + update_fy_arr = eb_update_field[lev][1]->array(mfi); + update_fz_arr = eb_update_field[lev][2]->array(mfi); } amrex::ParallelFor (tbx, tby, tbz, [=] AMREX_GPU_DEVICE (int i, int j, int k) { -#ifdef AMREX_USE_EB -#ifdef WARPX_DIM_3D - if(lx && ((topology=='e' and lx(i, j, k)<=0) or (topology=='f' and Sx(i, j, k)<=0))) { return; } -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - //In XZ and RZ Ex is associated with a x-edge, while Bx is associated with a z-edge - if(lx && ((topology=='e' and lx(i, j, k)<=0) or (topology=='f' and lz(i, j, k)<=0))) { return; } -#endif -#endif + + // Do not set fields inside the embedded boundary + if (update_fx_arr && update_fx_arr(i,j,k) == 0) { return; } + // Shift required in the x-, y-, or z- position // depending on the index type of the multifab #if defined(WARPX_DIM_1D_Z) @@ -1156,19 +1121,10 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( mfxfab(i,j,k) = fx_parser(x,y,z,t); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { -#ifdef AMREX_USE_EB -#ifdef WARPX_DIM_3D - if(ly && ((topology=='e' and ly(i, j, k)<=0) or (topology=='f' and Sy(i, j, k)<=0))) { return; } -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - //In XZ and RZ Ey is associated with a mesh node, so we need to check if the mesh node is covered - if(lx && - ((topology=='e' and (lx(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)<=0 - || lx(std::max(i-1, lx_lo.x), std::min(j , lx_hi.y), k)<=0 - || lz(std::min(i , lz_hi.x), std::min(j , lz_hi.y), k)<=0 - || lz(std::min(i , lz_hi.x), std::max(j-1, lz_lo.y), k)<=0)) or - (topology=='f' and Sy(i,j,k)<=0))) { return; } -#endif -#endif + + // Do not set fields inside the embedded boundary + if (update_fy_arr && update_fy_arr(i,j,k) == 0) { return; } + #if defined(WARPX_DIM_1D_Z) const amrex::Real x = 0._rt; const amrex::Real y = 0._rt; @@ -1192,14 +1148,10 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( mfyfab(i,j,k) = fy_parser(x,y,z,t); }, [=] AMREX_GPU_DEVICE (int i, int j, int k) { -#ifdef AMREX_USE_EB -#ifdef WARPX_DIM_3D - if(lz && ((topology=='e' and lz(i, j, k)<=0) or (topology=='f' and Sz(i, j, k)<=0))) { return; } -#elif defined(WARPX_DIM_XZ) || 
defined(WARPX_DIM_RZ) - //In XZ and RZ Ez is associated with a z-edge, while Bz is associated with a x-edge - if(lz && ((topology=='e' and lz(i, j, k)<=0) or (topology=='f' and lx(i, j, k)<=0))) { return; } -#endif -#endif + + // Do not set fields inside the embedded boundary + if (update_fz_arr && update_fz_arr(i,j,k) == 0) { return; } + #if defined(WARPX_DIM_1D_Z) const amrex::Real x = 0._rt; const amrex::Real y = 0._rt; @@ -1298,9 +1250,16 @@ void WarpX::InitializeEBGridData (int lev) ScaleAreas(face_areas_lev, CellSize(lev)); if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { - MarkCells(); + // Compute additional quantities required for the ECT solver + MarkExtensionCells(); ComputeFaceExtensions(); } + + // Mark on which grid points E should be updated + MarkUpdateECellsECT( m_eb_update_E[lev], edge_lengths_lev ); + // Mark on which grid points B should be updated + MarkUpdateBCellsECT( m_eb_update_B[lev], face_areas_lev, edge_lengths_lev); + } ComputeDistanceToEB(); @@ -1384,9 +1343,7 @@ WarpX::LoadExternalFields (int const lev) m_p_ext_field_params->Bxfield_parser->compile<4>(), m_p_ext_field_params->Byfield_parser->compile<4>(), m_p_ext_field_params->Bzfield_parser->compile<4>(), - lev, PatchType::fine, 'f', - m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev)); + lev, PatchType::fine, m_eb_update_B); } else if (m_p_ext_field_params->B_ext_grid_type == ExternalFieldType::read_from_file) { #if defined(WARPX_DIM_RZ) @@ -1409,9 +1366,7 @@ WarpX::LoadExternalFields (int const lev) m_p_ext_field_params->Exfield_parser->compile<4>(), m_p_ext_field_params->Eyfield_parser->compile<4>(), m_p_ext_field_params->Ezfield_parser->compile<4>(), - lev, PatchType::fine, 'e', - m_fields.get_alldirs(FieldType::edge_lengths, lev), - m_fields.get_alldirs(FieldType::face_areas, lev)); + lev, PatchType::fine, m_eb_update_E ); } else if (m_p_ext_field_params->E_ext_grid_type == ExternalFieldType::read_from_file) { #if defined(WARPX_DIM_RZ) diff --git a/Source/Parallelization/WarpXRegrid.cpp b/Source/Parallelization/WarpXRegrid.cpp index 81cbb55c2c7..79975397196 100644 --- a/Source/Parallelization/WarpXRegrid.cpp +++ b/Source/Parallelization/WarpXRegrid.cpp @@ -174,6 +174,14 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi using ablastr::fields::Direction; using warpx::fields::FieldType; + const auto RemakeMultiFab = [&](auto& mf){ + if (mf == nullptr) { return; } + const IntVect& ng = mf->nGrowVect(); + auto pmf = std::remove_reference_t{}; + AllocInitMultiFab(pmf, mf->boxArray(), dm, mf->nComp(), ng, lev, mf->tags()[0]); + mf = std::move(pmf); + }; + bool const eb_enabled = EB::enabled(); if (ba == boxArray(lev)) { @@ -187,6 +195,8 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi { if (eb_enabled) { if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { + RemakeMultiFab( m_eb_update_E[lev][idim] ); + RemakeMultiFab( m_eb_update_B[lev][idim] ); if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { m_borrowing[lev][idim] = std::make_unique>(amrex::convert(ba, Bfield_fp[lev][idim]->ixType().toIntVect()), dm); } diff --git a/Source/WarpX.H b/Source/WarpX.H index 2c949cb514a..98fe9b924fd 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -169,6 +169,7 @@ public: } #endif ParticleBoundaryBuffer& GetParticleBoundaryBuffer () { return *m_particle_boundary_buffer; } + amrex::Vector,3 > >& GetEBUpdateEFlag() { return 
m_eb_update_E; } static void shiftMF (amrex::MultiFab& mf, const amrex::Geometry& geom, int num_shift, int dir, int lev, bool update_cost_flag, @@ -937,9 +938,8 @@ public: amrex::ParserExecutor<4> const& fx_parser, amrex::ParserExecutor<4> const& fy_parser, amrex::ParserExecutor<4> const& fz_parser, - int lev, PatchType patch_type, [[maybe_unused]] char topology, - std::optional const& edge_lengths = std::nullopt, - std::optional const& face_areas = std::nullopt); + int lev, PatchType patch_type, + amrex::Vector,3 > > const& eb_update_field); /** * \brief Load field values from a user-specified openPMD file, @@ -1013,6 +1013,53 @@ public: void InitEB (); #ifdef AMREX_USE_EB + /** \brief Set a flag to indicate on which grid points the field `field` + * should be updated, depending on their position relative to the embedded boundary. + * + * This function is used by all finite-difference solvers, except the + * ECT solver, which instead uses `MarkUpdateECellsECT` and `MarkUpdateBCellsECT`. + * It uses a stair-case approximation of the embedded boundary: + * If a grid point touches cells that are either partially or fully covered + * by the embedded boundary: the corresponding field is not updated. + * + * More specifically, this function fills the iMultiFabs in `eb_update` + * (which have the same indexType as the MultiFabs in `field`) with 1 + * or 0, depending on whether the grid point should be updated or not. + */ + void MarkUpdateCellsStairCase ( + std::array< std::unique_ptr,3> & eb_update, + ablastr::fields::VectorField const & field, + amrex::EBFArrayBoxFactory const & eb_fact ); + + /** \brief Set a flag to indicate on which grid points the E field + * should be updated, depending on their position relative to the embedded boundary. + * + * This function is used by ECT solver. The E field is not updated if + * the edge on which it is defined is fully covered by the embedded boundary. + * + * More specifically, this function fills the iMultiFabs in `eb_update_E` + * (which have the same indexType as the E field) with 1 or 0, depending + * on whether the grid point should be updated or not. + */ + void MarkUpdateECellsECT ( + std::array< std::unique_ptr,3> & eb_update_E, + ablastr::fields::VectorField const& edge_lengths ); + + /** \brief Set a flag to indicate on which grid points the B field + * should be updated, depending on their position relative to the embedded boundary. + * + * This function is used by ECT solver. The B field is not updated if + * the face on which it is defined is fully covered by the embedded boundary. + * + * More specifically, this function fills the iMultiFabs in `eb_update_B` + * (which have the same indexType as the B field) with 1 or 0, depending + * on whether the grid point should be updated or not. + */ + void MarkUpdateBCellsECT ( + std::array< std::unique_ptr,3> & eb_update_B, + ablastr::fields::VectorField const& face_areas, + ablastr::fields::VectorField const& edge_lengths ); + /** * \brief Compute the length of the mesh edges. Here the length is a value in [0, 1]. * An edge of length 0 is fully covered. 
@@ -1044,7 +1091,7 @@ public: * - 2 for stable cells which have been intruded * Here we cannot know if a cell is intruded or not so we initialize all stable cells with 1 */ - void MarkCells(); + void MarkExtensionCells(); #endif /** @@ -1393,17 +1440,23 @@ private: mutable amrex::Vector,3 > > Afield_dotMask; mutable amrex::Vector< std::unique_ptr > phi_dotMask; + /** EB: Flag to indicate whether a gridpoint is inside the embedded boundary and therefore + * whether the E or B should not be updated. (One array per level and per direction, due to staggering) + */ + amrex::Vector,3 > > m_eb_update_E; + amrex::Vector,3 > > m_eb_update_B; + /** EB: for every mesh face flag_info_face contains a: * * 0 if the face needs to be extended * * 1 if the face is large enough to lend area to other faces * * 2 if the face is actually intruded by other face - * It is initialized in WarpX::MarkCells + * It is initialized in WarpX::MarkExtensionCells * This is only used for the ECT solver.*/ amrex::Vector, 3 > > m_flag_info_face; /** EB: for every mesh face face flag_ext_face contains a: * * 1 if the face needs to be extended * * 0 otherwise - * It is initialized in WarpX::MarkCells and then modified in WarpX::ComputeOneWayExtensions + * It is initialized in WarpX::MarkExtensionCells and then modified in WarpX::ComputeOneWayExtensions * and in WarpX::ComputeEightWaysExtensions * This is only used for the ECT solver.*/ amrex::Vector, 3 > > m_flag_ext_face; diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index bd193f11bdc..1c20c35578c 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -316,6 +316,8 @@ WarpX::WarpX () Afield_dotMask.resize(nlevs_max); phi_dotMask.resize(nlevs_max); + m_eb_update_E.resize(nlevs_max); + m_eb_update_B.resize(nlevs_max); m_flag_info_face.resize(nlevs_max); m_flag_ext_face.resize(nlevs_max); m_borrowing.resize(nlevs_max); @@ -2295,6 +2297,21 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm // EB info are needed only at the finest level if (lev == maxLevel()) { if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { + + AllocInitMultiFab(m_eb_update_E[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_eb_update_E[x]"); + AllocInitMultiFab(m_eb_update_E[lev][1], amrex::convert(ba, Ey_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_eb_update_E[y]"); + AllocInitMultiFab(m_eb_update_E[lev][2], amrex::convert(ba, Ez_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_eb_update_E[z]"); + + AllocInitMultiFab(m_eb_update_B[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_eb_update_B[x]"); + AllocInitMultiFab(m_eb_update_B[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_eb_update_B[y]"); + AllocInitMultiFab(m_eb_update_B[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, + guard_cells.ng_FieldSolver, lev, "m_eb_update_B[z]"); + //! EB: Lengths of the mesh edges m_fields.alloc_init(FieldType::edge_lengths, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); From d07326b98f5409b245be4c08f4759384420f452e Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 21 Jan 2025 09:54:59 -0800 Subject: [PATCH 158/278] Doc: More Developer Packages (#5568) Add missing developer packages to our conda developer environment. 
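For instance, an existing environment can be brought up to date in place
(a sketch: the package list is taken from the documentation change below,
and the environment name is the one our docs create):

    conda install -n warpx-cpu-mpich-dev -c conda-forge packaging pytest python-build setuptools wheel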
X-ref: https://github.com/ECP-WarpX/impactx/pull/801 --- Docs/source/install/dependencies.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index 200677807d7..fb1f95e5eac 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -80,7 +80,7 @@ Conda (Linux/macOS/Windows) .. code-block:: bash - conda create -n warpx-cpu-mpich-dev -c conda-forge blaspp boost ccache cmake compilers git lapackpp "openpmd-api=*=mpi_mpich*" openpmd-viewer python make numpy pandas scipy yt "fftw=*=mpi_mpich*" pkg-config matplotlib mamba mpich mpi4py ninja pip virtualenv + conda create -n warpx-cpu-mpich-dev -c conda-forge blaspp boost ccache cmake compilers git lapackpp "openpmd-api=*=mpi_mpich*" openpmd-viewer packaging pytest python python-build make numpy pandas scipy setuptools yt "fftw=*=mpi_mpich*" pkg-config matplotlib mamba mpich mpi4py ninja pip virtualenv wheel conda activate warpx-cpu-mpich-dev # compile WarpX with -DWarpX_MPI=ON @@ -90,7 +90,7 @@ Conda (Linux/macOS/Windows) .. code-block:: bash - conda create -n warpx-cpu-dev -c conda-forge blaspp boost ccache cmake compilers git lapackpp openpmd-api openpmd-viewer python make numpy pandas scipy yt fftw pkg-config matplotlib mamba ninja pip virtualenv + conda create -n warpx-cpu-dev -c conda-forge blaspp boost ccache cmake compilers git lapackpp openpmd-api openpmd-viewer packaging pytest python python-build make numpy pandas scipy setuptools yt fftw pkg-config matplotlib mamba ninja pip virtualenv wheel conda activate warpx-cpu-dev # compile WarpX with -DWarpX_MPI=OFF From 11808eac8d7e630e0170c75b726f5e365c5c70d1 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Tue, 21 Jan 2025 19:29:34 +0100 Subject: [PATCH 159/278] Move `CheckDims` and `ParseGeometryInput` out of WarpXUtil (#5354) This PR moves the function `ParseGeometryInput` into `WarpXAMReXInit.cpp`, and the function `CheckDims` into `WarpXInit.H/cpp`. It also makes sure that `CheckDims` is called only once. Note that the check on `geom.ProbLo(0)` depending on the EM/ES solver is extracted from `ParseGeometryInput` and placed inside `WarpX.cpp` . Note also that `parse_geometry_input`, despite being conceptually related to the initialization of AMReX, must run **after** having called `amrex::Initialize` . The idea is to distribute the content of the miscellaneous `WarpXUtil.H/cpp` files into more appropriate source files. --- Source/Initialization/WarpXAMReXInit.cpp | 85 ++++++++++++++++++-- Source/Initialization/WarpXInit.H | 4 + Source/Initialization/WarpXInit.cpp | 32 ++++++++ Source/Utils/WarpXUtil.H | 6 -- Source/Utils/WarpXUtil.cpp | 99 ------------------------ Source/WarpX.cpp | 18 ++++- 6 files changed, 128 insertions(+), 116 deletions(-) diff --git a/Source/Initialization/WarpXAMReXInit.cpp b/Source/Initialization/WarpXAMReXInit.cpp index 5009d2def59..2c43168f5ef 100644 --- a/Source/Initialization/WarpXAMReXInit.cpp +++ b/Source/Initialization/WarpXAMReXInit.cpp @@ -7,13 +7,17 @@ #include "Initialization/WarpXAMReXInit.H" +#include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include +#include #include +#include #include -#include +#include +#include #include namespace { @@ -105,6 +109,66 @@ namespace { set_device_synchronization(); override_default_tiling_option_for_particles(); } + + /** Parse prob_lo and hi + * + * Parse prob_lo and hi evaluating any expressions since geometry + * does not parse its input. 
Note that this operation has to be + * performed after having initialized AMReX + */ + void parse_geometry_input () + { + auto pp_geometry = amrex::ParmParse {"geometry"}; + + auto prob_lo = amrex::Vector(AMREX_SPACEDIM); + auto prob_hi = amrex::Vector(AMREX_SPACEDIM); + + utils::parser::getArrWithParser( + pp_geometry, "prob_lo", prob_lo, 0, AMREX_SPACEDIM); + utils::parser::getArrWithParser( + pp_geometry, "prob_hi", prob_hi, 0, AMREX_SPACEDIM); + + AMREX_ALWAYS_ASSERT(prob_lo.size() == AMREX_SPACEDIM); + AMREX_ALWAYS_ASSERT(prob_hi.size() == AMREX_SPACEDIM); + + pp_geometry.addarr("prob_lo", prob_lo); + pp_geometry.addarr("prob_hi", prob_hi); + + // Parse amr input, evaluating any expressions since amr does not parse its input + auto pp_amr = amrex::ParmParse{"amr"}; + + // Note that n_cell is replaced so that only the parsed version is written out to the + // warpx_job_info file. This must be done since yt expects to be able to parse + // the value of n_cell from that file. For the rest, this doesn't matter. + auto preparse_amrex_input_int_array = + [&pp_amr](const std::string& input_str, const bool replace = false) + { + const auto *const c_input_str = input_str.c_str(); + if (pp_amr.contains(c_input_str)) { + amrex::Vector input_array; + utils::parser::getArrWithParser(pp_amr,c_input_str, input_array); + if (replace) { + pp_amr.remove(c_input_str); + } + pp_amr.addarr(c_input_str, input_array); + } + }; + + preparse_amrex_input_int_array("n_cell", true); + + const auto params_to_parse = std::vector{ + "max_grid_size", "max_grid_size_x", "max_grid_size_y", "max_grid_size_z", + "blocking_factor", "blocking_factor_x", "blocking_factor_y", "blocking_factor_z"}; + std::for_each(params_to_parse.begin(), params_to_parse.end(), preparse_amrex_input_int_array); + } + + /** This method groups calls to functions related to the initialization of AMReX + * that can run only after having called amrex::Initialize + */ + void amrex_post_initialize () + { + parse_geometry_input(); + } } namespace warpx::initialization @@ -113,13 +177,18 @@ namespace warpx::initialization amrex::AMReX* amrex_init (int& argc, char**& argv, bool build_parm_parse) { - return amrex::Initialize( - argc, - argv, - build_parm_parse, - MPI_COMM_WORLD, - ::overwrite_amrex_parser_defaults - ); + amrex::AMReX* amrex = + amrex::Initialize( + argc, + argv, + build_parm_parse, + MPI_COMM_WORLD, + ::overwrite_amrex_parser_defaults + ); + + ::amrex_post_initialize(); + + return amrex; } } diff --git a/Source/Initialization/WarpXInit.H b/Source/Initialization/WarpXInit.H index ce179e2e997..cb9de99c3bc 100644 --- a/Source/Initialization/WarpXInit.H +++ b/Source/Initialization/WarpXInit.H @@ -25,6 +25,10 @@ namespace warpx::initialization * - the MPI library through the mpi_finalize helper function in ablastr */ void finalize_external_libraries(); + + /** Check that warpx.dims matches the binary name + */ + void check_dims (); } #endif //WARPX_INIT_H_ diff --git a/Source/Initialization/WarpXInit.cpp b/Source/Initialization/WarpXInit.cpp index 7e00760bf30..e9f3dc95a59 100644 --- a/Source/Initialization/WarpXInit.cpp +++ b/Source/Initialization/WarpXInit.cpp @@ -8,12 +8,16 @@ #include "WarpXInit.H" #include "Initialization/WarpXAMReXInit.H" +#include "Utils/TextMsg.H" #include +#include #include #include +#include + void warpx::initialization::initialize_external_libraries(int argc, char* argv[]) { ablastr::parallelization::mpi_init(argc, argv); @@ -27,3 +31,31 @@ void warpx::initialization::finalize_external_libraries() 
amrex::Finalize(); ablastr::parallelization::mpi_finalize(); } + +void warpx::initialization::check_dims() +{ + // Ensure that geometry.dims is set properly. +#if defined(WARPX_DIM_3D) + std::string const dims_compiled = "3"; +#elif defined(WARPX_DIM_XZ) + std::string const dims_compiled = "2"; +#elif defined(WARPX_DIM_1D_Z) + std::string const dims_compiled = "1"; +#elif defined(WARPX_DIM_RZ) + std::string const dims_compiled = "RZ"; +#endif + const amrex::ParmParse pp_geometry("geometry"); + std::string dims; + std::string dims_error = "The selected WarpX executable was built as '"; + dims_error.append(dims_compiled).append("'-dimensional, but the "); + if (pp_geometry.contains("dims")) { + pp_geometry.get("dims", dims); + dims_error.append("inputs file declares 'geometry.dims = ").append(dims).append("'.\n"); + dims_error.append("Please re-compile with a different WarpX_DIMS option or select the right executable name."); + } else { + dims = "Not specified"; + dims_error.append("inputs file does not declare 'geometry.dims'. Please add 'geometry.dims = "); + dims_error.append(dims_compiled).append("' to inputs file."); + } + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(dims == dims_compiled, dims_error); +} diff --git a/Source/Utils/WarpXUtil.H b/Source/Utils/WarpXUtil.H index 46399b439d6..808e0f7ab63 100644 --- a/Source/Utils/WarpXUtil.H +++ b/Source/Utils/WarpXUtil.H @@ -28,8 +28,6 @@ #include #include -void ParseGeometryInput(); - void ReadBoostedFrameParameters(amrex::Real& gamma_boost, amrex::Real& beta_boost, amrex::Vector& boost_direction); @@ -44,10 +42,6 @@ void ConvertLabParamsToBoost(); */ void ReadBCParams (); -/** Check the warpx.dims matches the binary name - */ -void CheckDims (); - /** Check the warpx.dims matches the binary name & set up RZ gridding * * Ensures that the blocks are setup correctly for the RZ spectral solver diff --git a/Source/Utils/WarpXUtil.cpp b/Source/Utils/WarpXUtil.cpp index d6f465fa901..dcaa3118ab4 100644 --- a/Source/Utils/WarpXUtil.cpp +++ b/Source/Utils/WarpXUtil.cpp @@ -43,74 +43,6 @@ using namespace amrex; -void PreparseAMReXInputIntArray(amrex::ParmParse& a_pp, char const * const input_str, const bool replace) -{ - const int cnt = a_pp.countval(input_str); - if (cnt > 0) { - Vector input_array; - utils::parser::getArrWithParser(a_pp, input_str, input_array); - if (replace) { - a_pp.remove(input_str); - } - a_pp.addarr(input_str, input_array); - } -} - -void ParseGeometryInput() -{ - // Ensure that geometry.dims is set properly. 
- CheckDims(); - - // Parse prob_lo and hi, evaluating any expressions since geometry does not - // parse its input - ParmParse pp_geometry("geometry"); - - Vector prob_lo(AMREX_SPACEDIM); - Vector prob_hi(AMREX_SPACEDIM); - - utils::parser::getArrWithParser( - pp_geometry, "prob_lo", prob_lo, 0, AMREX_SPACEDIM); - AMREX_ALWAYS_ASSERT(prob_lo.size() == AMREX_SPACEDIM); - utils::parser::getArrWithParser( - pp_geometry, "prob_hi", prob_hi, 0, AMREX_SPACEDIM); - AMREX_ALWAYS_ASSERT(prob_hi.size() == AMREX_SPACEDIM); - -#ifdef WARPX_DIM_RZ - const ParmParse pp_algo("algo"); - auto electromagnetic_solver_id = ElectromagneticSolverAlgo::Default; - pp_algo.query_enum_sloppy("maxwell_solver", electromagnetic_solver_id, "-_"); - if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) - { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(prob_lo[0] == 0., - "Lower bound of radial coordinate (prob_lo[0]) with RZ PSATD solver must be zero"); - } - else - { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(prob_lo[0] >= 0., - "Lower bound of radial coordinate (prob_lo[0]) with RZ FDTD solver must be non-negative"); - } -#endif - - pp_geometry.addarr("prob_lo", prob_lo); - pp_geometry.addarr("prob_hi", prob_hi); - - // Parse amr input, evaluating any expressions since amr does not parse its input - ParmParse pp_amr("amr"); - - // Note that n_cell is replaced so that only the parsed version is written out to the - // warpx_job_info file. This must be done since yt expects to be able to parse - // the value of n_cell from that file. For the rest, this doesn't matter. - PreparseAMReXInputIntArray(pp_amr, "n_cell", true); - PreparseAMReXInputIntArray(pp_amr, "max_grid_size", false); - PreparseAMReXInputIntArray(pp_amr, "max_grid_size_x", false); - PreparseAMReXInputIntArray(pp_amr, "max_grid_size_y", false); - PreparseAMReXInputIntArray(pp_amr, "max_grid_size_z", false); - PreparseAMReXInputIntArray(pp_amr, "blocking_factor", false); - PreparseAMReXInputIntArray(pp_amr, "blocking_factor_x", false); - PreparseAMReXInputIntArray(pp_amr, "blocking_factor_y", false); - PreparseAMReXInputIntArray(pp_amr, "blocking_factor_z", false); -} - void ReadBoostedFrameParameters(Real& gamma_boost, Real& beta_boost, Vector& boost_direction) { @@ -352,40 +284,9 @@ namespace WarpXUtilIO{ } } -void CheckDims () -{ - // Ensure that geometry.dims is set properly. -#if defined(WARPX_DIM_3D) - std::string const dims_compiled = "3"; -#elif defined(WARPX_DIM_XZ) - std::string const dims_compiled = "2"; -#elif defined(WARPX_DIM_1D_Z) - std::string const dims_compiled = "1"; -#elif defined(WARPX_DIM_RZ) - std::string const dims_compiled = "RZ"; -#endif - const ParmParse pp_geometry("geometry"); - std::string dims; - std::string dims_error = "The selected WarpX executable was built as '"; - dims_error.append(dims_compiled).append("'-dimensional, but the "); - if (pp_geometry.contains("dims")) { - pp_geometry.get("dims", dims); - dims_error.append("inputs file declares 'geometry.dims = ").append(dims).append("'.\n"); - dims_error.append("Please re-compile with a different WarpX_DIMS option or select the right executable name."); - } else { - dims = "Not specified"; - dims_error.append("inputs file does not declare 'geometry.dims'. Please add 'geometry.dims = "); - dims_error.append(dims_compiled).append("' to inputs file."); - } - WARPX_ALWAYS_ASSERT_WITH_MESSAGE(dims == dims_compiled, dims_error); -} - void CheckGriddingForRZSpectral () { #ifdef WARPX_DIM_RZ - // Ensure that geometry.dims is set properly. 
- CheckDims(); - const ParmParse pp_algo("algo"); auto electromagnetic_solver_id = ElectromagneticSolverAlgo::Default; pp_algo.query_enum_sloppy("maxwell_solver", electromagnetic_solver_id, "-_"); diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 1c20c35578c..84a5d271a1e 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -36,6 +36,7 @@ #include "FieldSolver/WarpX_FDTD.H" #include "Filter/NCIGodfreyFilter.H" #include "Initialization/ExternalField.H" +#include "Initialization/WarpXInit.H" #include "Particles/MultiParticleContainer.H" #include "Fluids/MultiFluidContainer.H" #include "Fluids/WarpXFluidContainer.H" @@ -214,7 +215,7 @@ namespace void WarpX::MakeWarpX () { - ParseGeometryInput(); + warpx::initialization::check_dims(); ReadMovingWindowParameters( do_moving_window, start_moving_window_step, end_moving_window_step, @@ -467,8 +468,6 @@ WarpX::~WarpX () void WarpX::ReadParameters () { - // Ensure that geometry.dims is set properly. - CheckDims(); { const ParmParse pp;// Traditionally, max_step and stop_time do not have prefix. @@ -489,6 +488,19 @@ WarpX::ReadParameters () if (electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT && !EB::enabled()) { throw std::runtime_error("ECP Solver requires to enable embedded boundaries at runtime."); } +#ifdef WARPX_DIM_RZ + if (electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) + { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(Geom(0).ProbLo(0) == 0., + "Lower bound of radial coordinate (prob_lo[0]) with RZ PSATD solver must be zero"); + } + else + { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(Geom(0).ProbLo(0) >= 0., + "Lower bound of radial coordinate (prob_lo[0]) with RZ FDTD solver must be non-negative"); + } +#endif + pp_algo.query_enum_sloppy("evolve_scheme", evolve_scheme, "-_"); } From bc0ad1897b4009c14a117ed3818678959602b0c4 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Tue, 21 Jan 2025 23:07:00 +0100 Subject: [PATCH 160/278] WarpXUtil.H: remove unused function `getCellCoordinates` (#5583) --- Source/Utils/WarpXUtil.H | 38 -------------------------------------- 1 file changed, 38 deletions(-) diff --git a/Source/Utils/WarpXUtil.H b/Source/Utils/WarpXUtil.H index 808e0f7ab63..f76db974f9d 100644 --- a/Source/Utils/WarpXUtil.H +++ b/Source/Utils/WarpXUtil.H @@ -112,44 +112,6 @@ bool WriteBinaryDataOnFile(const std::string& filename, const amrex::Vector const mf_type, - amrex::GpuArray const domain_lo, - amrex::GpuArray const dx, - amrex::Real &x, amrex::Real &y, amrex::Real &z) -{ - using namespace amrex::literals; - x = domain_lo[0] + i*dx[0] + (1._rt - mf_type[0]) * dx[0]*0.5_rt; -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::ignore_unused(j); - y = 0._rt; - z = domain_lo[1] + k*dx[1] + (1._rt - mf_type[1]) * dx[1]*0.5_rt; -#else - y = domain_lo[1] + j*dx[1] + (1._rt - mf_type[1]) * dx[1]*0.5_rt; - z = domain_lo[2] + k*dx[2] + (1._rt - mf_type[2]) * dx[2]*0.5_rt; -#endif -} - -} - - namespace WarpXUtilLoadBalance { /** \brief We only want to update the cost data if the grids we are working on From d7b195fa5f68b74113871d62567a110de3cc1e4a Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Tue, 21 Jan 2025 23:09:06 +0100 Subject: [PATCH 161/278] WarpX.cpp: move free function `TagWithLevelSuffix` inside `WarpX::AllocInitMultiFab` (#5582) `TagWithLevelSuffix` is used only once inside `WarpX::AllocInitMultiFab`, and it can be replaced with a one-liner. 
For this reason, in order to simplify the WarpX class, I would suggest implementing the functionality directly inside `WarpX::AllocInitMultiFab`.
---
 Source/WarpX.cpp | 11 ++---------
 1 file changed, 2 insertions(+), 9 deletions(-)

diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index 84a5d271a1e..5cc486b9d12 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -3343,14 +3343,6 @@ WarpX::isAnyParticleBoundaryThermal ()
     return false;
 }

-std::string
-TagWithLevelSuffix (std::string name, int const level)
-{
-    // Add the suffix "[level=level]"
-    name.append("[level=").append(std::to_string(level)).append("]");
-    return name;
-}
-
 void
 WarpX::AllocInitMultiFab (
     std::unique_ptr& mf,
@@ -3362,7 +3354,8 @@ WarpX::AllocInitMultiFab (
     const std::string& name,
     std::optional initial_value)
 {
-    const auto name_with_suffix = TagWithLevelSuffix(name, level);
+    // Add the suffix "[level=level]"
+    const auto name_with_suffix = name + "[level=" + std::to_string(level) + "]";
     const auto tag = amrex::MFInfo().SetTag(name_with_suffix);
     mf = std::make_unique(ba, dm, ncomp, ngrow, tag);
     if (initial_value) {

From 1231aedf72eacdf34b64c8d54f280e146f7f73b6 Mon Sep 17 00:00:00 2001
From: Remi Lehe
Date: Tue, 21 Jan 2025 14:10:50 -0800
Subject: [PATCH 162/278] Modify stair-case approximation to the EB (#5534)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

# Overview

This PR changes the definition of the stair-case approximation of the EB (used in the Yee solver), so that the actual EB boundary (where e.g. particles are removed from the simulation) is **inside** the stair-case-approximated boundary - as opposed to the definition used in the current `development` branch, for which the actual EB is **outside** the stair-case-approximated boundary.

This ensures that the algorithm remains charge conserving, when charged particles are absorbed or emitted by the embedded boundary, **for particle shape of order 1**. (Higher-order particle shapes will be addressed in #5209.) As illustrated in the figure below (and as discussed in [this paper](https://www.researchgate.net/publication/318642364_Charge_Conserving_Emission_from_Conformal_Boundaries_in_Electromagnetic_PIC_simulations)), this is fundamentally because the particle does not deposit any charge in the valid cells, at the time when it is removed/emitted.

[figure: "Screenshot 2025-01-12 at 8 56 17 AM"; image not included in the plain-text patch]

(The black crosses show the locations where the electric field is not updated, and thus usually remains equal to 0. The red dots show the locations where the particle deposits charge, for particle shape of order 1.)

The better behavior with respect to charge-conservation can be observed in the following animations, which show two particles of opposing charge separating and going into the embedded boundary. (Note that a static error in `divE` remains at the position where the particle was absorbed, with the `development` branch. The propagating errors in `divE` are expected: they are due to electromagnetic waves reflecting on the EB.)
- **development branch**

![movie](https://github.com/user-attachments/assets/d486663d-a182-4751-b1d1-709b1a74ea44)

- **this PR**

![movie](https://github.com/user-attachments/assets/94a5dea3-2bb8-4548-b320-7615cac86fe7)

Input script: [inputs.txt](https://github.com/user-attachments/files/18428873/inputs.txt)
Analysis script: [openPMD-visualization.ipynb.txt](https://github.com/user-attachments/files/18428878/openPMD-visualization.ipynb.txt)

(An automated test using a similar configuration has been added in a separate, follow-up PR: https://github.com/ECP-WarpX/WarpX/pull/5562)

Note that, as part of this PR, the above new definition has been adopted for all the finite-difference solvers, except for the ECT solver (which uses a cut-cell representation instead of a stair-case representation).

# Implementation

This PR uses the changes of #5574. It still uses `MarkUpdateECellsECT` and `MarkUpdateBCellsECT` for the ECT solver - which preserve the previous behavior of the embedded boundary for this solver, but now uses `MarkUpdateCellsStairCase` for the other FDTD solvers - which introduces the above-mentioned new stair-case definition.
---
 .../embedded_boundary_cube/inputs_base_3d     |  4 ++--
 .../inputs_test_2d_embedded_boundary_cube     |  8 ++++----
 .../test_2d_embedded_boundary_cube.json       |  4 ++--
 .../benchmarks_json/test_2d_field_probe.json  |  8 ++++----
 .../benchmarks_json/test_3d_eb_picmi.json     | 14 ++++++-------
 .../test_3d_embedded_boundary_cube.json       | 10 +++++-----
 ...3d_embedded_boundary_cube_macroscopic.json | 10 +++++-----
 ...est_3d_embedded_boundary_rotated_cube.json |  3 +--
 .../test_3d_particle_absorption.json          | 14 ++++++-------
 .../test_3d_particle_scrape.json              | 14 ++++++-------
 .../test_3d_particle_scrape_picmi.json        | 14 ++++++-------
 ...test_rz_embedded_boundary_diffraction.json | 12 +++++------
 Source/EmbeddedBoundary/WarpXInitEB.cpp       |  7 +++----
 Source/Initialization/WarpXInitData.cpp       | 20 ++++++++++++++-----
 14 files changed, 75 insertions(+), 67 deletions(-)

diff --git a/Examples/Tests/embedded_boundary_cube/inputs_base_3d b/Examples/Tests/embedded_boundary_cube/inputs_base_3d
index 90ae2996635..70ddd8f8f64 100644
--- a/Examples/Tests/embedded_boundary_cube/inputs_base_3d
+++ b/Examples/Tests/embedded_boundary_cube/inputs_base_3d
@@ -13,8 +13,8 @@ boundary.field_lo = pec pec pec
 boundary.field_hi = pec pec pec

 eb2.geom_type = box
-eb2.box_lo = -0.5 -0.5 -0.5 # Ensures that the stair-case EB is exactly at -0.5
-eb2.box_hi = 0.5 0.5 0.5 # Ensures that the stair-case EB is exactly at 0.5
+eb2.box_lo = -0.501 -0.501 -0.501 # Ensures that the stair-case EB is exactly at -0.5
+eb2.box_hi = 0.501 0.501 0.501 # Ensures that the stair-case EB is exactly at 0.5
 eb2.box_has_fluid_inside = true
 # Alternatively one could use parser to build EB
 # Note that for amrex EB implicit function, >0 is covered, =0 is boundary and <0 is regular.
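(A quick aside on the sign convention in the comment above: the stand-alone sketch below evaluates an implicit function for the box defined by `eb2.box_lo`/`eb2.box_hi`. The function name and the driver are illustrative only, not AMReX API.)

```
#include <algorithm>
#include <cstdio>

// Sketch of the EB implicit-function convention noted above:
// f > 0 means covered, f == 0 means boundary, f < 0 means regular (fluid).
// Bounds mirror eb2.box_lo/hi = +/-0.501; since the fluid is inside the box,
// the covered region is the outside of the box.
double box_implicit_function (double x, double y, double z)
{
    const double lo = -0.501;
    const double hi =  0.501;
    // Positive outside [lo, hi] along each axis; the max over the axes is
    // therefore positive anywhere outside the box.
    const double fx = std::max(lo - x, x - hi);
    const double fy = std::max(lo - y, y - hi);
    const double fz = std::max(lo - z, z - hi);
    return std::max({fx, fy, fz});
}

int main ()
{
    std::printf("%f\n", box_implicit_function(0.0, 0.0, 0.0)); // -0.501: regular (inside)
    std::printf("%f\n", box_implicit_function(0.6, 0.0, 0.0)); //  0.099: covered (outside)
    return 0;
}
```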
diff --git a/Examples/Tests/embedded_boundary_cube/inputs_test_2d_embedded_boundary_cube b/Examples/Tests/embedded_boundary_cube/inputs_test_2d_embedded_boundary_cube index 684325dc030..46272052c2c 100644 --- a/Examples/Tests/embedded_boundary_cube/inputs_test_2d_embedded_boundary_cube +++ b/Examples/Tests/embedded_boundary_cube/inputs_test_2d_embedded_boundary_cube @@ -12,10 +12,10 @@ warpx.abort_on_warning_threshold = medium boundary.field_lo = pec pec boundary.field_hi = pec pec -my_constants.xmin = -0.5 -my_constants.zmin = -0.5 -my_constants.xmax = 0.5 -my_constants.zmax = 0.5 +my_constants.xmin = -0.501 # Ensures that the stair-case EB is exactly at -0.5 +my_constants.zmin = -0.501 # Ensures that the stair-case EB is exactly at -0.5 +my_constants.xmax = 0.501 # Ensures that the stair-case EB is exactly at 0.5 +my_constants.zmax = 0.501 # Ensures that the stair-case EB is exactly at 0.5 # Note that for amrex EB implicit function, >0 is covered, =0 is boundary and <0 is regular. warpx.eb_implicit_function = "max(max(x+xmin,-(x+xmax)), max(z+zmin,-(z+zmax)))" diff --git a/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_cube.json b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_cube.json index a3e609bd9a9..dbb5ffa39ae 100644 --- a/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_cube.json +++ b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_cube.json @@ -3,8 +3,8 @@ "Bx": 9.263694545408503e-05, "By": 0.00031905198933489145, "Bz": 7.328424783762594e-05, - "Ex": 8553.906698053046, + "Ex": 8553.90669811286, "Ey": 60867.04830538045, - "Ez": 8.439422682267567e-07 + "Ez": 4.223902107031194e-06 } } \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_2d_field_probe.json b/Regression/Checksum/benchmarks_json/test_2d_field_probe.json index cb82acfc067..8aabe6c8301 100644 --- a/Regression/Checksum/benchmarks_json/test_2d_field_probe.json +++ b/Regression/Checksum/benchmarks_json/test_2d_field_probe.json @@ -1,10 +1,10 @@ { "lev=0": { "Bx": 0.0, - "By": 126826.78487921853, + "By": 123510.69657444415, "Bz": 0.0, - "Ex": 32517064310550.266, + "Ex": 31206368949280.34, "Ey": 0.0, - "Ez": 17321323003697.61 + "Ez": 16921005306450.537 } -} +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_3d_eb_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_eb_picmi.json index ad0d2cee5a3..1f9f0a77b5a 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_eb_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_3d_eb_picmi.json @@ -1,10 +1,10 @@ { "lev=0": { - "Bx": 148673.005859208, - "By": 148673.00585920806, - "Bz": 3371.758117878558, - "Ex": 55378581103426.71, - "Ey": 55378581103426.72, - "Ez": 68412803445328.25 + "Bx": 144495.08082507108, + "By": 144495.08082507114, + "Bz": 8481.958724628861, + "Ex": 54500496182517.92, + "Ey": 54500496182517.91, + "Ez": 70231240245509.39 } -} +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_cube.json b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_cube.json index 58ee8806540..9563c52adbe 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_cube.json +++ b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_cube.json @@ -1,10 +1,10 @@ { "lev=0": { - "Bx": 4.060477854092961e-18, + "Bx": 4.166971025838921e-18, "By": 0.006628374119786834, "Bz": 0.006628374119786834, - "Ex": 5102618.4711524295, - "Ey": 6.323754160591239e-05, - "Ez": 
6.323754160591239e-05 + "Ex": 5102618.471153786, + "Ey": 1.4283859321773714e-05, + "Ez": 1.4283859321773714e-05 } -} +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_cube_macroscopic.json b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_cube_macroscopic.json index 8cc6af7cb93..67bdbea18ca 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_cube_macroscopic.json +++ b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_cube_macroscopic.json @@ -1,10 +1,10 @@ { "lev=0": { - "Bx": 4.20930075273562e-18, + "Bx": 4.228863291892693e-18, "By": 0.005101824310293573, "Bz": 0.005101824310293573, - "Ex": 4414725.184731115, - "Ey": 6.32375413967707e-05, - "Ez": 6.32375413967707e-05 + "Ex": 4414725.184732471, + "Ey": 1.4283895626502055e-05, + "Ez": 1.4283895626502055e-05 } -} +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_rotated_cube.json b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_rotated_cube.json index b2b4aa569c1..118214948a5 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_rotated_cube.json +++ b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_rotated_cube.json @@ -7,5 +7,4 @@ "Ey": 1.042254197269831e+04, "Ez": 1.040011664019071e+04 } -} - +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_3d_particle_absorption.json b/Regression/Checksum/benchmarks_json/test_3d_particle_absorption.json index ce6e2fcf79b..3dc9d956b79 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_particle_absorption.json +++ b/Regression/Checksum/benchmarks_json/test_3d_particle_absorption.json @@ -1,10 +1,10 @@ { "lev=0": { - "Bx": 202106.71291347666, - "By": 202106.71291347663, - "Bz": 3371.897999274175, - "Ex": 38304043178806.11, - "Ey": 38304043178806.11, - "Ez": 83057027925874.84 + "Bx": 198610.0530604908, + "By": 198610.0530604909, + "Bz": 8482.656173586969, + "Ex": 37232105734622.53, + "Ey": 37232105734622.54, + "Ez": 85094015810307.19 } -} +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_3d_particle_scrape.json b/Regression/Checksum/benchmarks_json/test_3d_particle_scrape.json index b03a954397a..9437ebed275 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_particle_scrape.json +++ b/Regression/Checksum/benchmarks_json/test_3d_particle_scrape.json @@ -1,10 +1,10 @@ { "lev=0": { - "Bx": 148673.005859208, - "By": 148673.00585920803, - "Bz": 3371.758117878557, - "Ex": 55378581103426.695, - "Ey": 55378581103426.7, - "Ez": 68412803445328.25 + "Bx": 144495.08082507108, + "By": 144495.0808250711, + "Bz": 8481.95872462886, + "Ex": 54500496182517.914, + "Ey": 54500496182517.914, + "Ez": 70231240245509.4 } -} +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_3d_particle_scrape_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_particle_scrape_picmi.json index b03a954397a..1f9f0a77b5a 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_particle_scrape_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_3d_particle_scrape_picmi.json @@ -1,10 +1,10 @@ { "lev=0": { - "Bx": 148673.005859208, - "By": 148673.00585920803, - "Bz": 3371.758117878557, - "Ex": 55378581103426.695, - "Ey": 55378581103426.7, - "Ez": 68412803445328.25 + "Bx": 144495.08082507108, + "By": 144495.08082507114, + "Bz": 8481.958724628861, + "Ex": 54500496182517.92, + "Ey": 54500496182517.91, + "Ez": 
70231240245509.39 } -} +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_diffraction.json b/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_diffraction.json index 0e5fad8db8a..e4b9d9c07ff 100644 --- a/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_diffraction.json +++ b/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_diffraction.json @@ -1,10 +1,10 @@ { "lev=0": { - "Br": 6.821267675779345e-19, - "Bt": 5.564905732478707e-05, - "Bz": 2.368259586613272e-19, - "Er": 16503.98082446463, - "Et": 1.5299584682447838e-10, - "Ez": 1466.854467399728 + "Br": 6.7914286131989935e-19, + "Bt": 5.4557350206853276e-05, + "Bz": 2.357229221622199e-19, + "Er": 16481.39008058988, + "Et": 1.5258937379236053e-10, + "Ez": 1508.1064116028576 } } \ No newline at end of file diff --git a/Source/EmbeddedBoundary/WarpXInitEB.cpp b/Source/EmbeddedBoundary/WarpXInitEB.cpp index 6ff9a9ca2e0..87d7db0d4fc 100644 --- a/Source/EmbeddedBoundary/WarpXInitEB.cpp +++ b/Source/EmbeddedBoundary/WarpXInitEB.cpp @@ -320,14 +320,14 @@ WarpX::MarkUpdateCellsStairCase ( if (fab_type == amrex::FabType::regular) { // All cells in the box are regular - // Every cell in box is all regular: update field in every cell + // Every cell in box is regular: update field in every cell amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { eb_update_arr(i, j, k) = 1; }); } else if (fab_type == amrex::FabType::covered) { // All cells in the box are covered - // Every cell in box is all covered: do not update field + // Every cell in box is fully covered: do not update field amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { eb_update_arr(i, j, k) = 0; }); @@ -369,8 +369,7 @@ WarpX::MarkUpdateCellsStairCase ( for (int k_cell = k_start; k_cell <= k; ++k_cell) { // If one of the neighboring is either partially or fully covered // (i.e. if they are not regular cells), do not update field - // (Note that `flag` is a cell-centered object, and `isRegular` - // returns `false` if the cell is either partially or fully covered.) + // (`isRegular` returns `false` if the cell is either partially or fully covered.) 
if ( !flag(i_cell, j_cell, k_cell).isRegular() ) { eb_update = 0; } diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 36b6b2d2254..c8c3f57193d 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1253,13 +1253,23 @@ void WarpX::InitializeEBGridData (int lev) // Compute additional quantities required for the ECT solver MarkExtensionCells(); ComputeFaceExtensions(); + // Mark on which grid points E should be updated + MarkUpdateECellsECT( m_eb_update_E[lev], edge_lengths_lev ); + // Mark on which grid points B should be updated + MarkUpdateBCellsECT( m_eb_update_B[lev], face_areas_lev, edge_lengths_lev); + } else { + // Mark on which grid points E should be updated (stair-case approximation) + MarkUpdateCellsStairCase( + m_eb_update_E[lev], + m_fields.get_alldirs(FieldType::Efield_fp, lev), + eb_fact ); + // Mark on which grid points B should be updated (stair-case approximation) + MarkUpdateCellsStairCase( + m_eb_update_B[lev], + m_fields.get_alldirs(FieldType::Bfield_fp, lev), + eb_fact ); } - // Mark on which grid points E should be updated - MarkUpdateECellsECT( m_eb_update_E[lev], edge_lengths_lev ); - // Mark on which grid points B should be updated - MarkUpdateBCellsECT( m_eb_update_B[lev], face_areas_lev, edge_lengths_lev); - } ComputeDistanceToEB(); From 961d9a6af1f631ba3691fd7cd59a4319f344826e Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Tue, 21 Jan 2025 17:25:33 -0800 Subject: [PATCH 163/278] Use `self_fields_verbosity` value for magnetostatic solver (#5560) This is another temporary fix (similar to #5517) to close #5530. --- Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp | 3 +-- Source/WarpX.H | 1 + Source/WarpX.cpp | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp index ce39265e720..fb93342ed08 100644 --- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp +++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp @@ -136,13 +136,12 @@ WarpX::AddMagnetostaticFieldLabFrame() else { required_precision = 1e-11; } - const int verbosity = 2; computeVectorPotential( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), m_fields.get_mr_levels_alldirs(FieldType::vector_potential_fp_nodal, finest_level), required_precision, absolute_tolerance, magnetostatic_solver_max_iters, - verbosity + magnetostatic_solver_verbosity ); } diff --git a/Source/WarpX.H b/Source/WarpX.H index 98fe9b924fd..363b0f7ad75 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -903,6 +903,7 @@ public: // Magnetostatic Solver Interface MagnetostaticSolver::VectorPoissonBoundaryHandler m_vector_poisson_boundary_handler; int magnetostatic_solver_max_iters = 200; + int magnetostatic_solver_verbosity = 2; void ComputeMagnetostaticField (); void AddMagnetostaticFieldLabFrame (); void computeVectorPotential (ablastr::fields::MultiLevelVectorField const& curr, diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 5cc486b9d12..05bbc36854d 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -692,6 +692,7 @@ WarpX::ReadParameters () "To use the FFT Poisson solver, compile with WARPX_USE_FFT=ON."); #endif utils::parser::queryWithParser(pp_warpx, "self_fields_max_iters", magnetostatic_solver_max_iters); + utils::parser::queryWithParser(pp_warpx, 
"self_fields_verbosity", magnetostatic_solver_verbosity); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( ( From 88412f3c21ec9068e782680e56885465fed9e5ec Mon Sep 17 00:00:00 2001 From: Olga Shapoval <30510597+oshapoval@users.noreply.github.com> Date: Tue, 21 Jan 2025 20:22:34 -0800 Subject: [PATCH 164/278] Clean up analysis script of `test_rz_particle_boundary_interaction_picmi` (#5586) --- .../particle_boundary_interaction/analysis.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/Examples/Tests/particle_boundary_interaction/analysis.py b/Examples/Tests/particle_boundary_interaction/analysis.py index edf9d463f98..2e8b57cb1e6 100755 --- a/Examples/Tests/particle_boundary_interaction/analysis.py +++ b/Examples/Tests/particle_boundary_interaction/analysis.py @@ -29,20 +29,20 @@ z_analytic = -0.20531 print("NUMERICAL coordinates of the point of contact:") -print("x=%5.5f, y=%5.5f, z=%5.5f" % (x[0], y[0], z[0])) +print(f"x={x[0]:5.5f}, y={y[0]:5.5f}, z={z[0]:5.5f}") print("\n") print("ANALYTICAL coordinates of the point of contact:") -print("x=%5.5f, y=%5.5f, z=%5.5f" % (x_analytic, y_analytic, z_analytic)) +print(f"x={x_analytic:5.5f}, y={y_analytic:5.5f}, z={z_analytic:5.5f}") tolerance = 1e-5 -diff_x = np.abs((x[0] - x_analytic) / x_analytic) -diff_z = np.abs((z[0] - z_analytic) / z_analytic) +rel_err_x = np.abs((x[0] - x_analytic) / x_analytic) +rel_err_z = np.abs((z[0] - z_analytic) / z_analytic) print("\n") -print("percentage error for x = %5.4f %%" % (diff_x * 100)) -print("percentage error for z = %5.4f %%" % (diff_z * 100)) +print(f"Relative percentage error for x = {rel_err_x * 100:5.4f} %") +print(f"Relative percentage error for z = {rel_err_z * 100:5.4f} %") -assert (diff_x < tolerance) and (y[0] < 1e-8) and (diff_z < tolerance), ( +assert (rel_err_x < tolerance) and (y[0] < 1e-8) and (rel_err_z < tolerance), ( "Test particle_boundary_interaction did not pass" ) From eaaaef565a49aff3901127b680ec0f10567cf69f Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Tue, 21 Jan 2025 20:38:05 -0800 Subject: [PATCH 165/278] Use new stair-case approximation in hybrid solver (#5558) Merge #5534 first. This extends the changes of #5534 and #5574 to the hybrid solver. Note that this effectively changes the definition of the stair-cased embedded boundary for the hybrid solver. 
--------- Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../FiniteDifferenceSolver.H | 18 +-- .../HybridPICModel/HybridPICModel.H | 18 +-- .../HybridPICModel/HybridPICModel.cpp | 40 ++--- .../HybridPICSolveE.cpp | 146 ++++++++++-------- .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 9 +- Source/Fields.H | 4 +- Source/WarpX.H | 4 +- Source/WarpX.cpp | 2 + 8 files changed, 128 insertions(+), 113 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 7726a2ed5bd..19b822e3628 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -103,7 +103,7 @@ class FiniteDifferenceSolver * \param[out] Efield vector of electric field MultiFabs updated at a given level * \param[in] Bfield vector of magnetic field MultiFabs at a given level * \param[in] Jfield vector of current density MultiFabs at a given level - * \param[in] edge_lengths length of edges along embedded boundaries + * \param[in] eb_update_E indicate in which cell E should be updated (related to embedded boundaries) * \param[in] dt timestep of the simulation * \param[in] macroscopic_properties contains user-defined properties of the medium. */ @@ -147,7 +147,7 @@ class FiniteDifferenceSolver * \param[in] Bfield vector of magnetic field MultiFabs at a given level * \param[in] rhofield scalar ion charge density Multifab at a given level * \param[in] Pefield scalar electron pressure MultiFab at a given level - * \param[in] edge_lengths length of edges along embedded boundaries + * \param[in] eb_update_E indicate in which cell E should be updated (related to embedded boundaries) * \param[in] lev level number for the calculation * \param[in] hybrid_model instance of the hybrid-PIC model * \param[in] solve_for_Faraday boolean flag for whether the E-field is solved to be used in Faraday's equation @@ -158,7 +158,7 @@ class FiniteDifferenceSolver ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3> const& eb_update_E, int lev, HybridPICModel const* hybrid_model, bool solve_for_Faraday ); @@ -168,13 +168,13 @@ class FiniteDifferenceSolver * * \param[out] Jfield vector of current MultiFabs at a given level * \param[in] Bfield vector of magnetic field MultiFabs at a given level - * \param[in] edge_lengths length of edges along embedded boundaries + * \param[in] eb_update_E indicate in which cell E should be updated (related to embedded boundaries) * \param[in] lev level number for the calculation */ void CalculateCurrentAmpere ( ablastr::fields::VectorField& Jfield, ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3> const& eb_update_E, int lev ); private: @@ -243,7 +243,7 @@ class FiniteDifferenceSolver ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3> const& eb_update_E, int lev, HybridPICModel const* hybrid_model, bool solve_for_Faraday ); @@ -251,7 +251,7 @@ class FiniteDifferenceSolver void CalculateCurrentAmpereCylindrical ( 
ablastr::fields::VectorField& Jfield, ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3> const& eb_update_E, int lev ); @@ -347,7 +347,7 @@ class FiniteDifferenceSolver ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3> const& eb_update_E, int lev, HybridPICModel const* hybrid_model, bool solve_for_Faraday ); @@ -355,7 +355,7 @@ class FiniteDifferenceSolver void CalculateCurrentAmpereCartesian ( ablastr::fields::VectorField& Jfield, ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3> const& eb_update_E, int lev ); #endif diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 7e8dd260a6e..4b50c16a0c8 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -62,15 +62,15 @@ public: * subtracted as well. Used in the Ohm's law solver (kinetic-fluid hybrid model). * * \param[in] Bfield Magnetic field from which the current is calculated. - * \param[in] edge_lengths Length of cell edges taking embedded boundaries into account + * \param[in] eb_update_E Indicate in which cell J should be calculated (related to embedded boundaries). */ void CalculatePlasmaCurrent ( ablastr::fields::MultiLevelVectorField const& Bfield, - ablastr::fields::MultiLevelVectorField const& edge_lengths + amrex::Vector,3 > >& eb_update_E ); void CalculatePlasmaCurrent ( ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 >& eb_update_E, int lev ); @@ -83,7 +83,7 @@ public: ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelVectorField const& Bfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, + amrex::Vector,3 > >& eb_update_E, bool solve_for_Faraday) const; void HybridPICSolveE ( @@ -91,7 +91,7 @@ public: ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 >& eb_update_E, int lev, bool solve_for_Faraday) const; void HybridPICSolveE ( @@ -99,7 +99,7 @@ public: ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 >& eb_update_E, int lev, PatchType patch_type, bool solve_for_Faraday) const; void BfieldEvolveRK ( @@ -107,7 +107,7 @@ public: ablastr::fields::MultiLevelVectorField const& Efield, ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, + amrex::Vector,3 > >& eb_update_E, amrex::Real dt, DtType a_dt_type, amrex::IntVect ng, std::optional nodal_sync); @@ -116,7 +116,7 @@ public: ablastr::fields::MultiLevelVectorField const& Efield, ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, + 
amrex::Vector,3 > >& eb_update_E, amrex::Real dt, int lev, DtType dt_type, amrex::IntVect ng, std::optional nodal_sync); @@ -125,7 +125,7 @@ public: ablastr::fields::MultiLevelVectorField const& Efield, ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, + amrex::Vector,3 > >& eb_update_E, amrex::Real dt, DtType dt_type, amrex::IntVect ng, std::optional nodal_sync); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index abda59e40ba..ec3742d1ff8 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -250,18 +250,18 @@ void HybridPICModel::GetCurrentExternal () void HybridPICModel::CalculatePlasmaCurrent ( ablastr::fields::MultiLevelVectorField const& Bfield, - ablastr::fields::MultiLevelVectorField const& edge_lengths) + amrex::Vector,3 > >& eb_update_E) { auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - CalculatePlasmaCurrent(Bfield[lev], edge_lengths[lev], lev); + CalculatePlasmaCurrent(Bfield[lev], eb_update_E[lev], lev); } } void HybridPICModel::CalculatePlasmaCurrent ( ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 >& eb_update_E, const int lev) { WARPX_PROFILE("HybridPICModel::CalculatePlasmaCurrent()"); @@ -269,7 +269,7 @@ void HybridPICModel::CalculatePlasmaCurrent ( auto& warpx = WarpX::GetInstance(); ablastr::fields::VectorField current_fp_plasma = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_plasma, lev); warpx.get_pointer_fdtd_solver_fp(lev)->CalculateCurrentAmpere( - current_fp_plasma, Bfield, edge_lengths, lev + current_fp_plasma, Bfield, eb_update_E, lev ); // we shouldn't apply the boundary condition to J since J = J_i - J_e but @@ -293,7 +293,7 @@ void HybridPICModel::HybridPICSolveE ( ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelVectorField const& Bfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, + amrex::Vector,3 > >& eb_update_E, const bool solve_for_Faraday) const { auto& warpx = WarpX::GetInstance(); @@ -301,7 +301,7 @@ void HybridPICModel::HybridPICSolveE ( { HybridPICSolveE( Efield[lev], Jfield[lev], Bfield[lev], *rhofield[lev], - edge_lengths[lev], lev, solve_for_Faraday + eb_update_E[lev], lev, solve_for_Faraday ); } } @@ -311,13 +311,13 @@ void HybridPICModel::HybridPICSolveE ( ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 >& eb_update_E, const int lev, const bool solve_for_Faraday) const { WARPX_PROFILE("WarpX::HybridPICSolveE()"); HybridPICSolveE( - Efield, Jfield, Bfield, rhofield, edge_lengths, lev, + Efield, Jfield, Bfield, rhofield, eb_update_E, lev, PatchType::fine, solve_for_Faraday ); if (lev > 0) @@ -332,7 +332,7 @@ void HybridPICModel::HybridPICSolveE ( ablastr::fields::VectorField const& Jfield, ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 >& eb_update_E, const int lev, PatchType 
patch_type, const bool solve_for_Faraday) const { @@ -344,7 +344,7 @@ void HybridPICModel::HybridPICSolveE ( // Solve E field in regular cells warpx.get_pointer_fdtd_solver_fp(lev)->HybridPICSolveE( Efield, current_fp_plasma, Jfield, Bfield, rhofield, - *electron_pressure_fp, edge_lengths, lev, this, solve_for_Faraday + *electron_pressure_fp, eb_update_E, lev, this, solve_for_Faraday ); amrex::Real const time = warpx.gett_old(0) + warpx.getdt(0); warpx.ApplyEfieldBoundary(lev, patch_type, time); @@ -411,7 +411,7 @@ void HybridPICModel::BfieldEvolveRK ( ablastr::fields::MultiLevelVectorField const& Efield, ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, + amrex::Vector,3 > >& eb_update_E, amrex::Real dt, DtType dt_type, IntVect ng, std::optional nodal_sync ) { @@ -419,7 +419,7 @@ void HybridPICModel::BfieldEvolveRK ( for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { BfieldEvolveRK( - Bfield, Efield, Jfield, rhofield, edge_lengths, dt, lev, dt_type, + Bfield, Efield, Jfield, rhofield, eb_update_E, dt, lev, dt_type, ng, nodal_sync ); } @@ -430,7 +430,7 @@ void HybridPICModel::BfieldEvolveRK ( ablastr::fields::MultiLevelVectorField const& Efield, ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, + amrex::Vector,3 > >& eb_update_E, amrex::Real dt, int lev, DtType dt_type, IntVect ng, std::optional nodal_sync ) { @@ -457,7 +457,7 @@ void HybridPICModel::BfieldEvolveRK ( // The Runge-Kutta scheme begins here. // Step 1: FieldPush( - Bfield, Efield, Jfield, rhofield, edge_lengths, + Bfield, Efield, Jfield, rhofield, eb_update_E, 0.5_rt*dt, dt_type, ng, nodal_sync ); @@ -473,7 +473,7 @@ void HybridPICModel::BfieldEvolveRK ( // Step 2: FieldPush( - Bfield, Efield, Jfield, rhofield, edge_lengths, + Bfield, Efield, Jfield, rhofield, eb_update_E, 0.5_rt*dt, dt_type, ng, nodal_sync ); @@ -493,7 +493,7 @@ void HybridPICModel::BfieldEvolveRK ( // Step 3: FieldPush( - Bfield, Efield, Jfield, rhofield, edge_lengths, + Bfield, Efield, Jfield, rhofield, eb_update_E, dt, dt_type, ng, nodal_sync ); @@ -509,7 +509,7 @@ void HybridPICModel::BfieldEvolveRK ( // Step 4: FieldPush( - Bfield, Efield, Jfield, rhofield, edge_lengths, + Bfield, Efield, Jfield, rhofield, eb_update_E, 0.5_rt*dt, dt_type, ng, nodal_sync ); @@ -543,16 +543,16 @@ void HybridPICModel::FieldPush ( ablastr::fields::MultiLevelVectorField const& Efield, ablastr::fields::MultiLevelVectorField const& Jfield, ablastr::fields::MultiLevelScalarField const& rhofield, - ablastr::fields::MultiLevelVectorField const& edge_lengths, + amrex::Vector,3 > >& eb_update_E, amrex::Real dt, DtType dt_type, IntVect ng, std::optional nodal_sync ) { auto& warpx = WarpX::GetInstance(); // Calculate J = curl x B / mu0 - J_ext - CalculatePlasmaCurrent(Bfield, edge_lengths); + CalculatePlasmaCurrent(Bfield, eb_update_E); // Calculate the E-field from Ohm's law - HybridPICSolveE(Efield, Jfield, Bfield, rhofield, edge_lengths, true); + HybridPICSolveE(Efield, Jfield, Bfield, rhofield, eb_update_E, true); warpx.FillBoundaryE(ng, nodal_sync); // Push forward the B-field using Faraday's law amrex::Real const t_old = warpx.gett_old(0); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 76fedbf4dea..47e45bbe753 100644 --- 
a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -26,7 +26,7 @@ using namespace amrex; void FiniteDifferenceSolver::CalculateCurrentAmpere ( ablastr::fields::VectorField & Jfield, ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 > const& eb_update_E, int lev ) { // Select algorithm (The choice of algorithm is a runtime option, @@ -34,12 +34,12 @@ void FiniteDifferenceSolver::CalculateCurrentAmpere ( if (m_fdtd_algo == ElectromagneticSolverAlgo::HybridPIC) { #ifdef WARPX_DIM_RZ CalculateCurrentAmpereCylindrical ( - Jfield, Bfield, edge_lengths, lev + Jfield, Bfield, eb_update_E, lev ); #else CalculateCurrentAmpereCartesian ( - Jfield, Bfield, edge_lengths, lev + Jfield, Bfield, eb_update_E, lev ); #endif @@ -61,7 +61,7 @@ template void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( ablastr::fields::VectorField& Jfield, ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 > const& eb_update_E, int lev ) { @@ -92,12 +92,16 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( Array4 const& Bt = Bfield[1]->array(mfi); Array4 const& Bz = Bfield[2]->array(mfi); - amrex::Array4 lr, lt, lz; - + // Extract structures indicating where the fields + // should be updated, given the position of the embedded boundaries. + // The plasma current is stored at the same locations as the E-field, + // therefore the `eb_update_E` multifab also appropriately specifies + // where the plasma current should be calculated. + amrex::Array4 update_Jr_arr, update_Jt_arr, update_Jz_arr; if (EB::enabled()) { - lr = edge_lengths[0]->array(mfi); - lt = edge_lengths[1]->array(mfi); - lz = edge_lengths[2]->array(mfi); + update_Jr_arr = eb_update_E[0]->array(mfi); + update_Jt_arr = eb_update_E[1]->array(mfi); + update_Jz_arr = eb_update_E[2]->array(mfi); } // Extract stencil coefficients @@ -124,8 +128,10 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( // Jr calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ - // Skip if this cell is fully covered by embedded boundaries - if (lr && lr(i, j, 0) <= 0) { return; } + + // Skip field update in the embedded boundaries + if (update_Jr_arr && update_Jr_arr(i, j, 0) == 0) { return; } + // Mode m=0 Jr(i, j, 0, 0) = one_over_mu0 * ( - T_Algo::DownwardDz(Bt, coefs_z, n_coefs_z, i, j, 0, 0) @@ -148,8 +154,9 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( // Jt calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ - // In RZ Jt is associated with a mesh node, so we need to check if the mesh node is covered - if (lr && (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0)) { return; } + + // Skip field update in the embedded boundaries + if (update_Jt_arr && update_Jt_arr(i, j, 0) == 0) { return; } // r on a nodal point (Jt is nodal in r) Real const r = rmin + i*dr; @@ -194,8 +201,10 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCylindrical ( // Jz calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ - // Skip if this cell is fully covered by embedded boundaries - if (lz && lz(i, j, 0) <= 0) { return; } + + // Skip field update in the embedded boundaries + if (update_Jz_arr && update_Jz_arr(i, j, 0) == 0) { return; } + // r on a nodal point (Jz is nodal in r) Real const r = rmin + i*dr; // Off-axis, regular curl @@ -244,7 +253,7 @@ 
template void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( ablastr::fields::VectorField& Jfield, ablastr::fields::VectorField const& Bfield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 > const& eb_update_E, int lev ) { @@ -274,11 +283,16 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( Array4 const &By = Bfield[1]->const_array(mfi); Array4 const &Bz = Bfield[2]->const_array(mfi); - amrex::Array4 lx, ly, lz; + // Extract structures indicating where the fields + // should be updated, given the position of the embedded boundaries. + // The plasma current is stored at the same locations as the E-field, + // therefore the `eb_update_E` multifab also appropriately specifies + // where the plasma current should be calculated. + amrex::Array4 update_Jx_arr, update_Jy_arr, update_Jz_arr; if (EB::enabled()) { - lx = edge_lengths[0]->array(mfi); - ly = edge_lengths[1]->array(mfi); - lz = edge_lengths[2]->array(mfi); + update_Jx_arr = eb_update_E[0]->array(mfi); + update_Jy_arr = eb_update_E[1]->array(mfi); + update_Jz_arr = eb_update_E[2]->array(mfi); } // Extract stencil coefficients @@ -302,8 +316,9 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( // Jx calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // Skip if this cell is fully covered by embedded boundaries - if (lx && lx(i, j, k) <= 0) { return; } + + // Skip field update in the embedded boundaries + if (update_Jx_arr && update_Jx_arr(i, j, k) == 0) { return; } Jx(i, j, k) = one_over_mu0 * ( - T_Algo::DownwardDz(By, coefs_z, n_coefs_z, i, j, k) @@ -313,14 +328,10 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( // Jy calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // Skip if this cell is fully covered by embedded boundaries -#ifdef WARPX_DIM_3D - if (ly && ly(i,j,k) <= 0) { return; } -#elif defined(WARPX_DIM_XZ) - // In XZ Jy is associated with a mesh node, so we need to check if the mesh node is covered - amrex::ignore_unused(ly); - if (lx && (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0)) { return; } -#endif + + // Skip field update in the embedded boundaries + if (update_Jy_arr && update_Jy_arr(i, j, k) == 0) { return; } + Jy(i, j, k) = one_over_mu0 * ( - T_Algo::DownwardDx(Bz, coefs_x, n_coefs_x, i, j, k) + T_Algo::DownwardDz(Bx, coefs_z, n_coefs_z, i, j, k) @@ -329,8 +340,9 @@ void FiniteDifferenceSolver::CalculateCurrentAmpereCartesian ( // Jz calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // Skip if this cell is fully covered by embedded boundaries - if (lz && lz(i,j,k) <= 0) { return; } + + // Skip field update in the embedded boundaries + if (update_Jz_arr && update_Jz_arr(i, j, k) == 0) { return; } Jz(i, j, k) = one_over_mu0 * ( - T_Algo::DownwardDy(Bx, coefs_y, n_coefs_y, i, j, k) @@ -357,7 +369,7 @@ void FiniteDifferenceSolver::HybridPICSolveE ( ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 > const& eb_update_E, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday) { @@ -368,14 +380,14 @@ void FiniteDifferenceSolver::HybridPICSolveE ( HybridPICSolveECylindrical ( Efield, Jfield, Jifield, Bfield, rhofield, Pefield, - edge_lengths, lev, hybrid_model, solve_for_Faraday + eb_update_E, lev, hybrid_model, solve_for_Faraday ); #else HybridPICSolveECartesian ( Efield, Jfield, Jifield, Bfield, rhofield, Pefield, - 
edge_lengths, lev, hybrid_model, solve_for_Faraday + eb_update_E, lev, hybrid_model, solve_for_Faraday ); #endif @@ -394,7 +406,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 > const& eb_update_E, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday ) { @@ -537,11 +549,13 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& rho = rhofield.const_array(mfi); Array4 const& Pe = Pefield.const_array(mfi); - amrex::Array4 lr, lz; + // Extract structures indicating where the fields + // should be updated, given the position of the embedded boundaries + amrex::Array4 update_Er_arr, update_Et_arr, update_Ez_arr; if (EB::enabled()) { - lr = edge_lengths[0]->array(mfi); - // edge_lengths[1] is `lt` and is not needed - lz = edge_lengths[2]->array(mfi); + update_Er_arr = eb_update_E[0]->array(mfi); + update_Et_arr = eb_update_E[1]->array(mfi); + update_Ez_arr = eb_update_E[2]->array(mfi); } // Extract stencil coefficients @@ -563,8 +577,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Er calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ - // Skip if this cell is fully covered by embedded boundaries - if (lr && lr(i, j, 0) <= 0) { return; } + + // Skip field update in the embedded boundaries + if (update_Er_arr && update_Er_arr(i, j, 0) == 0) { return; } // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Er_stag, coarsen, i, j, 0, 0); @@ -605,8 +620,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Et calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ - // In RZ Et is associated with a mesh node, so we need to check if the mesh node is covered - if (lr && (lr(i, j, 0)<=0 || lr(i-1, j, 0)<=0 || lz(i, j-1, 0)<=0 || lz(i, j, 0)<=0)) { return; } + + // Skip field update in the embedded boundaries + if (update_Et_arr && update_Et_arr(i, j, 0) == 0) { return; } // r on a nodal grid (Et is nodal in r) Real const r = rmin + i*dr; @@ -648,8 +664,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Ez calculation [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ - // Skip field solve if this cell is fully covered by embedded boundaries - if (lz && lz(i,j,0) <= 0) { return; } + + // Skip field update in the embedded boundaries + if (update_Ez_arr && update_Ez_arr(i, j, 0) == 0) { return; } // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, 0, 0); @@ -705,7 +722,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( ablastr::fields::VectorField const& Bfield, amrex::MultiFab const& rhofield, amrex::MultiFab const& Pefield, - ablastr::fields::VectorField const& edge_lengths, + std::array< std::unique_ptr,3 > const& eb_update_E, int lev, HybridPICModel const* hybrid_model, const bool solve_for_Faraday ) { @@ -842,11 +859,13 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 const& rho = rhofield.const_array(mfi); Array4 const& Pe = Pefield.array(mfi); - amrex::Array4 lx, ly, lz; + // Extract structures indicating where the fields + // should be updated, given the position of the embedded boundaries + amrex::Array4 update_Ex_arr, update_Ey_arr, update_Ez_arr; if (EB::enabled()) { - lx = edge_lengths[0]->array(mfi); - ly = edge_lengths[1]->array(mfi); - lz = 
edge_lengths[2]->array(mfi); + update_Ex_arr = eb_update_E[0]->array(mfi); + update_Ey_arr = eb_update_E[1]->array(mfi); + update_Ez_arr = eb_update_E[2]->array(mfi); } // Extract stencil coefficients @@ -866,8 +885,9 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Ex calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ - // Skip if this cell is fully covered by embedded boundaries - if (lx && lx(i, j, k) <= 0) { return; } + + // Skip field update in the embedded boundaries + if (update_Ex_arr && update_Ex_arr(i, j, k) == 0) { return; } // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ex_stag, coarsen, i, j, k, 0); @@ -905,14 +925,10 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Ey calculation [=] AMREX_GPU_DEVICE (int i, int j, int k) { - // Skip field solve if this cell is fully covered by embedded boundaries -#ifdef WARPX_DIM_3D - if (ly && ly(i,j,k) <= 0) { return; } -#elif defined(WARPX_DIM_XZ) - //In XZ Ey is associated with a mesh node, so we need to check if the mesh node is covered - amrex::ignore_unused(ly); - if (lx && (lx(i, j, k)<=0 || lx(i-1, j, k)<=0 || lz(i, j-1, k)<=0 || lz(i, j, k)<=0)) { return; } -#endif + + // Skip field update in the embedded boundaries + if (update_Ey_arr && update_Ey_arr(i, j, k) == 0) { return; } + // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ey_stag, coarsen, i, j, k, 0); @@ -949,10 +965,10 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Ez calculation [=] AMREX_GPU_DEVICE (int i, int j, int k){ -#ifdef AMREX_USE_EB - // Skip field solve if this cell is fully covered by embedded boundaries - if (lz && lz(i,j,k) <= 0) { return; } -#endif + + // Skip field update in the embedded boundaries + if (update_Ez_arr && update_Ez_arr(i, j, k) == 0) { return; } + // Interpolate to get the appropriate charge density in space Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, k, 0); diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 8e9e0daa274..46950030322 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -108,7 +108,7 @@ void WarpX::HybridPICEvolveFields () m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level), current_fp_temp, rho_fp_temp, - m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), + m_eb_update_E, 0.5_rt/sub_steps*dt[0], DtType::FirstHalf, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points @@ -135,7 +135,7 @@ void WarpX::HybridPICEvolveFields () m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level), m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), rho_fp_temp, - m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), + m_eb_update_E, 0.5_rt/sub_steps*dt[0], DtType::SecondHalf, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points @@ -166,14 +166,13 @@ void WarpX::HybridPICEvolveFields () // Update the E field to t=n+1 using the extrapolated J_i^n+1 value m_hybrid_pic_model->CalculatePlasmaCurrent( m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level), - m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level)); + m_eb_update_E); m_hybrid_pic_model->HybridPICSolveE( m_fields.get_mr_levels_alldirs(FieldType::Efield_fp, finest_level), current_fp_temp, 
m_fields.get_mr_levels_alldirs(FieldType::Bfield_fp, finest_level),
         m_fields.get_mr_levels(FieldType::rho_fp, finest_level),
-        m_fields.get_mr_levels_alldirs(FieldType::edge_lengths, finest_level), false
-    );
+        m_eb_update_E, false);
     FillBoundaryE(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points);
 
     // Copy the rho^{n+1} values to rho_fp_temp and the J_i^{n+1/2} values to
diff --git a/Source/Fields.H b/Source/Fields.H
index b07661254c4..77589c4675e 100644
--- a/Source/Fields.H
+++ b/Source/Fields.H
@@ -61,8 +61,8 @@ namespace warpx::fields
         E_external_particle_field, /**< Stores external particle fields provided by the user as through an openPMD file */
         B_external_particle_field, /**< Stores external particle fields provided by the user as through an openPMD file */
         distance_to_eb, /**< Only used with embedded boundaries (EB). Stores the distance to the nearest EB */
-        edge_lengths, /**< Only used with embedded boundaries (EB). Indicates the length of the cell edge that is covered by the EB, in SI units */
-        face_areas, /**< Only used with embedded boundaries (EB). Indicates the area of the cell face that is covered by the EB, in SI units */
+        edge_lengths, /**< Only used with the ECT solver. Indicates the length of the cell edge that is covered by the EB, in SI units */
+        face_areas, /**< Only used with the ECT solver. Indicates the area of the cell face that is covered by the EB, in SI units */
         area_mod,
         pml_E_fp,
         pml_B_fp,
diff --git a/Source/WarpX.H b/Source/WarpX.H
index 363b0f7ad75..fa577f5a0ee 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -928,11 +928,9 @@ public:
      * \param[in] fx_parser parser function to initialize x-field
      * \param[in] fy_parser parser function to initialize y-field
      * \param[in] fz_parser parser function to initialize z-field
-     * \param[in] edge_lengths edge lengths information
-     * \param[in] face_areas face areas information
-     * \param[in] topology flag indicating if field is edge-based or face-based
      * \param[in] lev level of the Multifabs that is initialized
      * \param[in] patch_type PatchType on which the field is initialized (fine or coarse)
+     * \param[in] eb_update_field flag indicating which gridpoints should be modified by this function
      */
     void ComputeExternalFieldOnGridUsingParser (
          warpx::fields::FieldType field,
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index 05bbc36854d..40fa75fda9b 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -2340,8 +2340,10 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm
                           dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt);
         m_fields.alloc_init(FieldType::face_areas, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag),
                           dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt);
+    }
 
     if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) {
+
         AllocInitMultiFab(m_flag_info_face[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps,
                           guard_cells.ng_FieldSolver, lev, "m_flag_info_face[x]");
         AllocInitMultiFab(m_flag_info_face[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps,

From 6e4def5e861b550af6ca91232266dc645096594d Mon Sep 17 00:00:00 2001
From: Luca Fedeli
Date: Wed, 22 Jan 2025 15:51:40 +0100
Subject: [PATCH 166/278] WarpXInitEB: fix "int eb_update" shadowing
 "std::array< std::unique_ptr<amrex::iMultiFab>,3> & eb_update" (#5591)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

This PR fixes the following issue:

```
/home/vsts/work/1/s/Source/EmbeddedBoundary/WarpXInitEB.cpp:366:25: warning: declaration of ‘int eb_update’ shadows a parameter
[-Wshadow]
  366 |                         int eb_update = 1;
      |                             ^~~~~~~~~
/home/vsts/work/1/s/Source/EmbeddedBoundary/WarpXInitEB.cpp:296:56: note: shadowed declaration is here
  296 |     std::array< std::unique_ptr<amrex::iMultiFab>,3> & eb_update,
      |     ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~^~~~~~~~~
```

Found in https://dev.azure.com/ECP-WarpX/WarpX/_build/results?buildId=20383&view=logs&j=8aa019fd-d859-51bf-081f-826e7fa9e37a
---
 Source/EmbeddedBoundary/WarpXInitEB.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/Source/EmbeddedBoundary/WarpXInitEB.cpp b/Source/EmbeddedBoundary/WarpXInitEB.cpp
index 87d7db0d4fc..6a6f594b380 100644
--- a/Source/EmbeddedBoundary/WarpXInitEB.cpp
+++ b/Source/EmbeddedBoundary/WarpXInitEB.cpp
@@ -363,7 +363,7 @@ WarpX::MarkUpdateCellsStairCase (
         int const k_start = k;
 #endif
         // Loop over neighboring cells
-        int eb_update = 1;
+        int eb_update_flag = 1;
         for (int i_cell = i_start; i_cell <= i; ++i_cell) {
             for (int j_cell = j_start; j_cell <= j; ++j_cell) {
                 for (int k_cell = k_start; k_cell <= k; ++k_cell) {
@@ -371,12 +371,12 @@
                     // If one of the neighboring cells is covered
                     // (i.e. if they are not regular cells), do not update field
                     // (`isRegular` returns `false` if the cell is either partially or fully covered.)
                     if ( !flag(i_cell, j_cell, k_cell).isRegular() ) {
-                        eb_update = 0;
+                        eb_update_flag = 0;
                     }
                 }
             }
         }
-        eb_update_arr(i, j, k) = eb_update;
+        eb_update_arr(i, j, k) = eb_update_flag;
     });
 }

From e0c17e755f318fdac59aa37a8e51e1bbef5e396a Mon Sep 17 00:00:00 2001
From: Luca Fedeli
Date: Wed, 22 Jan 2025 15:52:26 +0100
Subject: [PATCH 167/278] WarpX header: remove unused GetDistanceToEB function
 (#5589)

This PR removes a WarpX member function that is no longer used in WarpX.

---
 Source/WarpX.H | 7 +------
 1 file changed, 1 insertion(+), 6 deletions(-)

diff --git a/Source/WarpX.H b/Source/WarpX.H
index fa577f5a0ee..8d468c7ae2a 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -162,12 +162,7 @@ public:
     HybridPICModel& GetHybridPICModel () { return *m_hybrid_pic_model; }
     [[nodiscard]] HybridPICModel * get_pointer_HybridPICModel () const { return m_hybrid_pic_model.get(); }
     MultiDiagnostics& GetMultiDiags () {return *multi_diags;}
-#ifdef AMREX_USE_EB
-    ablastr::fields::MultiLevelScalarField GetDistanceToEB () {
-        using warpx::fields::FieldType;
-        return m_fields.get_mr_levels(FieldType::distance_to_eb, finestLevel());
-    }
-#endif
+
     ParticleBoundaryBuffer& GetParticleBoundaryBuffer () { return *m_particle_boundary_buffer; }
     amrex::Vector< std::array< std::unique_ptr<amrex::iMultiFab>,3 > >& GetEBUpdateEFlag() { return m_eb_update_E; }

From d5f9b578cae32fdce49c6abfb03c841f59c7274e Mon Sep 17 00:00:00 2001
From: Luca Fedeli
Date: Wed, 22 Jan 2025 19:04:33 +0100
Subject: [PATCH 168/278] WarpX class: move `ReorderFornbergCoefficients` from
 WarpX.H to WarpX.cpp (#5578)

`ReorderFornbergCoefficients` is a pure function only used inside `WarpX.cpp`.
Therefore, this PR moves it to an anonymous namespace inside `WarpX.cpp`
(previously, it was a member function of the WarpX class).

The final goal is to simplify the WarpX header.
---------

Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>
---
 Source/WarpX.H   | 12 ----------
 Source/WarpX.cpp | 57 ++++++++++++++++++++++++++++++++----------------
 2 files changed, 38 insertions(+), 31 deletions(-)

diff --git a/Source/WarpX.H b/Source/WarpX.H
index 8d468c7ae2a..c5a58febe69 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -1350,18 +1350,6 @@ private:
         return gather_buffer_masks[lev].get();
     }
-    /**
-     * \brief Re-orders the Fornberg coefficients so that they can be used more conveniently for
-     * finite-order centering operations. For example, for finite-order centering of order 6,
-     * the Fornberg coefficients \c (c_0,c_1,c_2) are re-ordered as \c (c_2,c_1,c_0,c_0,c_1,c_2).
-     *
-     * \param[in,out] ordered_coeffs host vector where the re-ordered Fornberg coefficients will be stored
-     * \param[in] unordered_coeffs host vector storing the original sequence of Fornberg coefficients
-     * \param[in] order order of the finite-order centering along a given direction
-     */
-    void ReorderFornbergCoefficients (amrex::Vector<amrex::Real>& ordered_coeffs,
-                                      amrex::Vector<amrex::Real>& unordered_coeffs,
-                                      int order);
     /**
      * \brief Allocates and initializes the stencil coefficients used for the finite-order centering
      * of fields and currents, and stores them in the given device vectors.
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index 40fa75fda9b..5085272acb1 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -211,6 +211,29 @@ namespace
             std::any_of(field_boundary_hi.begin(), field_boundary_hi.end(), is_pml);
         return is_any_pml;
     }
+
+    /**
+     * \brief Re-orders the Fornberg coefficients so that they can be used more conveniently for
+     * finite-order centering operations. For example, for finite-order centering of order 6,
+     * the Fornberg coefficients \c (c_0,c_1,c_2) are re-ordered as \c (c_2,c_1,c_0,c_0,c_1,c_2).
+     *
+     * \param[in,out] ordered_coeffs host vector where the re-ordered Fornberg coefficients will be stored
+     * \param[in] unordered_coeffs host vector storing the original sequence of Fornberg coefficients
+     * \param[in] order order of the finite-order centering along a given direction
+     */
+    void ReorderFornbergCoefficients (
+        amrex::Vector<amrex::Real>& ordered_coeffs,
+        const amrex::Vector<amrex::Real>& unordered_coeffs,
+        const int order)
+    {
+        const int n = order / 2;
+        for (int i = 0; i < n; i++) {
+            ordered_coeffs[i] = unordered_coeffs[n-1-i];
+        }
+        for (int i = n; i < order; i++) {
+            ordered_coeffs[i] = unordered_coeffs[i-n];
+        }
+    }
 }

 void WarpX::MakeWarpX ()
@@ -3224,19 +3247,6 @@ amrex::Vector<amrex::Real> WarpX::getFornbergStencilCoefficients (const int n_or
     return coeffs;
 }

-void WarpX::ReorderFornbergCoefficients (amrex::Vector<amrex::Real>& ordered_coeffs,
-                                         amrex::Vector<amrex::Real>& unordered_coeffs,
-                                         const int order)
-{
-    const int n = order / 2;
-    for (int i = 0; i < n; i++) {
-        ordered_coeffs[i] = unordered_coeffs[n-1-i];
-    }
-    for (int i = n; i < order; i++) {
-        ordered_coeffs[i] = unordered_coeffs[i-n];
-    }
-}
-
 void WarpX::AllocateCenteringCoefficients (amrex::Gpu::DeviceVector<amrex::Real>& device_centering_stencil_coeffs_x,
                                            amrex::Gpu::DeviceVector<amrex::Real>& device_centering_stencil_coeffs_y,
                                            amrex::Gpu::DeviceVector<amrex::Real>& device_centering_stencil_coeffs_z,
@@ -3265,12 +3275,21 @@ void WarpX::AllocateCenteringCoefficients (amrex::Gpu::DeviceVector<amrex::Real>
     // Re-order Fornberg stencil coefficients:
     // example for order 6: (c_0,c_1,c_2) becomes (c_2,c_1,c_0,c_0,c_1,c_2)
-    ReorderFornbergCoefficients(host_centering_stencil_coeffs_x,
-                                Fornberg_stencil_coeffs_x, centering_nox);
-    ReorderFornbergCoefficients(host_centering_stencil_coeffs_y,
-                                Fornberg_stencil_coeffs_y, centering_noy);
-    ReorderFornbergCoefficients(host_centering_stencil_coeffs_z,
-                                Fornberg_stencil_coeffs_z, centering_noz);
+    ::ReorderFornbergCoefficients(
+        host_centering_stencil_coeffs_x,
+        Fornberg_stencil_coeffs_x,
+        centering_nox
+    );
+    ::ReorderFornbergCoefficients(
+        host_centering_stencil_coeffs_y,
+        Fornberg_stencil_coeffs_y,
+        centering_noy
+    );
+    ::ReorderFornbergCoefficients(
+        host_centering_stencil_coeffs_z,
+        Fornberg_stencil_coeffs_z,
+        centering_noz
+    );

     // Device vectors of stencil coefficients used for finite-order centering

From 70dbbb18552ad0a5ae763f8e20fd3e98de63511a Mon Sep 17 00:00:00 2001
From: Remi Lehe
Date: Wed, 22 Jan 2025 11:35:05 -0800
Subject: [PATCH 169/278] Only allocate `edge_lengths` and `face_areas` when
 using the ECT solver (#5593)

Following #5558, the MultiFab `edge_lengths` and `face_areas` are not used
anymore, except for the ECT solver. Thus, only allocating them when using
the ECT solver saves a considerable amount of memory.
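
As a quick illustration of the allocation pattern this commit adopts, here is a
minimal, self-contained sketch. The `SolverAlgo`, `ToyMultiFab`, and
`ToyFieldRegistry` names are illustrative stand-ins, not WarpX API; the point is
only that the ECT-specific containers are created when, and only when, the ECT
solver is selected:

```cpp
#include <cstddef>
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <vector>

// Hypothetical stand-ins for the WarpX types involved.
enum class SolverAlgo { Yee, CKC, ECT };

struct ToyMultiFab { std::vector<double> data; };

struct ToyFieldRegistry
{
    std::map<std::string, std::unique_ptr<ToyMultiFab>> fields;

    void alloc_init (std::string const & name, std::size_t n)
    {
        auto mf = std::make_unique<ToyMultiFab>();
        mf->data.assign(n, 0.0);  // allocate and zero-initialize
        fields[name] = std::move(mf);
    }

    [[nodiscard]] bool has (std::string const & name) const
    {
        return fields.count(name) > 0;
    }
};

void alloc_level_fields (ToyFieldRegistry & reg, SolverAlgo solver, std::size_t n)
{
    // The EB update flags are needed by every EB-aware field solver ...
    reg.alloc_init("eb_update_E", n);
    reg.alloc_init("eb_update_B", n);

    // ... but edge lengths and face areas are only consumed by the ECT solver,
    // so skipping them for the other solvers saves their memory entirely.
    if (solver == SolverAlgo::ECT) {
        reg.alloc_init("edge_lengths", n);
        reg.alloc_init("face_areas", n);
    }
}

int main ()
{
    ToyFieldRegistry reg;
    alloc_level_fields(reg, SolverAlgo::Yee, 1024);
    std::cout << "edge_lengths allocated for Yee? "
              << (reg.has("edge_lengths") ? "yes" : "no") << "\n";  // prints "no"
}
```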
--- .../inputs_test_3d_embedded_boundary_picmi.py | 2 +- Source/Initialization/WarpXInitData.cpp | 19 ++++++++++++------- Source/WarpX.cpp | 5 ++--- 3 files changed, 15 insertions(+), 11 deletions(-) diff --git a/Examples/Tests/embedded_boundary_python_api/inputs_test_3d_embedded_boundary_picmi.py b/Examples/Tests/embedded_boundary_python_api/inputs_test_3d_embedded_boundary_picmi.py index 7148cde2d3e..7bd7cd68f25 100755 --- a/Examples/Tests/embedded_boundary_python_api/inputs_test_3d_embedded_boundary_picmi.py +++ b/Examples/Tests/embedded_boundary_python_api/inputs_test_3d_embedded_boundary_picmi.py @@ -43,7 +43,7 @@ flag_correct_div = False -solver = picmi.ElectromagneticSolver(grid=grid, method="Yee", cfl=1.0) +solver = picmi.ElectromagneticSolver(grid=grid, method="ECT", cfl=1.0) n_cavity = 30 L_cavity = n_cavity * unit diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index c8c3f57193d..69e87ba7b7e 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1241,23 +1241,27 @@ void WarpX::InitializeEBGridData (int lev) auto const eb_fact = fieldEBFactory(lev); - auto edge_lengths_lev = m_fields.get_alldirs(FieldType::edge_lengths, lev); - ComputeEdgeLengths(edge_lengths_lev, eb_fact); - ScaleEdges(edge_lengths_lev, CellSize(lev)); + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { - auto face_areas_lev = m_fields.get_alldirs(FieldType::face_areas, lev); - ComputeFaceAreas(face_areas_lev, eb_fact); - ScaleAreas(face_areas_lev, CellSize(lev)); + auto edge_lengths_lev = m_fields.get_alldirs(FieldType::edge_lengths, lev); + ComputeEdgeLengths(edge_lengths_lev, eb_fact); + ScaleEdges(edge_lengths_lev, CellSize(lev)); + + auto face_areas_lev = m_fields.get_alldirs(FieldType::face_areas, lev); + ComputeFaceAreas(face_areas_lev, eb_fact); + ScaleAreas(face_areas_lev, CellSize(lev)); - if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { // Compute additional quantities required for the ECT solver MarkExtensionCells(); ComputeFaceExtensions(); + // Mark on which grid points E should be updated MarkUpdateECellsECT( m_eb_update_E[lev], edge_lengths_lev ); // Mark on which grid points B should be updated MarkUpdateBCellsECT( m_eb_update_B[lev], face_areas_lev, edge_lengths_lev); + } else { + // Mark on which grid points E should be updated (stair-case approximation) MarkUpdateCellsStairCase( m_eb_update_E[lev], @@ -1268,6 +1272,7 @@ void WarpX::InitializeEBGridData (int lev) m_eb_update_B[lev], m_fields.get_alldirs(FieldType::Bfield_fp, lev), eb_fact ); + } } diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 5085272acb1..c08906cf289 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -2347,6 +2347,8 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm guard_cells.ng_FieldSolver, lev, "m_eb_update_B[y]"); AllocInitMultiFab(m_eb_update_B[lev][2], amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_eb_update_B[z]"); + } + if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { //! 
EB: Lengths of the mesh edges m_fields.alloc_init(FieldType::edge_lengths, Direction{0}, lev, amrex::convert(ba, Ex_nodal_flag), @@ -2364,9 +2366,6 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm m_fields.alloc_init(FieldType::face_areas, Direction{2}, lev, amrex::convert(ba, Bz_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, 0.0_rt); - } - if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { - AllocInitMultiFab(m_flag_info_face[lev][0], amrex::convert(ba, Bx_nodal_flag), dm, ncomps, guard_cells.ng_FieldSolver, lev, "m_flag_info_face[x]"); AllocInitMultiFab(m_flag_info_face[lev][1], amrex::convert(ba, By_nodal_flag), dm, ncomps, From f13c05a929a32f56290d2cb57e7e4beb5ecf35a2 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Wed, 22 Jan 2025 14:07:05 -0800 Subject: [PATCH 170/278] Add automated test for particle absorption on new stair-case approximation (#5562) This introduces automated tests in 2D, 3D and RZ, as a follow up to #5534, to check that a particle does not leave a spurious charge behind, when absorbed by the embedded boundary, and when using an EM solver. In this test, the embedded boundary is a cylinder aligned with the z axis. (In 2D, this reduces to two parallel plates.) The tests fail on the `development` branch, before #5534 is merged. They pass after #5534 is merged. - `development` branch, before #5534 is merged: ![movie](https://github.com/user-attachments/assets/7d00f181-0744-47bd-ab30-4cc0b9e62956) - after #5534 is merged: ![movie](https://github.com/user-attachments/assets/dc4af640-502b-4c42-a51d-f30007228061) Note that these tests fails for higher-order shapes. This will be fixed in https://github.com/ECP-WarpX/WarpX/pull/5209 --------- Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Examples/Tests/CMakeLists.txt | 1 + .../CMakeLists.txt | 39 ++++++++++++++ .../analysis.py | 53 +++++++++++++++++++ .../analysis_default_regression.py | 1 + .../inputs_base | 35 ++++++++++++ ...oundary_em_particle_absorption_sh_factor_1 | 11 ++++ ...oundary_em_particle_absorption_sh_factor_1 | 11 ++++ ...oundary_em_particle_absorption_sh_factor_1 | 11 ++++ ...ry_em_particle_absorption_sh_factor_1.json | 24 +++++++++ ...ry_em_particle_absorption_sh_factor_1.json | 24 +++++++++ ...ry_em_particle_absorption_sh_factor_1.json | 24 +++++++++ 11 files changed, 234 insertions(+) create mode 100644 Examples/Tests/embedded_boundary_em_particle_absorption/CMakeLists.txt create mode 100755 Examples/Tests/embedded_boundary_em_particle_absorption/analysis.py create mode 120000 Examples/Tests/embedded_boundary_em_particle_absorption/analysis_default_regression.py create mode 100644 Examples/Tests/embedded_boundary_em_particle_absorption/inputs_base create mode 100644 Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_2d_embedded_boundary_em_particle_absorption_sh_factor_1 create mode 100644 Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_3d_embedded_boundary_em_particle_absorption_sh_factor_1 create mode 100644 Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_rz_embedded_boundary_em_particle_absorption_sh_factor_1 create mode 100644 Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_em_particle_absorption_sh_factor_1.json create mode 100644 Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_em_particle_absorption_sh_factor_1.json create mode 100644 
Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_em_particle_absorption_sh_factor_1.json

diff --git a/Examples/Tests/CMakeLists.txt b/Examples/Tests/CMakeLists.txt
index c4713123ae6..d9e9404ae3e 100644
--- a/Examples/Tests/CMakeLists.txt
+++ b/Examples/Tests/CMakeLists.txt
@@ -15,6 +15,7 @@ add_subdirectory(electrostatic_sphere)
 add_subdirectory(electrostatic_sphere_eb)
 add_subdirectory(embedded_boundary_cube)
 add_subdirectory(embedded_boundary_diffraction)
+add_subdirectory(embedded_boundary_em_particle_absorption)
 add_subdirectory(embedded_boundary_python_api)
 add_subdirectory(embedded_boundary_rotated_cube)
 add_subdirectory(embedded_circle)
diff --git a/Examples/Tests/embedded_boundary_em_particle_absorption/CMakeLists.txt b/Examples/Tests/embedded_boundary_em_particle_absorption/CMakeLists.txt
new file mode 100644
index 00000000000..fb5d54c0dbe
--- /dev/null
+++ b/Examples/Tests/embedded_boundary_em_particle_absorption/CMakeLists.txt
@@ -0,0 +1,39 @@
+# Add tests (alphabetical order) ##############################################
+#
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_3d_embedded_boundary_em_particle_absorption_sh_factor_1 # name
+        3 # dims
+        1 # nprocs
+        inputs_test_3d_embedded_boundary_em_particle_absorption_sh_factor_1 # inputs
+        "analysis.py" # analysis
+        "analysis_default_regression.py --path diags/diag1" # checksum
+        OFF # dependency
+    )
+endif()
+
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_2d_embedded_boundary_em_particle_absorption_sh_factor_1 # name
+        2 # dims
+        1 # nprocs
+        inputs_test_2d_embedded_boundary_em_particle_absorption_sh_factor_1 # inputs
+        "analysis.py" # analysis
+        "analysis_default_regression.py --path diags/diag1" # checksum
+        OFF # dependency
+    )
+endif()
+
+if(WarpX_EB)
+    add_warpx_test(
+        test_rz_embedded_boundary_em_particle_absorption_sh_factor_1 # name
+        RZ # dims
+        1 # nprocs
+        inputs_test_rz_embedded_boundary_em_particle_absorption_sh_factor_1 # inputs
+        "analysis.py" # analysis
+        "analysis_default_regression.py --path diags/diag1" # checksum
+        OFF # dependency
+    )
+endif()
diff --git a/Examples/Tests/embedded_boundary_em_particle_absorption/analysis.py b/Examples/Tests/embedded_boundary_em_particle_absorption/analysis.py
new file mode 100755
index 00000000000..7647c23d846
--- /dev/null
+++ b/Examples/Tests/embedded_boundary_em_particle_absorption/analysis.py
@@ -0,0 +1,53 @@
+#!/usr/bin/env python
+
+"""
+This analysis script checks that there is no spurious charge build-up when a particle is absorbed by an embedded boundary.
+
+More specifically, this test simulates two particles of opposite charge that are initialized at
+the same position and then move in opposite directions. The particles are surrounded by a cylindrical
+embedded boundary, and are absorbed when their trajectory intersects this boundary. With an
+electromagnetic solver, this can lead to spurious charge build-up (i.e., div(E) != rho/epsilon_0)
+that remains at the position where the particle was absorbed.
+
+Note that, in this test, there will also be a (non-spurious) component of div(E) that propagates
+along the embedded boundary, due to electromagnetic waves reflecting on this boundary.
+When checking for static, spurious charge build-up, we average div(E) in time to remove this component.
+
+The test is performed in 2D, 3D and RZ.
+(In 2D, the cylindrical embedded boundary becomes two parallel plates)
+"""
+
+from openpmd_viewer import OpenPMDTimeSeries
+
+ts = OpenPMDTimeSeries("./diags/diag1/")
+
+divE_stacked = ts.iterate(
+    lambda iteration: ts.get_field("divE", iteration=iteration)[0]
+)
+start_avg_iter = 25
+end_avg_iter = 100
+divE_avg = divE_stacked[start_avg_iter:end_avg_iter].mean(axis=0)
+
+# Adjust the tolerance so that the remaining error due to the propagating
+# div(E) (after averaging) is below this tolerance, but so that any typical
+# spurious charge build-up is above this tolerance. This is dimension-dependent.
+dim = ts.fields_metadata["divE"]["geometry"]
+if dim == "3dcartesian":
+    tolerance = 7e-11
+elif dim == "2dcartesian":
+    tolerance = 3.5e-10
+elif dim == "thetaMode":
+    # In RZ: there are issues with divE on axis
+    # Set the few cells around the axis to 0 for this test
+    divE_avg[13:19] = 0
+    tolerance = 4e-12
+
+
+def check_tolerance(array, tolerance):
+    assert abs(array).max() <= tolerance, (
+        f"Test did not pass: the max error {abs(array).max()} exceeded the tolerance of {tolerance}."
+    )
+    print("All elements are within the tolerance.")
+
+
+check_tolerance(divE_avg, tolerance)
diff --git a/Examples/Tests/embedded_boundary_em_particle_absorption/analysis_default_regression.py b/Examples/Tests/embedded_boundary_em_particle_absorption/analysis_default_regression.py
new file mode 120000
index 00000000000..d8ce3fca419
--- /dev/null
+++ b/Examples/Tests/embedded_boundary_em_particle_absorption/analysis_default_regression.py
@@ -0,0 +1 @@
+../../analysis_default_regression.py
\ No newline at end of file
diff --git a/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_base b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_base
new file mode 100644
index 00000000000..6c940d2298e
--- /dev/null
+++ b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_base
@@ -0,0 +1,35 @@
+max_step = 100
+amr.max_level = 0
+amr.blocking_factor = 8
+amr.max_grid_size = 256
+
+algo.charge_deposition = standard
+algo.field_gathering = energy-conserving
+warpx.const_dt = 1.17957283598e-09
+warpx.use_filter = 0
+
+my_constants.R = 6.35
+warpx.eb_implicit_function = "(x**2 + y**2 - R**2)"
+
+particles.species_names = electron positron
+
+electron.charge = -q_e
+electron.mass = m_e
+electron.injection_style = "SingleParticle"
+electron.single_particle_pos = 0.0 0.0 0.0
+electron.single_particle_u = 1.e20 0.0 0.4843221e20 # gamma*beta
+electron.single_particle_weight = 1.0
+
+positron.charge = q_e
+positron.mass = m_e
+positron.injection_style = "SingleParticle"
+positron.single_particle_pos = 0.0 0.0 0.0
+positron.single_particle_u = -1.e20 0.0 -0.4843221e20 # gamma*beta
+positron.single_particle_weight = 1.0
+
+# Diagnostics
+diagnostics.diags_names = diag1
+diag1.intervals = 1
+diag1.diag_type = Full
+diag1.fields_to_plot = divE rho
+diag1.format = openpmd
diff --git a/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_2d_embedded_boundary_em_particle_absorption_sh_factor_1 b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_2d_embedded_boundary_em_particle_absorption_sh_factor_1
new file mode 100644
index 00000000000..99110df1634
--- /dev/null
+++ b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_2d_embedded_boundary_em_particle_absorption_sh_factor_1
@@ -0,0 +1,11 @@
+# base input parameters
+FILE = inputs_base
+
+geometry.dims = 2
+amr.n_cell = 32 32
+geometry.prob_lo = -10 -10
+geometry.prob_hi = 10 10
+boundary.field_lo = pec absorbing_silver_mueller +boundary.field_hi = pec absorbing_silver_mueller + +algo.particle_shape = 1 diff --git a/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_3d_embedded_boundary_em_particle_absorption_sh_factor_1 b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_3d_embedded_boundary_em_particle_absorption_sh_factor_1 new file mode 100644 index 00000000000..ea977877a2d --- /dev/null +++ b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_3d_embedded_boundary_em_particle_absorption_sh_factor_1 @@ -0,0 +1,11 @@ +# base input parameters +FILE = inputs_base + +geometry.dims = 3 +amr.n_cell = 32 32 32 +geometry.prob_lo = -10 -10 -10 +geometry.prob_hi = 10 10 10 +boundary.field_lo = pec pec absorbing_silver_mueller +boundary.field_hi = pec pec absorbing_silver_mueller + +algo.particle_shape = 1 diff --git a/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_rz_embedded_boundary_em_particle_absorption_sh_factor_1 b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_rz_embedded_boundary_em_particle_absorption_sh_factor_1 new file mode 100644 index 00000000000..7faf7fd8934 --- /dev/null +++ b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_rz_embedded_boundary_em_particle_absorption_sh_factor_1 @@ -0,0 +1,11 @@ +# base input parameters +FILE = inputs_base + +geometry.dims = RZ +amr.n_cell = 16 32 +geometry.prob_lo = 0 -10 +geometry.prob_hi = 10 10 +boundary.field_lo = none absorbing_silver_mueller +boundary.field_hi = pec absorbing_silver_mueller + +algo.particle_shape = 1 diff --git a/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_em_particle_absorption_sh_factor_1.json b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_em_particle_absorption_sh_factor_1.json new file mode 100644 index 00000000000..de3d125c744 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_em_particle_absorption_sh_factor_1.json @@ -0,0 +1,24 @@ +{ + "lev=0": { + "divE": 3.059581906777539e-08, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_em_particle_absorption_sh_factor_1.json b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_em_particle_absorption_sh_factor_1.json new file mode 100644 index 00000000000..d3e08d9723e --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_em_particle_absorption_sh_factor_1.json @@ -0,0 +1,24 @@ +{ + "lev=0": { + "divE": 4.928354322096152e-07, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} \ No newline at end of file diff 
--git a/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_em_particle_absorption_sh_factor_1.json b/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_em_particle_absorption_sh_factor_1.json new file mode 100644 index 00000000000..30d7d0ba081 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_em_particle_absorption_sh_factor_1.json @@ -0,0 +1,24 @@ +{ + "lev=0": { + "divE": 1.4599714697029335e-08, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} \ No newline at end of file From 040447c8727544c135def7ac9ccfd9f99ee2fbe9 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 22 Jan 2025 14:31:05 -0800 Subject: [PATCH 171/278] openPMD-api: 0.16.1+ (#5375) Update to the latest stable release of openPMD-api, version 0.16.1. --- .github/workflows/clang_tidy.yml | 2 +- .github/workflows/cuda.yml | 2 +- .github/workflows/dependencies/icc.sh | 2 +- Docs/source/developers/gnumake/openpmd.rst | 2 +- Docs/source/install/cmake.rst | 2 +- Docs/source/install/dependencies.rst | 2 +- Docs/source/usage/parameters.rst | 14 +++++++------- Source/Diagnostics/requirements.txt | 2 +- Source/Make.WarpX | 2 +- .../karolina-it4i/spack-karolina-cuda.yaml | 2 +- cmake/dependencies/openPMD.cmake | 6 +++--- setup.py | 2 +- 12 files changed, 20 insertions(+), 20 deletions(-) diff --git a/.github/workflows/clang_tidy.yml b/.github/workflows/clang_tidy.yml index 83d5b86c96b..dda7f2185f5 100644 --- a/.github/workflows/clang_tidy.yml +++ b/.github/workflows/clang_tidy.yml @@ -20,7 +20,7 @@ jobs: dim: [1, 2, RZ, 3] name: clang-tidy-${{ matrix.dim }}D runs-on: ubuntu-22.04 - timeout-minutes: 180 + timeout-minutes: 220 if: github.event.pull_request.draft == false steps: - uses: actions/checkout@v4 diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 1309dc4bb81..32aa1fe114f 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -56,7 +56,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" cmake-easyinstall --prefix=/usr/local \ - git+https://github.com/openPMD/openPMD-api.git@0.15.1 \ + git+https://github.com/openPMD/openPMD-api.git@0.16.1 \ -DopenPMD_USE_PYTHON=OFF \ -DBUILD_TESTING=OFF \ -DBUILD_EXAMPLES=OFF \ diff --git a/.github/workflows/dependencies/icc.sh b/.github/workflows/dependencies/icc.sh index fae6e22d45a..63763421d31 100755 --- a/.github/workflows/dependencies/icc.sh +++ b/.github/workflows/dependencies/icc.sh @@ -58,7 +58,7 @@ export CEI_TMP="/tmp/cei" CXX=$(which icpc) CC=$(which icc) \ cmake-easyinstall \ --prefix=/usr/local \ - git+https://github.com/openPMD/openPMD-api.git@0.15.2 \ + git+https://github.com/openPMD/openPMD-api.git@0.16.1 \ -DopenPMD_USE_PYTHON=OFF \ -DBUILD_TESTING=OFF \ -DBUILD_EXAMPLES=OFF \ diff --git a/Docs/source/developers/gnumake/openpmd.rst b/Docs/source/developers/gnumake/openpmd.rst index 3215c9461a1..0497ecf044b 100644 --- a/Docs/source/developers/gnumake/openpmd.rst +++ b/Docs/source/developers/gnumake/openpmd.rst @@ -9,7 +9,7 @@ therefore we recommend to use `spack `__ in order to facilitate the installation. 
More specifically, we recommend that you try installing the -`openPMD-api library 0.15.1 or newer `__ +`openPMD-api library 0.16.1 or newer `__ using spack (first section below). If this fails, a back-up solution is to install parallel HDF5 with spack, and then install the openPMD-api library from source. diff --git a/Docs/source/install/cmake.rst b/Docs/source/install/cmake.rst index f3f881d4504..5c02fb03b9e 100644 --- a/Docs/source/install/cmake.rst +++ b/Docs/source/install/cmake.rst @@ -128,7 +128,7 @@ CMake Option Default & Values Des ``WarpX_amrex_internal`` **ON**/OFF Needs a pre-installed AMReX library if set to ``OFF`` ``WarpX_openpmd_src`` *None* Path to openPMD-api source directory (preferred if set) ``WarpX_openpmd_repo`` ``https://github.com/openPMD/openPMD-api.git`` Repository URI to pull and build openPMD-api from -``WarpX_openpmd_branch`` ``0.15.2`` Repository branch for ``WarpX_openpmd_repo`` +``WarpX_openpmd_branch`` ``0.16.1`` Repository branch for ``WarpX_openpmd_repo`` ``WarpX_openpmd_internal`` **ON**/OFF Needs a pre-installed openPMD-api library if set to ``OFF`` ``WarpX_picsar_src`` *None* Path to PICSAR source directory (preferred if set) ``WarpX_picsar_repo`` ``https://github.com/ECP-WarpX/picsar.git`` Repository URI to pull and build PICSAR from diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index fb1f95e5eac..dcad6a00869 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -30,7 +30,7 @@ Optional dependencies include: - also needs the ``pkg-config`` tool on Unix - `BLAS++ `__ and `LAPACK++ `__: for spectral solver (PSATD) support in RZ geometry - `Boost 1.66.0+ `__: for QED lookup tables generation support -- `openPMD-api 0.15.1+ `__: we automatically download and compile a copy of openPMD-api for openPMD I/O support +- `openPMD-api 0.16.1+ `__: we automatically download and compile a copy of openPMD-api for openPMD I/O support - see `optional I/O backends `__, i.e., ADIOS2 and/or HDF5 - `Ascent 0.8.0+ `__: for in situ 3D visualization diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 31f0e06ab5b..7c92b5cf9e7 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2780,18 +2780,18 @@ In-situ capabilities can be used by turning on Sensei or Ascent (provided they a When WarpX is compiled with openPMD support, the first available backend in the order given above is taken. * ``.openpmd_encoding`` (optional, ``v`` (variable based), ``f`` (file based) or ``g`` (group based) ) only read if ``.format = openpmd``. - openPMD `file output encoding `__. + openPMD `file output encoding `__. File based: one file per timestep (slower), group/variable based: one file for all steps (faster)). - ``variable based`` is an `experimental feature with ADIOS2 `__ and not supported for back-transformed diagnostics. + ``variable based`` is an `experimental feature with ADIOS2 `__ and not supported for back-transformed diagnostics. Default: ``f`` (full diagnostics) * ``.adios2_operator.type`` (``zfp``, ``blosc``) optional, - `ADIOS2 I/O operator type `__ for `openPMD `_ data dumps. + `ADIOS2 I/O operator type `__ for `openPMD `_ data dumps. * ``.adios2_operator.parameters.*`` optional, - `ADIOS2 I/O operator parameters `__ for `openPMD `_ data dumps. + `ADIOS2 I/O operator parameters `__ for `openPMD `_ data dumps. 
- A typical example for `ADIOS2 output using lossless compression `__ with ``blosc`` using the ``zstd`` compressor and 6 CPU treads per MPI Rank (e.g. for a `GPU run with spare CPU resources `__): + A typical example for `ADIOS2 output using lossless compression `__ with ``blosc`` using the ``zstd`` compressor and 6 CPU treads per MPI Rank (e.g. for a `GPU run with spare CPU resources `__): .. code-block:: text @@ -2810,11 +2810,11 @@ In-situ capabilities can be used by turning on Sensei or Ascent (provided they a .adios2_operator.parameters.precision = 3 * ``.adios2_engine.type`` (``bp4``, ``sst``, ``ssc``, ``dataman``) optional, - `ADIOS2 Engine type `__ for `openPMD `_ data dumps. + `ADIOS2 Engine type `__ for `openPMD `_ data dumps. See full list of engines at `ADIOS2 readthedocs `__ * ``.adios2_engine.parameters.*`` optional, - `ADIOS2 Engine parameters `__ for `openPMD `_ data dumps. + `ADIOS2 Engine parameters `__ for `openPMD `_ data dumps. An example for parameters for the BP engine are setting the number of writers (``NumAggregators``), transparently redirecting data to burst buffers etc. A detailed list of engine-specific parameters are available at the official `ADIOS2 documentation `__ diff --git a/Source/Diagnostics/requirements.txt b/Source/Diagnostics/requirements.txt index d9f5cb553ee..9c85c8a621d 100644 --- a/Source/Diagnostics/requirements.txt +++ b/Source/Diagnostics/requirements.txt @@ -5,4 +5,4 @@ # License: BSD-3-Clause-LBNL # keep this entry for GitHub's dependency graph -openPMD-api>=0.15.1 +openPMD-api>=0.16.1 diff --git a/Source/Make.WarpX b/Source/Make.WarpX index 57bac56e9a4..6563121e336 100644 --- a/Source/Make.WarpX +++ b/Source/Make.WarpX @@ -151,7 +151,7 @@ endif ifeq ($(USE_OPENPMD), TRUE) # try pkg-config query - ifeq (0, $(shell pkg-config "openPMD >= 0.15.1"; echo $$?)) + ifeq (0, $(shell pkg-config "openPMD >= 0.16.1"; echo $$?)) CXXFLAGS += $(shell pkg-config --cflags openPMD) LIBRARY_LOCATIONS += $(shell pkg-config --variable=libdir openPMD) libraries += $(shell pkg-config --libs-only-l openPMD) diff --git a/Tools/machines/karolina-it4i/spack-karolina-cuda.yaml b/Tools/machines/karolina-it4i/spack-karolina-cuda.yaml index 1cb6a4ac209..ead49f06fab 100644 --- a/Tools/machines/karolina-it4i/spack-karolina-cuda.yaml +++ b/Tools/machines/karolina-it4i/spack-karolina-cuda.yaml @@ -20,7 +20,7 @@ spack: - py-cython - py-mpi4py - py-numpy@1.24.2 - - openpmd-api@0.15.2 +python + - openpmd-api@0.16.1 +python - py-periodictable@1.5.0 - py-h5py # optional diff --git a/cmake/dependencies/openPMD.cmake b/cmake/dependencies/openPMD.cmake index ce6ec4d0967..a5a80f25790 100644 --- a/cmake/dependencies/openPMD.cmake +++ b/cmake/dependencies/openPMD.cmake @@ -13,7 +13,7 @@ function(find_openpmd) if(WarpX_openpmd_internal OR WarpX_openpmd_src) set(CMAKE_POLICY_DEFAULT_CMP0077 NEW) - # see https://openpmd-api.readthedocs.io/en/0.15.2/dev/buildoptions.html + # see https://openpmd-api.readthedocs.io/en/0.16.1/dev/buildoptions.html set(openPMD_USE_ADIOS1 OFF CACHE INTERNAL "") set(openPMD_USE_MPI ${WarpX_MPI} CACHE INTERNAL "") set(openPMD_USE_PYTHON OFF CACHE INTERNAL "") @@ -71,7 +71,7 @@ function(find_openpmd) else() set(COMPONENT_WMPI NOMPI) endif() - find_package(openPMD 0.15.1 CONFIG REQUIRED COMPONENTS ${COMPONENT_WMPI}) + find_package(openPMD 0.16.1 CONFIG REQUIRED COMPONENTS ${COMPONENT_WMPI}) message(STATUS "openPMD-api: Found version '${openPMD_VERSION}'") endif() endfunction() @@ -87,7 +87,7 @@ if(WarpX_OPENPMD) set(WarpX_openpmd_repo 
"https://github.com/openPMD/openPMD-api.git" CACHE STRING "Repository URI to pull and build openPMD-api from if(WarpX_openpmd_internal)") - set(WarpX_openpmd_branch "0.15.2" + set(WarpX_openpmd_branch "0.16.1" CACHE STRING "Repository branch for WarpX_openpmd_repo if(WarpX_openpmd_internal)") diff --git a/setup.py b/setup.py index ad5501371c5..9538adcb106 100644 --- a/setup.py +++ b/setup.py @@ -318,7 +318,7 @@ def build_extension(self, ext): # }, extras_require={ "all": [ - "openPMD-api~=0.15.1", + "openPMD-api>=0.16.1", "openPMD-viewer~=1.1", "yt>=4.1.0", "matplotlib", From 4cae61128ee76306a665f1850af687f7dbead6ee Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 24 Jan 2025 12:09:41 -0800 Subject: [PATCH 172/278] Maintainer: Weekly Update Script (#5565) A weekly update script to simplify PRs like #5564 for @EZoni, me and other maintainers. --- Tools/Release/updateAMReX.py | 4 +- Tools/Release/updatePICSAR.py | 4 +- Tools/Release/updatepyAMReX.py | 4 +- Tools/Release/weeklyUpdate.py | 185 +++++++++++++++++++++++++++++++++ 4 files changed, 191 insertions(+), 6 deletions(-) create mode 100755 Tools/Release/weeklyUpdate.py diff --git a/Tools/Release/updateAMReX.py b/Tools/Release/updateAMReX.py index beeb12e85ff..99bd4899826 100755 --- a/Tools/Release/updateAMReX.py +++ b/Tools/Release/updateAMReX.py @@ -75,8 +75,8 @@ print(f"AMReX HEAD commit (development branch): {amrex_HEAD}") amrex_new_branch = input("Update AMReX commit/branch/sha: ").strip() if not amrex_new_branch: - amrex_new_branch = amrex_branch - print(f"--> Nothing entered, will keep: {amrex_branch}") + amrex_new_branch = amrex_HEAD + print(f"--> Nothing entered, use: {amrex_HEAD}") print() print( diff --git a/Tools/Release/updatePICSAR.py b/Tools/Release/updatePICSAR.py index 5148c16727e..90c928472a5 100755 --- a/Tools/Release/updatePICSAR.py +++ b/Tools/Release/updatePICSAR.py @@ -75,8 +75,8 @@ print(f"PICSAR HEAD commit (development branch): {PICSAR_HEAD}") PICSAR_new_branch = input("Update PICSAR commit/branch/sha: ").strip() if not PICSAR_new_branch: - PICSAR_new_branch = PICSAR_branch - print(f"--> Nothing entered, will keep: {PICSAR_branch}") + PICSAR_new_branch = PICSAR_HEAD + print(f"--> Nothing entered, will use: {PICSAR_HEAD}") print() print( diff --git a/Tools/Release/updatepyAMReX.py b/Tools/Release/updatepyAMReX.py index 68001222241..13c044b3bdd 100755 --- a/Tools/Release/updatepyAMReX.py +++ b/Tools/Release/updatepyAMReX.py @@ -77,8 +77,8 @@ print(f"pyAMReX HEAD commit (development branch): {pyamrex_HEAD}") pyamrex_new_branch = input("Update pyAMReX commit/branch/sha: ").strip() if not pyamrex_new_branch: - pyamrex_new_branch = pyamrex_branch - print(f"--> Nothing entered, will keep: {pyamrex_branch}") + pyamrex_new_branch = pyamrex_HEAD + print(f"--> Nothing entered, will use: {pyamrex_HEAD}") print() print( diff --git a/Tools/Release/weeklyUpdate.py b/Tools/Release/weeklyUpdate.py new file mode 100755 index 00000000000..005c8c5d373 --- /dev/null +++ b/Tools/Release/weeklyUpdate.py @@ -0,0 +1,185 @@ +#!/usr/bin/env python3 +# +# Copyright 2025 The WarpX Community +# +# This file is part of WarpX. +# +# Authors: Axel Huebl +# + +# This file is a maintainer tool to open a weekly dependency update PR for WarpX. 
+# +# You also need to have git and the GitHub CLI tool "gh" installed and properly +# configured for it to work: +# https://cli.github.com/ +# +import subprocess +import sys +from pathlib import Path + +# Maintainer Inputs ########################################################### + +print("""Hi there, this is a WarpX maintainer tool to ...\n. +For it to work, you need write access on the source directory and +you should be working in a clean git branch without ongoing +rebase/merge/conflict resolves and without unstaged changes.""") + +# check source dir +REPO_DIR = Path(__file__).parent.parent.parent.absolute() +print(f"\nYour current source directory is: {REPO_DIR}") + +REPLY = input("Are you sure you want to continue? [y/N] ") +print() +if REPLY not in ["Y", "y"]: + print("You did not confirm with 'y', aborting.") + sys.exit(1) + +update_repo = input("What is the name of your git remote? (e.g., ax3l) ") +commit_sign = input("How to sign the commit? (e.g., -sS) ") + + +# Helpers ##################################################################### + + +def concat_answers(answers): + return "\n".join(answers) + "\n" + + +# Stash current work ########################################################## + +subprocess.run(["git", "stash"], capture_output=True, text=True) + + +# Git Branch ################################################################## + +update_branch = "topic-amrexWeekly" +subprocess.run(["git", "checkout", "development"], capture_output=True, text=True) +subprocess.run(["git", "fetch"], capture_output=True, text=True) +subprocess.run(["git", "pull", "--ff-only"], capture_output=True, text=True) +subprocess.run(["git", "branch", "-D", update_branch], capture_output=True, text=True) +subprocess.run(["git", "checkout", "-b", update_branch], capture_output=True, text=True) + + +# AMReX New Version ########################################################### + +answers = concat_answers(["y", "", "", "y"]) + +process = subprocess.Popen( + [Path(REPO_DIR).joinpath("Tools/Release/updateAMReX.py")], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, +) + +process.communicate(answers) +del process + +# commit +subprocess.run(["git", "add", "-u"], capture_output=True, text=True) +amrex_diff = subprocess.run(["git", "diff", "--cached"], capture_output=True, text=True) +print("AMReX Commit...") +subprocess.run( + ["git", "commit", commit_sign, "-m", "AMReX: Weekly Update"], + capture_output=True, + text=True, +) + + +# PICSAR New Version ########################################################## + +PICSAR_version = "24.09" +answers = concat_answers(["y", PICSAR_version, PICSAR_version, "y"]) + +process = subprocess.Popen( + [Path(REPO_DIR).joinpath("Tools/Release/updatePICSAR.py")], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + text=True, +) + +process.communicate(answers) +del process + +# commit +subprocess.run(["git", "add", "-u"], capture_output=True, text=True) +picsar_diff = subprocess.run( + ["git", "diff", "--cached"], capture_output=True, text=True +) +print("PICSAR Commit...") +subprocess.run( + ["git", "commit", commit_sign, "-m", "PICSAR: Weekly Update"], + capture_output=True, + text=True, +) + + +# pyAMReX New Version ######################################################### + +answers = concat_answers(["y", "", "", "y"]) + +process = subprocess.Popen( + [Path(REPO_DIR).joinpath("Tools/Release/updatepyAMReX.py")], + stdin=subprocess.PIPE, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + 
text=True, +) + +process.communicate(answers) +del process + +# commit +subprocess.run(["git", "add", "-u"], capture_output=True, text=True) +pyamrex_diff = subprocess.run( + ["git", "diff", "--cached"], capture_output=True, text=True +) +print("pyAMReX Commit...") +subprocess.run( + ["git", "commit", commit_sign, "-m", "pyAMReX: Weekly Update"], + capture_output=True, + text=True, +) + +# GitHub PR ################################################################### + +subprocess.run(["git", "push", "-f", "-u", update_repo, update_branch], text=True) + +amrex_changes = " (no changes)" if amrex_diff.stdout == "" else "" +picsar_changes = " (no changes)" if picsar_diff.stdout == "" else "" +pyamrex_changes = " (no changes)" if pyamrex_diff.stdout == "" else "" + +subprocess.run( + [ + "gh", + "pr", + "create", + "--title", + "AMReX/pyAMReX/PICSAR: Weekly Update", + "--body", + f"""Weekly update to latest AMReX{amrex_changes}. +Weekly update to latest pyAMReX{pyamrex_changes}. +Weekly update to latest PICSAR{picsar_changes}. + +```console +./Tools/Release/updateAMReX.py +./Tools/Release/updatepyAMReX.py +./Tools/Release/updatePICSAR.py +``` +""", + "--label", + "component: documentation", + "--label", + "component: third party", + "--web", + ], + text=True, +) + + +# Epilogue #################################################################### + +print("""Done. Please check your source, e.g. via + git diff +now and commit the changes if no errors occurred.""") From 1f6737fb242b5ab1bf92389a5e7579796bb2c173 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 24 Jan 2025 14:45:15 -0800 Subject: [PATCH 173/278] AMReX/pyAMReX/PICSAR: Weekly Update (#5607) Weekly update to latest AMReX. Weekly update to latest pyAMReX. Weekly update to latest PICSAR. ```console ./Tools/Release/updateAMReX.py ./Tools/Release/updatepyAMReX.py ./Tools/Release/updatePICSAR.py ``` --- .github/workflows/cuda.yml | 2 +- cmake/dependencies/AMReX.cmake | 2 +- cmake/dependencies/PICSAR.cmake | 2 +- cmake/dependencies/pyAMReX.cmake | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 32aa1fe114f..0d8ad0e0566 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -127,7 +127,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach e761abff95afbfa442cbe108027094bbddef5b11 && cd - + cd ../amrex && git checkout --detach 0f46a1615c17f0bbeaedb20c27a97c9f6e439781 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 6bde3785176..d529712534b 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -294,7 +294,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "e761abff95afbfa442cbe108027094bbddef5b11" +set(WarpX_amrex_branch "0f46a1615c17f0bbeaedb20c27a97c9f6e439781" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/PICSAR.cmake b/cmake/dependencies/PICSAR.cmake index 9eb9162238a..067ea464d88 100644 --- a/cmake/dependencies/PICSAR.cmake +++ b/cmake/dependencies/PICSAR.cmake @@ -109,7 +109,7 @@ if(WarpX_QED) set(WarpX_picsar_repo "https://github.com/ECP-WarpX/picsar.git" CACHE STRING "Repository URI to pull and build PICSAR from if(WarpX_picsar_internal)") - set(WarpX_picsar_branch "24.09" + set(WarpX_picsar_branch "47b393993f860943e387b4b5d79407ee7f52d1ab" CACHE STRING "Repository branch for WarpX_picsar_repo if(WarpX_picsar_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index c9ee2732b62..3cb849587dc 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "47331d7891bda9b02e75cf452d2c55fe76c77d06" +set(WarpX_pyamrex_branch "6d9b9da849f5941777555ec9c9619be299d04912" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") From 1e82e895ac506963cf50cf8853e70806cdbe70bd Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 24 Jan 2025 23:46:15 +0100 Subject: [PATCH 174/278] WarpX class: make psatd_solution_type a private member variable (#5602) This PR changes `psatd_solution_type` from a static WarpX class variable to a private member variable (renamed `m_psatd_solution_type`). This is a small step towards reducing the use of static variables in the WarpX class. 
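
This commit, and several of the commits that follow, all apply the same
refactoring pattern: a static class variable becomes a private, `m_`-prefixed
member, so that the state is per-instance and is reached only through the
owning object. A small, self-contained sketch of the pattern (the class and
enum names here are placeholders, not the actual WarpX declarations):

```cpp
#include <iostream>

// Illustrative enum standing in for WarpX's PSATDSolutionType.
enum class SolutionType { FirstOrder, SecondOrder };

class Solver
{
public:
    // Access now goes through the owning object instead of the class itself.
    void SetSolutionType (SolutionType t) { m_solution_type = t; }
    [[nodiscard]] SolutionType GetSolutionType () const { return m_solution_type; }

private:
    // Before the refactor, this would have been shared, globally mutable state:
    //     static inline auto solution_type = SolutionType::SecondOrder;
    // After: a per-instance member following the m_ naming convention.
    SolutionType m_solution_type = SolutionType::SecondOrder;
};

int main ()
{
    Solver s;
    s.SetSolutionType(SolutionType::FirstOrder);
    std::cout << (s.GetSolutionType() == SolutionType::FirstOrder ? "first" : "second")
              << "-order\n";
}
```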
--- Source/Initialization/WarpXInitData.cpp | 4 ++-- Source/WarpX.H | 10 +++++----- Source/WarpX.cpp | 6 +++--- 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 69e87ba7b7e..6900061ea3f 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -730,7 +730,7 @@ WarpX::InitPML () pml_ncell, pml_delta, amrex::IntVect::TheZeroVector(), dt[0], nox_fft, noy_fft, noz_fft, grid_type, do_moving_window, pml_has_particles, do_pml_in_domain, - psatd_solution_type, J_in_time, rho_in_time, + m_psatd_solution_type, J_in_time, rho_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, amrex::IntVect(0), amrex::IntVect(0), eb_enabled, @@ -771,7 +771,7 @@ WarpX::InitPML () pml_ncell, pml_delta, refRatio(lev-1), dt[lev], nox_fft, noy_fft, noz_fft, grid_type, do_moving_window, pml_has_particles, do_pml_in_domain, - psatd_solution_type, J_in_time, rho_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, + m_psatd_solution_type, J_in_time, rho_in_time, do_pml_dive_cleaning, do_pml_divb_cleaning, amrex::IntVect(0), amrex::IntVect(0), eb_enabled, guard_cells.ng_FieldSolver.max(), diff --git a/Source/WarpX.H b/Source/WarpX.H index c5a58febe69..6e575a6da5d 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -244,11 +244,6 @@ public: ParticleBoundaryType::Default, ParticleBoundaryType::Default)}; - //! Integer that corresponds to the order of the PSATD solution - //! (whether the PSATD equations are derived from first-order or - //! second-order solution) - static inline auto psatd_solution_type = PSATDSolutionType::Default; - //! Integers that correspond to the time dependency of J (constant, linear) //! and rho (linear, quadratic) for the PSATD algorithm static inline auto J_in_time = JInTime::Default; @@ -1642,6 +1637,11 @@ private: */ void ExplicitFillBoundaryEBUpdateAux (); + //! Integer that corresponds to the order of the PSATD solution + //! (whether the PSATD equations are derived from first-order or + //! 
second-order solution) + PSATDSolutionType m_psatd_solution_type = PSATDSolutionType::Default; + void PushPSATD (amrex::Real start_time); #ifdef WARPX_USE_FFT diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index c08906cf289..900b70964b6 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -1502,14 +1502,14 @@ WarpX::ReadParameters () // Integer that corresponds to the order of the PSATD solution // (whether the PSATD equations are derived from first-order or // second-order solution) - pp_psatd.query_enum_sloppy("solution_type", psatd_solution_type, "-_"); + pp_psatd.query_enum_sloppy("solution_type", m_psatd_solution_type, "-_"); // Integers that correspond to the time dependency of J (constant, linear) // and rho (linear, quadratic) for the PSATD algorithm pp_psatd.query_enum_sloppy("J_in_time", J_in_time, "-_"); pp_psatd.query_enum_sloppy("rho_in_time", rho_in_time, "-_"); - if (psatd_solution_type != PSATDSolutionType::FirstOrder || !do_multi_J) + if (m_psatd_solution_type != PSATDSolutionType::FirstOrder || !do_multi_J) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( rho_in_time == RhoInTime::Linear, @@ -2904,7 +2904,7 @@ void WarpX::AllocLevelSpectralSolver (amrex::Vector Date: Fri, 24 Jan 2025 23:47:54 +0100 Subject: [PATCH 175/278] WarpX class: make zmax_plasma_to_compute_max_step a private member variable and remove do_compute_max_step_from_zmax (#5605) This PR combines the following static variables of the WarpX class: ``` static amrex::Real zmax_plasma_to_compute_max_step; static bool do_compute_max_step_from_zmax; ``` into a single private member variable: ``` std::optional m_zmax_plasma_to_compute_max_step = std::nullopt; ``` This is a small step towards reducing reliance on static class variables. --- Source/Initialization/WarpXInitData.cpp | 6 +++--- Source/WarpX.H | 14 ++++++-------- Source/WarpX.cpp | 9 +++------ 3 files changed, 12 insertions(+), 17 deletions(-) diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 6900061ea3f..7d7e9b5e601 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -530,7 +530,7 @@ WarpX::InitData () // WarpX::computeMaxStepBoostAccelerator // needs to start from the initial zmin_domain_boost, // even if restarting from a checkpoint file - if (do_compute_max_step_from_zmax) { + if (m_zmax_plasma_to_compute_max_step.has_value()) { zmin_domain_boost_step_0 = geom[0].ProbLo(WARPX_ZINDEX); } if (restart_chkfile.empty()) @@ -798,7 +798,7 @@ WarpX::ComputePMLFactors () void WarpX::ComputeMaxStep () { - if (do_compute_max_step_from_zmax) { + if (m_zmax_plasma_to_compute_max_step.has_value()) { computeMaxStepBoostAccelerator(); } } @@ -831,7 +831,7 @@ WarpX::computeMaxStepBoostAccelerator() { // End of the plasma: Transform input argument // zmax_plasma_to_compute_max_step to boosted frame. - const Real len_plasma_boost = zmax_plasma_to_compute_max_step/gamma_boost; + const Real len_plasma_boost = m_zmax_plasma_to_compute_max_step.value()/gamma_boost; // Plasma velocity const Real v_plasma_boost = -beta_boost * PhysConst::c; // Get time at which the lower end of the simulation domain passes the diff --git a/Source/WarpX.H b/Source/WarpX.H index 6e575a6da5d..50d4f36fc8e 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -356,14 +356,7 @@ public: static amrex::Real beta_boost; //! Direction of the Lorentz transform that defines the boosted frame of the simulation static amrex::Vector boost_direction; - //! 
If specified, the maximum number of iterations is computed automatically so that - //! the lower end of the simulation domain along z reaches #zmax_plasma_to_compute_max_step - //! in the boosted frame - static amrex::Real zmax_plasma_to_compute_max_step; - //! Set to true if #zmax_plasma_to_compute_max_step is specified, in which case - //! the maximum number of iterations is computed automatically so that the lower end of the - //! simulation domain along z reaches #zmax_plasma_to_compute_max_step in the boosted frame - static bool do_compute_max_step_from_zmax; + //! store initial value of zmin_domain_boost because WarpX::computeMaxStepBoostAccelerator //! needs the initial value of zmin_domain_boost, even if restarting from a checkpoint file static amrex::Real zmin_domain_boost_step_0; @@ -1541,6 +1534,11 @@ private: int max_step = std::numeric_limits<int>::max(); amrex::Real stop_time = std::numeric_limits<amrex::Real>::max(); + //! If specified, the maximum number of iterations is computed automatically so that + //! the lower end of the simulation domain along z reaches #zmax_plasma_to_compute_max_step + //! in the boosted frame + std::optional<amrex::Real> m_zmax_plasma_to_compute_max_step = std::nullopt; + int regrid_int = -1; amrex::Real cfl = amrex::Real(0.999); diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 900b70964b6..859797099cf 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -111,9 +111,7 @@ amrex::IntVect WarpX::m_fill_guards_current = amrex::IntVect(0); Real WarpX::gamma_boost = 1._rt; Real WarpX::beta_boost = 0._rt; Vector<int> WarpX::boost_direction = {0,0,0}; -bool WarpX::do_compute_max_step_from_zmax = false; bool WarpX::compute_max_step_from_btd = false; -Real WarpX::zmax_plasma_to_compute_max_step = 0._rt; Real WarpX::zmin_domain_boost_step_0 = 0._rt; int WarpX::max_particle_its_in_implicit_scheme = 21; @@ -491,7 +489,6 @@ WarpX::~WarpX () void WarpX::ReadParameters () { - { const ParmParse pp;// Traditionally, max_step and stop_time do not have prefix. utils::parser::queryWithParser(pp, "max_step", max_step); @@ -673,9 +670,9 @@ WarpX::ReadParameters () // queryWithParser returns 1 if argument zmax_plasma_to_compute_max_step is // specified by the user, 0 otherwise. - do_compute_max_step_from_zmax = utils::parser::queryWithParser( - pp_warpx, "zmax_plasma_to_compute_max_step", - zmax_plasma_to_compute_max_step); + if(auto temp = 0.0_rt; utils::parser::queryWithParser(pp_warpx, "zmax_plasma_to_compute_max_step",temp)){ + m_zmax_plasma_to_compute_max_step = temp; + } pp_warpx.query("compute_max_step_from_btd", compute_max_step_from_btd); From 59432e8ec0235bcb90731ded5eda54ee769747d2 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 24 Jan 2025 23:49:06 +0100 Subject: [PATCH 176/278] WarpX class: make dt_update_interval a private member variable (#5603) This PR changes `dt_update_interval` from a static WarpX class variable to a private member variable (renamed `m_dt_update_interval`). This is a small step towards reducing the use of static variables in the WarpX class.
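To illustrate the pattern shared by this series of PRs, here is a self-contained sketch of "static class state" versus "per-instance member state". Note that `IntervalStandIn` and `SimulationStandIn` are simplified stand-ins invented for this example; they are not the actual `utils::parser::IntervalsParser` (whose interval syntax is richer) or the WarpX class:

```cpp
#include <optional>

// Simplified stand-in for an intervals parser: period -1 means "deactivated".
struct IntervalStandIn {
    int period = -1;
    bool isActivated () const { return period > 0; }
    bool contains (int step) const { return isActivated() && (step % period == 0); }
};

class SimulationStandIn {
public:
    void readParameters (int user_period) { m_dt_update_interval.period = user_period; }
    void evolve (int numsteps) {
        for (int step = 0; step < numsteps; ++step) {
            // Same check as in the hunks below: only update dt on selected steps,
            // and only if the user did not fix dt explicitly.
            if (!m_const_dt.has_value() && m_dt_update_interval.contains(step+1)) {
                // ...recompute the timestep here...
            }
        }
    }
private:
    std::optional<double> m_const_dt;      // fixed dt, if the user provided one
    IntervalStandIn m_dt_update_interval;  // per-instance state, no static left
};
```

In the real change below, the member is filled in `WarpX::ReadParameters()` from the `warpx.dt_update_interval` input (defaulting to `"-1"`, i.e. deactivated) and queried in `WarpX::Evolve()` via `m_dt_update_interval.contains(step+1)`.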
--- Source/Evolve/WarpXComputeDt.cpp | 2 +- Source/Evolve/WarpXEvolve.cpp | 2 +- Source/WarpX.H | 2 +- Source/WarpX.cpp | 4 +--- 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/Source/Evolve/WarpXComputeDt.cpp b/Source/Evolve/WarpXComputeDt.cpp index 9645f7edbe2..2b4db960ed6 100644 --- a/Source/Evolve/WarpXComputeDt.cpp +++ b/Source/Evolve/WarpXComputeDt.cpp @@ -48,7 +48,7 @@ WarpX::ComputeDt () WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_const_dt.has_value(), "warpx.const_dt must be specified with the hybrid-PIC solver."); } else if (electromagnetic_solver_id == ElectromagneticSolverAlgo::None) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - m_const_dt.has_value() || dt_update_interval.isActivated(), + m_const_dt.has_value() || m_dt_update_interval.isActivated(), "warpx.const_dt must be specified with the electrostatic solver, or warpx.dt_update_interval must be > 0." ); } diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index c9e363879a6..f3128b902d5 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -130,7 +130,7 @@ WarpX::Evolve (int numsteps) // Update timestep for electrostatic solver if a constant dt is not provided // This first synchronizes the position and velocity before setting the new timestep if (electromagnetic_solver_id == ElectromagneticSolverAlgo::None && - !m_const_dt.has_value() && dt_update_interval.contains(step+1)) { + !m_const_dt.has_value() && m_dt_update_interval.contains(step+1)) { if (verbose) { amrex::Print() << Utils::TextMsg::Info("updating timestep"); } diff --git a/Source/WarpX.H b/Source/WarpX.H index 50d4f36fc8e..453ea2dd9c2 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -1389,7 +1389,7 @@ private: amrex::Vector<amrex::Real> t_new; amrex::Vector<amrex::Real> t_old; amrex::Vector<amrex::Real> dt; - static utils::parser::IntervalsParser dt_update_interval; // How often to update the timestep when using adaptive timestepping + utils::parser::IntervalsParser m_dt_update_interval = utils::parser::IntervalsParser{}; // How often to update the timestep when using adaptive timestepping // Particle container std::unique_ptr<MultiParticleContainer> mypc; diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 859797099cf..8ea5b95fe8a 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -182,8 +182,6 @@ bool WarpX::do_multi_J = false; int WarpX::do_multi_J_n_depositions; bool WarpX::safe_guard_cells = false; -utils::parser::IntervalsParser WarpX::dt_update_interval; - std::map<std::string, amrex::iMultiFab *> WarpX::imultifab_map; IntVect WarpX::filter_npass_each_dir(1); @@ -740,7 +738,7 @@ WarpX::ReadParameters () utils::parser::queryWithParser(pp_warpx, "max_dt", m_max_dt); std::vector<std::string> dt_interval_vec = {"-1"}; pp_warpx.queryarr("dt_update_interval", dt_interval_vec); - dt_update_interval = utils::parser::IntervalsParser(dt_interval_vec); + m_dt_update_interval = utils::parser::IntervalsParser(dt_interval_vec); // Filter defaults to true for the explicit scheme, and false for the implicit schemes if (evolve_scheme != EvolveScheme::Explicit) { From 7790292f316d36affb97ce805114012b15ace1cb Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 24 Jan 2025 23:49:36 +0100 Subject: [PATCH 177/278] WarpX class: make safe_guard_cells a private member variable (#5599) This PR changes `safe_guard_cells` from a static WarpX class variable to a private member variable (renamed `m_safe_guard_cells`). This is a small step towards reducing the use of static variables in the WarpX class.
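One wrinkle of this particular change: `WarpX::shiftMF` is a *static* member function, so once the flag stops being static it cannot be read from inside the helper anymore; every caller now passes the member in explicitly, as the hunks below show. A minimal self-contained sketch of that calling convention follows; `FieldStandIn` and `shiftFieldStandIn` are hypothetical names used only for illustration, not WarpX or AMReX API:

```cpp
#include <iostream>

// Stand-in for amrex::MultiFab, just enough to make the sketch compile.
struct FieldStandIn { const char* name; };

// Before: the helper silently read a global flag, e.g. WarpX::safe_guard_cells.
// After: the flag travels as an explicit argument, removing the hidden
// dependency on class-wide state.
void shiftFieldStandIn (FieldStandIn& mf, int num_shift, bool safe_guard_cells)
{
    if (safe_guard_cells) {
        std::cout << "filling all guard cells of " << mf.name << " before shifting\n";
    }
    // ...shift the field data by num_shift cells along the moving-window axis...
}

int main ()
{
    bool m_safe_guard_cells = false;  // a private member in the real class
    FieldStandIn rho{"rho_fp"};
    shiftFieldStandIn(rho, 1, m_safe_guard_cells);  // caller passes the member explicitly
}
```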
--- Source/Evolve/WarpXEvolve.cpp | 8 ++-- Source/Parallelization/WarpXComm.cpp | 20 ++++----- Source/Utils/WarpXMovingWindow.cpp | 67 ++++++++++++++-------------- Source/WarpX.H | 4 +- Source/WarpX.cpp | 5 +-- 5 files changed, 53 insertions(+), 51 deletions(-) diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index f3128b902d5..218a60f6203 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -446,7 +446,7 @@ WarpX::OneStep_nosub (Real cur_time) // E and B are up-to-date in the domain, but all guard cells are // outdated. - if (safe_guard_cells) { + if (m_safe_guard_cells) { FillBoundaryB(guard_cells.ng_alloc_EB); } } // !PSATD @@ -978,7 +978,7 @@ WarpX::OneStep_sub1 (Real cur_time) FillBoundaryE(fine_lev, PatchType::fine, guard_cells.ng_FieldSolver); } - if ( safe_guard_cells ) { + if ( m_safe_guard_cells ) { FillBoundaryF(fine_lev, PatchType::fine, guard_cells.ng_FieldSolver); } FillBoundaryB(fine_lev, PatchType::fine, guard_cells.ng_FieldSolver); @@ -1034,12 +1034,12 @@ WarpX::OneStep_sub1 (Real cur_time) WarpX::sync_nodal_points); } DampPML(coarse_lev, PatchType::fine); - if ( safe_guard_cells ) { + if ( m_safe_guard_cells ) { FillBoundaryE(coarse_lev, PatchType::fine, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); } } - if ( safe_guard_cells ) { + if ( m_safe_guard_cells ) { FillBoundaryB(coarse_lev, PatchType::fine, guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); } diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp index a0ae7ed67e9..b82e4d687a4 100644 --- a/Source/Parallelization/WarpXComm.cpp +++ b/Source/Parallelization/WarpXComm.cpp @@ -756,7 +756,7 @@ WarpX::FillBoundaryE (const int lev, const PatchType patch_type, const amrex::In ng.allLE(mf[i]->nGrowVect()), "Error: in FillBoundaryE, requested more guard cells than allocated"); - const amrex::IntVect nghost = (safe_guard_cells) ? mf[i]->nGrowVect() : ng; + const amrex::IntVect nghost = (m_safe_guard_cells) ? mf[i]->nGrowVect() : ng; ablastr::utils::communication::FillBoundary(*mf[i], nghost, WarpX::do_single_precision_comms, period, nodal_sync); } } @@ -821,7 +821,7 @@ WarpX::FillBoundaryB (const int lev, const PatchType patch_type, const amrex::In ng.allLE(mf[i]->nGrowVect()), "Error: in FillBoundaryB, requested more guard cells than allocated"); - const amrex::IntVect nghost = (safe_guard_cells) ? mf[i]->nGrowVect() : ng; + const amrex::IntVect nghost = (m_safe_guard_cells) ? 
mf[i]->nGrowVect() : ng; ablastr::utils::communication::FillBoundary(*mf[i], nghost, WarpX::do_single_precision_comms, period, nodal_sync); } } @@ -846,7 +846,7 @@ WarpX::FillBoundaryE_avg (int lev, PatchType patch_type, IntVect ng) ablastr::fields::MultiLevelVectorField Efield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_fp, finest_level); const amrex::Periodicity& period = Geom(lev).periodicity(); - if ( safe_guard_cells ){ + if ( m_safe_guard_cells ){ const Vector mf{Efield_avg_fp[lev][0],Efield_avg_fp[lev][1],Efield_avg_fp[lev][2]}; ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period); } else { @@ -868,7 +868,7 @@ WarpX::FillBoundaryE_avg (int lev, PatchType patch_type, IntVect ng) ablastr::fields::MultiLevelVectorField Efield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level); const amrex::Periodicity& cperiod = Geom(lev-1).periodicity(); - if ( safe_guard_cells ) { + if ( m_safe_guard_cells ) { const Vector mf{Efield_avg_cp[lev][0],Efield_avg_cp[lev][1],Efield_avg_cp[lev][2]}; ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, cperiod); @@ -906,7 +906,7 @@ WarpX::FillBoundaryB_avg (int lev, PatchType patch_type, IntVect ng) ablastr::fields::MultiLevelVectorField Bfield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_fp, finest_level); const amrex::Periodicity& period = Geom(lev).periodicity(); - if ( safe_guard_cells ) { + if ( m_safe_guard_cells ) { const Vector mf{Bfield_avg_fp[lev][0],Bfield_avg_fp[lev][1],Bfield_avg_fp[lev][2]}; ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, period); } else { @@ -928,7 +928,7 @@ WarpX::FillBoundaryB_avg (int lev, PatchType patch_type, IntVect ng) ablastr::fields::MultiLevelVectorField Bfield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level); const amrex::Periodicity& cperiod = Geom(lev-1).periodicity(); - if ( safe_guard_cells ){ + if ( m_safe_guard_cells ){ const Vector mf{Bfield_avg_cp[lev][0],Bfield_avg_cp[lev][1],Bfield_avg_cp[lev][2]}; ablastr::utils::communication::FillBoundary(mf, WarpX::do_single_precision_comms, cperiod); } else { @@ -967,7 +967,7 @@ WarpX::FillBoundaryF (int lev, PatchType patch_type, IntVect ng, std::optionalnGrowVect() : ng; + const amrex::IntVect& nghost = (m_safe_guard_cells) ? m_fields.get(FieldType::F_fp, lev)->nGrowVect() : ng; ablastr::utils::communication::FillBoundary(*m_fields.get(FieldType::F_fp, lev), nghost, WarpX::do_single_precision_comms, period, nodal_sync); } } @@ -986,7 +986,7 @@ WarpX::FillBoundaryF (int lev, PatchType patch_type, IntVect ng, std::optionalnGrowVect() : ng; + const amrex::IntVect& nghost = (m_safe_guard_cells) ? m_fields.get(FieldType::F_cp, lev)->nGrowVect() : ng; ablastr::utils::communication::FillBoundary(*m_fields.get(FieldType::F_cp, lev), nghost, WarpX::do_single_precision_comms, period, nodal_sync); } } @@ -1020,7 +1020,7 @@ void WarpX::FillBoundaryG (int lev, PatchType patch_type, IntVect ng, std::optio { const amrex::Periodicity& period = Geom(lev).periodicity(); MultiFab* G_fp = m_fields.get(FieldType::G_fp,lev); - const amrex::IntVect& nghost = (safe_guard_cells) ? G_fp->nGrowVect() : ng; + const amrex::IntVect& nghost = (m_safe_guard_cells) ? 
G_fp->nGrowVect() : ng; ablastr::utils::communication::FillBoundary(*G_fp, nghost, WarpX::do_single_precision_comms, period, nodal_sync); } } @@ -1040,7 +1040,7 @@ void WarpX::FillBoundaryG (int lev, PatchType patch_type, IntVect ng, std::optio { const amrex::Periodicity& period = Geom(lev-1).periodicity(); MultiFab* G_cp = m_fields.get(FieldType::G_cp,lev); - const amrex::IntVect& nghost = (safe_guard_cells) ? G_cp->nGrowVect() : ng; + const amrex::IntVect& nghost = (m_safe_guard_cells) ? G_cp->nGrowVect() : ng; ablastr::utils::communication::FillBoundary(*G_cp, nghost, WarpX::do_single_precision_comms, period, nodal_sync); } } diff --git a/Source/Utils/WarpXMovingWindow.cpp b/Source/Utils/WarpXMovingWindow.cpp index d5cebd69254..cc8886fc67f 100644 --- a/Source/Utils/WarpXMovingWindow.cpp +++ b/Source/Utils/WarpXMovingWindow.cpp @@ -238,59 +238,59 @@ WarpX::MoveWindow (const int step, bool move_j) if (dim == 1) { Efield_parser = m_p_ext_field_params->Eyfield_parser->compile<3>(); } if (dim == 2) { Efield_parser = m_p_ext_field_params->Ezfield_parser->compile<3>(); } } - shiftMF(*m_fields.get(FieldType::Bfield_fp, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, + shiftMF(*m_fields.get(FieldType::Bfield_fp, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*m_fields.get(FieldType::Efield_fp, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, + shiftMF(*m_fields.get(FieldType::Efield_fp, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells, m_p_ext_field_params->E_external_grid[dim], use_Eparser, Efield_parser); if (fft_do_time_averaging) { ablastr::fields::MultiLevelVectorField Efield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_fp, finest_level); ablastr::fields::MultiLevelVectorField Bfield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_fp, finest_level); - shiftMF(*Bfield_avg_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, + shiftMF(*Bfield_avg_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*Efield_avg_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, + shiftMF(*Efield_avg_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells, m_p_ext_field_params-> E_external_grid[dim], use_Eparser, Efield_parser); } if (move_j) { - shiftMF(*m_fields.get(FieldType::current_fp, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::current_fp, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells); } if (pml[lev] && pml[lev]->ok()) { amrex::MultiFab* pml_B = m_fields.get(FieldType::pml_B_fp, Direction{dim}, lev); amrex::MultiFab* pml_E = m_fields.get(FieldType::pml_E_fp, Direction{dim}, lev); - shiftMF(*pml_B, geom[lev], num_shift, dir, lev, dont_update_cost); - shiftMF(*pml_E, geom[lev], num_shift, dir, lev, dont_update_cost); + shiftMF(*pml_B, geom[lev], num_shift, dir, lev, dont_update_cost, m_safe_guard_cells); + shiftMF(*pml_E, geom[lev], num_shift, dir, lev, dont_update_cost, m_safe_guard_cells); } #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) if (pml_rz[lev] && dim < 2) { amrex::MultiFab* pml_rz_B = m_fields.get(FieldType::pml_B_fp, Direction{dim}, lev); amrex::MultiFab* pml_rz_E = 
m_fields.get(FieldType::pml_E_fp, Direction{dim}, lev); - shiftMF(*pml_rz_B, geom[lev], num_shift, dir, lev, dont_update_cost); - shiftMF(*pml_rz_E, geom[lev], num_shift, dir, lev, dont_update_cost); + shiftMF(*pml_rz_B, geom[lev], num_shift, dir, lev, dont_update_cost, m_safe_guard_cells); + shiftMF(*pml_rz_E, geom[lev], num_shift, dir, lev, dont_update_cost, m_safe_guard_cells); } #endif if (lev > 0) { // coarse grid - shiftMF(*m_fields.get(FieldType::Bfield_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, + shiftMF(*m_fields.get(FieldType::Bfield_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*m_fields.get(FieldType::Efield_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, + shiftMF(*m_fields.get(FieldType::Efield_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells, m_p_ext_field_params->E_external_grid[dim], use_Eparser, Efield_parser); - shiftMF(*m_fields.get(FieldType::Bfield_aux, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost); - shiftMF(*m_fields.get(FieldType::Efield_aux, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::Bfield_aux, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells); + shiftMF(*m_fields.get(FieldType::Efield_aux, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells); if (fft_do_time_averaging) { ablastr::fields::MultiLevelVectorField Efield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level); ablastr::fields::MultiLevelVectorField Bfield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level); - shiftMF(*Bfield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, + shiftMF(*Bfield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*Efield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, + shiftMF(*Efield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells, m_p_ext_field_params->E_external_grid[dim], use_Eparser, Efield_parser); } if (move_j) { - shiftMF(*m_fields.get(FieldType::current_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::current_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells); } if (do_pml && pml[lev]->ok()) { amrex::MultiFab* pml_B_cp = m_fields.get(FieldType::pml_B_cp, Direction{dim}, lev); amrex::MultiFab* pml_E_cp = m_fields.get(FieldType::pml_E_cp, Direction{dim}, lev); - shiftMF(*pml_B_cp, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); - shiftMF(*pml_E_cp, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); + shiftMF(*pml_B_cp, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost, m_safe_guard_cells); + shiftMF(*pml_E_cp, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost, m_safe_guard_cells); } } } @@ -300,11 +300,11 @@ WarpX::MoveWindow (const int step, bool move_j) if (m_fields.has(FieldType::F_fp, lev)) { // Fine grid - shiftMF(*m_fields.get(FieldType::F_fp, lev), geom[lev], num_shift, dir, lev, do_update_cost); + 
shiftMF(*m_fields.get(FieldType::F_fp, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells); if (lev > 0) { // Coarse grid - shiftMF(*m_fields.get(FieldType::F_cp, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::F_cp, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells); } } @@ -315,7 +315,7 @@ WarpX::MoveWindow (const int step, bool move_j) if (do_pml && pml[lev]->ok()) { amrex::MultiFab* pml_F = m_fields.get(FieldType::pml_F_fp, lev); - shiftMF(*pml_F, geom[lev], num_shift, dir, lev, dont_update_cost); + shiftMF(*pml_F, geom[lev], num_shift, dir, lev, dont_update_cost, m_safe_guard_cells); } if (lev > 0) { @@ -323,7 +323,7 @@ WarpX::MoveWindow (const int step, bool move_j) if (do_pml && pml[lev]->ok()) { amrex::MultiFab* pml_F = m_fields.get(FieldType::pml_F_cp, lev); - shiftMF(*pml_F, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); + shiftMF(*pml_F, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost, m_safe_guard_cells); } } } @@ -333,11 +333,11 @@ WarpX::MoveWindow (const int step, bool move_j) if (m_fields.has(FieldType::G_fp, lev)) { // Fine grid - shiftMF(*m_fields.get(FieldType::G_fp, lev), geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::G_fp, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells); if (lev > 0) { // Coarse grid - shiftMF(*m_fields.get(FieldType::G_cp, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::G_cp, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells); } } @@ -348,7 +348,7 @@ WarpX::MoveWindow (const int step, bool move_j) if (do_pml && pml[lev]->ok()) { amrex::MultiFab* pml_G = m_fields.get(FieldType::pml_G_fp, lev); - shiftMF(*pml_G, geom[lev], num_shift, dir, lev, dont_update_cost); + shiftMF(*pml_G, geom[lev], num_shift, dir, lev, dont_update_cost, m_safe_guard_cells); } if (lev > 0) { @@ -356,7 +356,7 @@ WarpX::MoveWindow (const int step, bool move_j) if (do_pml && pml[lev]->ok()) { amrex::MultiFab* pml_G = m_fields.get(FieldType::pml_G_cp, lev); - shiftMF(*pml_G, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost); + shiftMF(*pml_G, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost, m_safe_guard_cells); } } } @@ -365,10 +365,10 @@ WarpX::MoveWindow (const int step, bool move_j) if (move_j) { if (m_fields.has(FieldType::rho_fp, lev)) { // Fine grid - shiftMF(*m_fields.get(FieldType::rho_fp,lev), geom[lev], num_shift, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::rho_fp,lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells); if (lev > 0){ // Coarse grid - shiftMF(*m_fields.get(FieldType::rho_cp,lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost); + shiftMF(*m_fields.get(FieldType::rho_cp,lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells); } } } @@ -378,10 +378,10 @@ WarpX::MoveWindow (const int step, bool move_j) const int n_fluid_species = myfl->nSpecies(); for (int i=0; iGetFluidContainer(i); - shiftMF( *m_fields.get(fl.name_mf_N, lev), geom[lev], num_shift, dir, lev, do_update_cost ); - shiftMF( *m_fields.get(fl.name_mf_NU, Direction{0}, lev), geom[lev], num_shift, dir, lev, do_update_cost ); - shiftMF( *m_fields.get(fl.name_mf_NU, Direction{1}, lev), geom[lev], num_shift, dir, lev, do_update_cost ); - shiftMF( *m_fields.get(fl.name_mf_NU, Direction{2}, lev), geom[lev], num_shift, dir, lev, do_update_cost ); + shiftMF( 
*m_fields.get(fl.name_mf_N, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells ); + shiftMF( *m_fields.get(fl.name_mf_NU, Direction{0}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells ); + shiftMF( *m_fields.get(fl.name_mf_NU, Direction{1}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells ); + shiftMF( *m_fields.get(fl.name_mf_NU, Direction{2}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells ); } } } @@ -478,6 +478,7 @@ WarpX::MoveWindow (const int step, bool move_j) void WarpX::shiftMF (amrex::MultiFab& mf, const amrex::Geometry& geom, int num_shift, int dir, const int lev, bool update_cost_flag, + const bool safe_guard_cells, amrex::Real external_field, bool useparser, amrex::ParserExecutor<3> const& field_parser) { @@ -493,7 +494,7 @@ WarpX::shiftMF (amrex::MultiFab& mf, const amrex::Geometry& geom, amrex::MultiFab tmpmf(ba, dm, nc, ng); amrex::MultiFab::Copy(tmpmf, mf, 0, 0, nc, ng); - if ( WarpX::safe_guard_cells ) { + if ( safe_guard_cells ) { // Fill guard cells. ablastr::utils::communication::FillBoundary(tmpmf, WarpX::do_single_precision_comms, geom.periodicity()); } else { diff --git a/Source/WarpX.H b/Source/WarpX.H index 453ea2dd9c2..f40792de891 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -168,6 +168,7 @@ public: static void shiftMF (amrex::MultiFab& mf, const amrex::Geometry& geom, int num_shift, int dir, int lev, bool update_cost_flag, + bool safe_guard_cells, amrex::Real external_field=0.0, bool useparser = false, amrex::ParserExecutor<3> const& field_parser={}); @@ -379,7 +380,6 @@ public: static int do_multi_J_n_depositions; static bool do_device_synchronize; - static bool safe_guard_cells; //! With mesh refinement, particles located inside a refinement patch, but within //! 
#n_field_gather_buffer cells of the edge of the patch, will gather the fields @@ -1391,6 +1391,8 @@ private: amrex::Vector dt; utils::parser::IntervalsParser m_dt_update_interval = utils::parser::IntervalsParser{}; // How often to update the timestep when using adaptive timestepping + bool m_safe_guard_cells = false; + // Particle container std::unique_ptr mypc; std::unique_ptr multi_diags; diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 8ea5b95fe8a..21bf48be8e0 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -180,7 +180,6 @@ bool WarpX::do_dynamic_scheduling = true; bool WarpX::do_multi_J = false; int WarpX::do_multi_J_n_depositions; -bool WarpX::safe_guard_cells = false; std::map WarpX::imultifab_map; @@ -655,7 +654,7 @@ WarpX::ReadParameters () pp_warpx, "do_multi_J_n_depositions", do_multi_J_n_depositions); } pp_warpx.query("use_hybrid_QED", use_hybrid_QED); - pp_warpx.query("safe_guard_cells", safe_guard_cells); + pp_warpx.query("safe_guard_cells", m_safe_guard_cells); std::vector override_sync_intervals_string_vec = {"1"}; pp_warpx.queryarr("override_sync_intervals", override_sync_intervals_string_vec); override_sync_intervals = @@ -2083,7 +2082,7 @@ WarpX::AllocLevelData (int lev, const BoxArray& ba, const DistributionMapping& d maxLevel(), WarpX::m_v_galilean, WarpX::m_v_comoving, - safe_guard_cells, + m_safe_guard_cells, WarpX::do_multi_J, WarpX::fft_do_time_averaging, ::isAnyBoundaryPML(field_boundary_lo, field_boundary_hi), From f8e2376d0f45e56011704c4909130c58b95d8e4e Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 24 Jan 2025 23:50:02 +0100 Subject: [PATCH 178/278] WarpX class: make do_divb_cleaning_external a private member variable (#5598) This PR changes `do_divb_cleaning_external` from a static WarpX class variable to a private member variable (renamed `m_do_divb_cleaning_external`). This is a small step towards reducing the use of static variables in the WarpX class. --- Source/Initialization/WarpXInitData.cpp | 2 +- Source/WarpX.H | 8 ++++---- Source/WarpX.cpp | 5 ++--- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 7d7e9b5e601..98efb1f5b3f 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -590,7 +590,7 @@ WarpX::InitData () WriteUsedInputsFile(); // Run div cleaner here on loaded external fields - if (WarpX::do_divb_cleaning_external) { + if (m_do_divb_cleaning_external) { WarpX::ProjectionCleanDivB(); } diff --git a/Source/WarpX.H b/Source/WarpX.H index f40792de891..06b2d46e910 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -290,10 +290,6 @@ public: //! Solve additional Maxwell equation for G in order to control errors in magnetic Gauss' law static bool do_divb_cleaning; - //! Solve Poisson equation when loading an external magnetic field to clean divergence - //! This is useful to remove errors that could lead to non-zero B field divergence - static bool do_divb_cleaning_external; - //! Order of the particle shape factors (splines) along x static int nox; //! Order of the particle shape factors (splines) along y @@ -1582,6 +1578,10 @@ private: int noy_fft = 16; int noz_fft = 16; + //! Solve Poisson equation when loading an external magnetic field to clean divergence + //! This is useful to remove errors that could lead to non-zero B field divergence + bool m_do_divb_cleaning_external = false; + //! 
Domain decomposition on Level 0 amrex::IntVect numprocs{0}; diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 21bf48be8e0..5896a918480 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -118,7 +118,6 @@ int WarpX::max_particle_its_in_implicit_scheme = 21; ParticleReal WarpX::particle_tol_in_implicit_scheme = 1.e-10; bool WarpX::do_dive_cleaning = false; bool WarpX::do_divb_cleaning = false; -bool WarpX::do_divb_cleaning_external = false; bool WarpX::do_single_precision_comms = false; bool WarpX::do_shared_mem_charge_deposition = false; @@ -1064,9 +1063,9 @@ WarpX::ReadParameters () || WarpX::electrostatic_solver_id == ElectrostaticSolverAlgo::LabFrameElectroMagnetostatic) && WarpX::poisson_solver_id == PoissonSolverAlgo::Multigrid))) { - do_divb_cleaning_external = true; + m_do_divb_cleaning_external = true; } - pp_warpx.query("do_divb_cleaning_external", do_divb_cleaning_external); + pp_warpx.query("do_divb_cleaning_external", m_do_divb_cleaning_external); // If true, the current is deposited on a nodal grid and centered onto // a staggered grid. Setting warpx.do_current_centering=1 makes sense From 980d707dd46ac3d94da6f3036246901f9b273078 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 24 Jan 2025 23:52:18 +0100 Subject: [PATCH 179/278] WarpX class: make mirror-related variables private member variables (#5596) `num_mirrors` was a static class variable. This PR makes it a private member variable, with the aim of making WarpX less static. The name is also changed to `m_num_mirrors`. While at it, the variables `mirror_z`, `mirror_z_width`, and `mirror_z_npoints` are also renamed with an `m_` prefix and made private. --- Source/Evolve/WarpXEvolve.cpp | 12 ++++++------ Source/WarpX.H | 11 ++++++----- Source/WarpX.cpp | 20 +++++++++----------- 3 files changed, 21 insertions(+), 22 deletions(-) diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 218a60f6203..21abb0d8233 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -1190,16 +1190,16 @@ WarpX::applyMirrors (Real time) using ablastr::fields::Direction; // something to do? - if (num_mirrors == 0) { + if (m_num_mirrors == 0) { return; } // Loop over the mirrors - for(int i_mirror=0; i_mirror1) @@ -1211,9 +1211,9 @@ WarpX::applyMirrors (Real time) // Loop over levels for(int lev=0; lev<=finest_level; lev++) { - // Mirror must contain at least mirror_z_npoints[i_mirror] cells + // Mirror must contain at least m_mirror_z_npoints[i_mirror] cells const amrex::Real dz = WarpX::CellSize(lev)[2]; - const amrex::Real z_max = std::max(z_max_tmp, z_min+mirror_z_npoints[i_mirror]*dz); + const amrex::Real z_max = std::max(z_max_tmp, z_min+m_mirror_z_npoints[i_mirror]*dz); // Set each field on the fine patch to zero between z_min and z_max NullifyMF(m_fields, "Efield_fp", Direction{0}, lev, z_min, z_max); diff --git a/Source/WarpX.H b/Source/WarpX.H index 06b2d46e910..c7baedc985f 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -464,11 +464,6 @@ public: amrex::Vector<amrex::Real> m_v_comoving = amrex::Vector<amrex::Real>(3, amrex::Real(0.)); - static int num_mirrors; - amrex::Vector<amrex::Real> mirror_z; - amrex::Vector<amrex::Real> mirror_z_width; - amrex::Vector<int> mirror_z_npoints; - /// object with all reduced diagnostics, similar to MultiParticleContainer for species.
std::unique_ptr<MultiReducedDiags> reduced_diags; @@ -1465,6 +1460,12 @@ private: amrex::Real moving_window_x = std::numeric_limits<amrex::Real>::max(); + // Mirrors + int m_num_mirrors = 0; + amrex::Vector<amrex::Real> m_mirror_z; + amrex::Vector<amrex::Real> m_mirror_z_width; + amrex::Vector<int> m_mirror_z_npoints; + // Plasma injection parameters int warpx_do_continuous_injection = 0; int num_injected_species = -1; diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 5896a918480..6c2557f71db 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -162,8 +162,6 @@ bool WarpX::use_filter_compensation = false; bool WarpX::serialize_initial_conditions = false; bool WarpX::refine_plasma = false; -int WarpX::num_mirrors = 0; - utils::parser::IntervalsParser WarpX::sort_intervals; amrex::IntVect WarpX::sort_bin_size(AMREX_D_DECL(1,1,1)); @@ -782,17 +780,17 @@ WarpX::ReadParameters () #endif utils::parser::queryWithParser( - pp_warpx, "num_mirrors", num_mirrors); - if (num_mirrors>0){ - mirror_z.resize(num_mirrors); + pp_warpx, "num_mirrors", m_num_mirrors); + if (m_num_mirrors>0){ + m_mirror_z.resize(m_num_mirrors); utils::parser::getArrWithParser( - pp_warpx, "mirror_z", mirror_z, 0, num_mirrors); - mirror_z_width.resize(num_mirrors); + pp_warpx, "mirror_z", m_mirror_z, 0, m_num_mirrors); + m_mirror_z_width.resize(m_num_mirrors); utils::parser::getArrWithParser( - pp_warpx, "mirror_z_width", mirror_z_width, 0, num_mirrors); - mirror_z_npoints.resize(num_mirrors); + pp_warpx, "mirror_z_width", m_mirror_z_width, 0, m_num_mirrors); + m_mirror_z_npoints.resize(m_num_mirrors); utils::parser::getArrWithParser( - pp_warpx, "mirror_z_npoints", mirror_z_npoints, 0, num_mirrors); + pp_warpx, "mirror_z_npoints", m_mirror_z_npoints, 0, m_num_mirrors); } pp_warpx.query("do_single_precision_comms", do_single_precision_comms); @@ -1168,7 +1166,7 @@ WarpX::ReadParameters () // implicit evolve schemes not setup to use mirrors if (evolve_scheme == EvolveScheme::SemiImplicitEM || evolve_scheme == EvolveScheme::ThetaImplicitEM) { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( num_mirrors == 0, + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( m_num_mirrors == 0, "Mirrors cannot be used with Implicit evolve schemes."); } From c9168a0d7f90f3657a8bd1605bea97e5275218cd Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 24 Jan 2025 23:54:23 +0100 Subject: [PATCH 180/278] Move checkEarlyUnusedParams from WarpX class to anonymous namespace (#5573) `checkEarlyUnusedParams` is only used once inside `WarpX::Evolve`. Therefore, this PR moves the function from the WarpX class to an anonymous namespace inside `WarpXEvolve.cpp`. The final goal is to reduce the complexity of the WarpX class. --- Source/Evolve/WarpXEvolve.cpp | 28 ++++++++++++++++++---------- Source/WarpX.H | 7 ------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 21abb0d8233..1b2ff7e34f1 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -61,6 +61,23 @@ using namespace amrex; using ablastr::utils::SignalHandling; +namespace +{ + /** Print Unused Parameter Warnings after Step 1 + * + * Instead of waiting for a simulation to end, we already do an early "unused parameter check" + * after step 1 to inform users early of potential issues with their simulation setup. + */ + void checkEarlyUnusedParams () + { + amrex::Print() << "\n"; // better: conditional \n based on return value + amrex::ParmParse::QueryUnusedInputs(); + + // Print the warning list right after the first step.
+ amrex::Print() << ablastr::warn_manager::GetWMInstance().PrintGlobalWarnings("FIRST STEP"); + } +} + void WarpX::Synchronize () { using ablastr::fields::Direction; @@ -310,7 +327,7 @@ WarpX::Evolve (int numsteps) // inputs: unused parameters (e.g. typos) check after step 1 has finished if (!early_params_checked) { - checkEarlyUnusedParams(); + ::checkEarlyUnusedParams(); early_params_checked = true; } @@ -461,15 +478,6 @@ bool WarpX::checkStopSimulation (amrex::Real cur_time) m_exit_loop_due_to_interrupt_signal; } -void WarpX::checkEarlyUnusedParams () -{ - amrex::Print() << "\n"; // better: conditional \n based on return value - amrex::ParmParse::QueryUnusedInputs(); - - // Print the warning list right after the first step. - amrex::Print() << ablastr::warn_manager::GetWMInstance().PrintGlobalWarnings("FIRST STEP"); -} - void WarpX::ExplicitFillBoundaryEBUpdateAux () { WARPX_ALWAYS_ASSERT_WITH_MESSAGE(evolve_scheme == EvolveScheme::Explicit, diff --git a/Source/WarpX.H b/Source/WarpX.H index c7baedc985f..d1f3e585c4f 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -1614,13 +1614,6 @@ private: [[nodiscard]] bool checkStopSimulation (amrex::Real cur_time); - /** Print Unused Parameter Warnings after Step 1 - * - * Instead of waiting for a simulation to end, we already do an early "unused parameter check" - * after step 1 to inform users early of potential issues with their simulation setup. - */ - void checkEarlyUnusedParams (); - /** Perform essential particle house keeping at boundaries * * Inject, communicate, scrape and sort particles. From 2ef640c1ae1b7e6e0a9d85bacc1c3742885b58de Mon Sep 17 00:00:00 2001 From: Thomas Marks Date: Fri, 24 Jan 2025 18:17:47 -0500 Subject: [PATCH 181/278] Catch warpx not being initialized in library loader (#5567) Follows up on https://github.com/ECP-WarpX/WarpX/pull/5412, using @ax3l's [suggestion](https://github.com/ECP-WarpX/WarpX/pull/5412#pullrequestreview-2556960357) to catch warpx not being initialized when accessing libwarpx.warpx --- Python/pywarpx/_libwarpx.py | 9 ++++++--- Python/pywarpx/particle_containers.py | 4 ++-- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/Python/pywarpx/_libwarpx.py b/Python/pywarpx/_libwarpx.py index 40426104b9f..98c10f7a8c4 100755 --- a/Python/pywarpx/_libwarpx.py +++ b/Python/pywarpx/_libwarpx.py @@ -40,9 +40,14 @@ def __getattr__(self, attribute): # Once loaded, it gets added to the dictionary so this code won't be called again. self.load_library() return self.__dict__[attribute] + elif attribute == "warpx": + # A `warpx` attribute has not yet been assigned, so `initialize_warpx` has not been called. + raise AttributeError( + "Trying to access libwarpx.warpx before initialize_warpx has been called!" + ) else: # For any other attribute, call the built-in routine - this should always - # return an AttributeException. + # return an AttributeError.
return self.__getattribute__(attribute) def _get_package_root(self): @@ -143,8 +148,6 @@ def initialize(self, argv=None, mpi_comm=None): self.libwarpx_so.execute_python_callback("afterinit") self.libwarpx_so.execute_python_callback("particleloader") - # self.libwarpx_so.warpx_init() - self.initialized = True def finalize(self, finalize_mpi=1): diff --git a/Python/pywarpx/particle_containers.py b/Python/pywarpx/particle_containers.py index 9a4d7257a69..3d77a61cb07 100644 --- a/Python/pywarpx/particle_containers.py +++ b/Python/pywarpx/particle_containers.py @@ -35,7 +35,7 @@ def particle_container(self): self.name ) except AttributeError as e: - msg = "This is likely caused by attempting to access a ParticleContainerWrapper before initialize_warpx has been called" + msg = "You must initialize WarpX before accessing a ParticleContainerWrapper's particle_container." raise AttributeError(msg) from e return self._particle_container @@ -777,7 +777,7 @@ def particle_buffer(self): try: self._particle_buffer = libwarpx.warpx.get_particle_boundary_buffer() except AttributeError as e: - msg = "This is likely caused by attempting to access a ParticleBoundaryBufferWrapper before initialize_warpx has been called" + msg = "You must initialize WarpX before accessing a ParticleBoundaryBufferWrapper's particle_buffer." raise AttributeError(msg) from e return self._particle_buffer From 18d7f82220d4e41988941f81d076be4dc3acc300 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 24 Jan 2025 15:23:52 -0800 Subject: [PATCH 182/278] CI: CDash dashboard support (#5566) Trying to add and set up CDash dashboard support for WarpX. Close #5292. Relevant documentation: - https://cmake.org/cmake/help/book/mastering-cmake/chapter/CDash.html - https://public.kitware.com/Wiki/CDash:Administration#Creating_a_project (project settings) - https://cmake.org/cmake/help/latest/guide/tutorial/Adding%20Support%20for%20a%20Testing%20Dashboard.html --------- Co-authored-by: Axel Huebl --- .azure-pipelines.yml | 12 ++++++++++-- CMakeLists.txt | 3 ++- CTestConfig.cmake | 18 ++++++++++++++++++ 3 files changed, 30 insertions(+), 3 deletions(-) create mode 100644 CTestConfig.cmake diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index 28c4e03d102..badedcb994c 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -155,8 +155,16 @@ jobs: - bash: | # set options set -o nounset errexit pipefail - # run tests (exclude pytest.AMReX when running Python tests) - ctest --test-dir build --output-on-failure -E AMReX + # determine if the build was triggered by a push to the development branch + if [[ "$(Build.SourceBranch)" == "refs/heads/development" ]]; then + # run tests (exclude pytest.AMReX when running Python tests) + # and submit results to CDash as Experimental + ctest --test-dir build --output-on-failure -E AMReX \ + -D ExperimentalTest -D ExperimentalSubmit + else + # run tests (exclude pytest.AMReX when running Python tests) + ctest --test-dir build --output-on-failure -E AMReX + fi displayName: 'Test' - bash: | diff --git a/CMakeLists.txt b/CMakeLists.txt index 90771cbbb29..f1dcece8ce1 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -193,7 +193,8 @@ if(WarpX_FFT) set(ABLASTR_FFT ON CACHE STRING "FFT-based solvers" FORCE) endif() -# this defined the variable BUILD_TESTING which is ON by default +# Define the variable BUILD_TESTING (ON by default), +# include CDash dashboard testing module include(CTest) diff --git a/CTestConfig.cmake b/CTestConfig.cmake new file mode 
100644 index 00000000000..938d2a4f518 --- /dev/null +++ b/CTestConfig.cmake @@ -0,0 +1,18 @@ +## This file should be placed in the root directory of your project. +## Then modify the CMakeLists.txt file in the root directory of your +## project to incorporate the testing dashboard. +## +## # The following are required to submit to the CDash dashboard: +## ENABLE_TESTING() +## INCLUDE(CTest) + +set(CTEST_PROJECT_NAME WarpX) +set(CTEST_NIGHTLY_START_TIME 08:00:00 UTC) + +set(CTEST_SUBMIT_URL https://my.cdash.org/submit.php?project=WarpX) + +set(CTEST_DROP_SITE_CDASH TRUE) + +# Additional settings +set(CTEST_SITE "Azure-Pipelines") +set(CTEST_BUILD_NAME "CI-Development") From 1b2afd835818a3bce7f38ba42f6bfe9ec00e4553 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 24 Jan 2025 15:28:11 -0800 Subject: [PATCH 183/278] Docs: configure conda to use only `conda-forge` (#5569) Requested by @ax3l: > We might also need to rework our conda docs, to change the default channel priorities to only use `conda-forge`. By default, many people have the `anaconda` repos in them too and, although we do not use them, this can break the build in this situation: some labs now block them in their firewall due to recent events, and then the installer just aborts instead of trying the next channel in the list. Very easy to do, I think we just need to prepend `conda-forge` in the channels (instead of the CLI) or add a guide to install Conda/Mamba from Conda-Forge/Minimamba instead of Anaconda. This PR adds instructions to: 1. add `conda-forge` to the top of the channel list; 2. set the channel priority to strict. Hopefully this should be enough? --------- Co-authored-by: Axel Huebl --- Docs/source/install/dependencies.rst | 25 ++++++++++++------------- Docs/source/install/users.rst | 25 ++++++++++++------------- 2 files changed, 24 insertions(+), 26 deletions(-) diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index dcad6a00869..facaa3a5614 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -52,27 +52,26 @@ For all other systems, we recommend to use a **package dependency manager**: Pick *one* of the installation methods below to install all dependencies for WarpX development in a consistent manner. -Conda (Linux/macOS/Windows) ---------------------------- +Conda-Forge (Linux/macOS/Windows) +--------------------------------- -`Conda `__/`Mamba `__ are cross-compatible, user-level package managers. +`Conda-Forge `__ is a repository for cross-compatible, user-level packages. .. tip:: - We recommend to configure your conda to use the faster ``libmamba`` `dependency solver `__. + We recommend to deactivate that conda self-activates its ``base`` environment. + This `avoids interference with the system and other package managers `__. .. code-block:: bash - conda update -y -n base conda - conda install -y -n base conda-libmamba-solver - conda config --set solver libmamba + conda config --set auto_activate_base false - We recommend to deactivate that conda self-activates its ``base`` environment. - This `avoids interference with the system and other package managers `__. + In order to make sure that the conda configuration uses ``conda-forge`` as the only channel, which will help avoid issues with blocked ``defaults`` or ``anaconda`` repositories, please set the following configurations: .. 
code-block:: bash - conda config --set auto_activate_base false + conda config --add channels conda-forge + conda config --set channel_priority strict .. tab-set:: @@ -104,19 +103,19 @@ For OpenMP support, you will further need: .. code-block:: bash - conda install -c conda-forge libgomp + mamba install -c conda-forge libgomp .. tab-item:: macOS or Windows .. code-block:: bash - conda install -c conda-forge llvm-openmp + mamba install -c conda-forge llvm-openmp For Nvidia CUDA GPU support, you will need to have `a recent CUDA driver installed `__ or you can lower the CUDA version of `the Nvidia cuda package `__ and `conda-forge to match your drivers `__ and then add these packages: .. code-block:: bash - conda install -c nvidia -c conda-forge cuda cuda-nvtx-dev cupy + mamba install -c nvidia -c conda-forge cuda cuda-nvtx-dev cupy More info for `CUDA-enabled ML packages `__. diff --git a/Docs/source/install/users.rst b/Docs/source/install/users.rst index e56d2d8ac43..47378bbf6d6 100644 --- a/Docs/source/install/users.rst +++ b/Docs/source/install/users.rst @@ -38,36 +38,35 @@ If want to use WarpX on a specific high-performance computing (HPC) systems, jum .. image:: conda.svg -Using the Conda Package ------------------------ +Using the Conda-Forge Package +----------------------------- -A package for WarpX is available via the `Conda `_ package manager. +A package for WarpX is available via `Conda-Forge `__. .. tip:: - We recommend to configure your conda to use the faster ``libmamba`` `dependency solver `__. + We recommend to deactivate that conda self-activates its ``base`` environment. + This `avoids interference with the system and other package managers `__. .. code-block:: bash - conda update -y -n base conda - conda install -y -n base conda-libmamba-solver - conda config --set solver libmamba + conda config --set auto_activate_base false - We recommend to deactivate that conda self-activates its ``base`` environment. - This `avoids interference with the system and other package managers `__. + In order to make sure that the conda configuration uses ``conda-forge`` as the only channel, which will help avoid issues with blocked ``defaults`` or ``anaconda`` repositories, please set the following configurations: .. code-block:: bash - conda config --set auto_activate_base false + conda config --add channels conda-forge + conda config --set channel_priority strict .. code-block:: bash - conda create -n warpx -c conda-forge warpx - conda activate warpx + mamba create -n warpx -c conda-forge warpx + mamba activate warpx .. note:: - The ``warpx`` `conda package `__ does not yet provide GPU support. + The ``warpx`` package on conda-forge does not yet provide `GPU support `__. .. _install-spack: From b9c31fd55008c031769e635b04e03e94111f7a83 Mon Sep 17 00:00:00 2001 From: Arianna Formenti Date: Fri, 24 Jan 2025 15:32:18 -0800 Subject: [PATCH 184/278] Docs and issue templates: clarify usage of issues and discussions (#5527) This PR adds guidance and best practices on how to use issues and discussions. This is useful to avoid issues that should be discussions, for example. 
--------- Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Co-authored-by: Axel Huebl --- .github/ISSUE_TEMPLATE/blank_issue.md | 14 +++++++++++++ .github/ISSUE_TEMPLATE/config.yml | 2 +- CONTRIBUTING.rst | 30 +++++++++++++++++++++++++++ 3 files changed, 45 insertions(+), 1 deletion(-) create mode 100644 .github/ISSUE_TEMPLATE/blank_issue.md diff --git a/.github/ISSUE_TEMPLATE/blank_issue.md b/.github/ISSUE_TEMPLATE/blank_issue.md new file mode 100644 index 00000000000..2d5216c8fc8 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/blank_issue.md @@ -0,0 +1,14 @@ +--- +name: Blank issue +about: Ask us a question +labels: [question] +--- + +Are you here because you have something to report that is neither a bug, a new feature, nor an installation problem? + +Before opening this issue, consider opening a [discussion](https://github.com/ECP-WarpX/WarpX/discussions) instead! + +Issues are used to report bugs, installation problems or to request new features. +Discussions are used to ask more open-ended questions, brainstorm, ask our feedback, etc. + +You can find more details on how to use issues and discussions [here](https://github.com/ECP-WarpX/WarpX/blob/development/CONTRIBUTING.rst). diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml index 0086358db1e..3ba13e0cec6 100644 --- a/.github/ISSUE_TEMPLATE/config.yml +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -1 +1 @@ -blank_issues_enabled: true +blank_issues_enabled: false diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 77b8200b0d5..3affc9f4aaa 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -12,6 +12,36 @@ Git workflow The WarpX project uses `git `_ for version control. If you are new to git, you can follow `this tutorial `__. + +What to do when +^^^^^^^^^^^^^^^ + +Issues +"""""" + +`Issues `__ are used to track tasks that the contributors and/or maintainers can work on. +Use issues for reporting bugs or installation problems and for requesting new features. + +If you've found a bug and wish to report it, first search the open issues and `pull requests `__ to see if someone else has already reported the same thing. +If it's something new, open an issue using a template. +We'll use the issue to address the problem you've encountered. + +Discussions +""""""""""" + +`Discussions `__ are for open-ended conversations, general questions, brainstorming ideas. +Please, use discussions if you want to ask us something that is not technically a bug or a feature. +Feel free to ping us there! + +Pull Requests (PRs) +""""""""""""""""""" + +Open a `pull request `__ if you want to add a new feature yourself. +Follow the guide below for more details. + + +Thank you for contributing! 🥰 + Configure your GitHub Account & Development Machine ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ From 1d5f3c4d2bbf6cdcb5da7352f04b1ced18e64799 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 24 Jan 2025 15:38:43 -0800 Subject: [PATCH 185/278] Docs: improve "Workflows" section for developers (#5525) Suggestion for reviewers: Compare the preview of the new documentation available from the CI check `docs/readthedocs.org:warpx` with the existing documentation available online. 
Improve the [Workflows](https://warpx.readthedocs.io/en/latest/developers/workflows.html) documentation for developers, so far only the following sections: - "How to test the code" - "How to run the clang-tidy linter" To-do: - [x] Incorporate minimal checksum documentation in one single test section - [x] Add step-by-step guide on how to add new tests (enumerated list of steps) --- ...compile.rst => how_to_compile_locally.rst} | 4 +- Docs/source/developers/how_to_guides.rst | 13 ++ .../{profiling.rst => how_to_profile.rst} | 4 +- .../developers/how_to_run_clang_tidy.rst | 51 +++++ .../{testing.rst => how_to_test.rst} | 212 ++++++++---------- ...entation.rst => how_to_write_the_docs.rst} | 4 +- .../developers/run_clang_tidy_locally.rst | 55 ----- Docs/source/developers/workflows.rst | 14 -- Docs/source/index.rst | 2 +- 9 files changed, 169 insertions(+), 190 deletions(-) rename Docs/source/developers/{local_compile.rst => how_to_compile_locally.rst} (99%) create mode 100644 Docs/source/developers/how_to_guides.rst rename Docs/source/developers/{profiling.rst => how_to_profile.rst} (99%) create mode 100644 Docs/source/developers/how_to_run_clang_tidy.rst rename Docs/source/developers/{testing.rst => how_to_test.rst} (51%) rename Docs/source/developers/{documentation.rst => how_to_write_the_docs.rst} (98%) delete mode 100644 Docs/source/developers/run_clang_tidy_locally.rst delete mode 100644 Docs/source/developers/workflows.rst diff --git a/Docs/source/developers/local_compile.rst b/Docs/source/developers/how_to_compile_locally.rst similarity index 99% rename from Docs/source/developers/local_compile.rst rename to Docs/source/developers/how_to_compile_locally.rst index 8bfa033a92d..9fc1b397b78 100644 --- a/Docs/source/developers/local_compile.rst +++ b/Docs/source/developers/how_to_compile_locally.rst @@ -1,7 +1,7 @@ .. _developers-local-compile: -Fast, Local Compilation -======================= +How to compile locally and fast +=============================== For simplicity, WarpX :ref:`compilation with CMake ` by default downloads, configures and compiles compatible versions of :ref:`central dependencies ` such as: diff --git a/Docs/source/developers/how_to_guides.rst b/Docs/source/developers/how_to_guides.rst new file mode 100644 index 00000000000..093641e01b0 --- /dev/null +++ b/Docs/source/developers/how_to_guides.rst @@ -0,0 +1,13 @@ +.. _development-howtoguides: + +How-to guides +============= + +.. toctree:: + :maxdepth: 1 + + how_to_profile + how_to_test + how_to_run_clang_tidy + how_to_compile_locally + how_to_write_the_docs diff --git a/Docs/source/developers/profiling.rst b/Docs/source/developers/how_to_profile.rst similarity index 99% rename from Docs/source/developers/profiling.rst rename to Docs/source/developers/how_to_profile.rst index 5acea786920..e756d8362c2 100644 --- a/Docs/source/developers/profiling.rst +++ b/Docs/source/developers/how_to_profile.rst @@ -1,7 +1,7 @@ .. _developers-profiling: -Profiling the Code -================== +How to profile the code +======================= Profiling allows us to find the bottle-necks of the code as it is currently implemented. Bottle-necks are the parts of the code that may delay the simulation, making it more computationally expensive. diff --git a/Docs/source/developers/how_to_run_clang_tidy.rst b/Docs/source/developers/how_to_run_clang_tidy.rst new file mode 100644 index 00000000000..bbcd7b80130 --- /dev/null +++ b/Docs/source/developers/how_to_run_clang_tidy.rst @@ -0,0 +1,51 @@ +.. 
_developers-run_clang_tidy_locally: + +How to run the clang-tidy linter +================================ + +WarpX's CI tests include several checks performed with the `clang-tidy `__ linter. +The complete list of checks performed is defined in the ``.clang-tidy`` configuration file. + +.. dropdown:: clang-tidy configuration file + :color: light + :icon: info + :animate: fade-in-slide-down + + .. literalinclude:: ../../../.clang-tidy + :language: yaml + +Under `Tools/Linter `__, the script ``runClangTidy.sh`` can be used to run the clang-tidy linter locally. + +.. dropdown:: clang-tidy local run script + :color: light + :icon: info + :animate: fade-in-slide-down + + .. literalinclude:: ../../../Tools/Linter/runClangTidy.sh + :language: bash + +It is a prerequisite that WarpX is compiled following the instructions that you find in our :ref:`Users ` or :ref:`Developers ` sections. + +The script generates a wrapper to ensure that clang-tidy is only applied to WarpX source files and compiles WarpX in 1D, 2D, 3D, and RZ geometry, using such wrapper. + +By default WarpX is compiled in single precision with PSATD solver, QED module, QED table generator and embedded boundary in order to ensure broader coverage with the clang-tidy tool. + +Few optional environment variables can be set to tune the behavior of the script: + +* ``WARPX_TOOLS_LINTER_PARALLEL``: set the number of cores used for compilation; + +* ``CLANG``, ``CLANGXX``, and ``CLANGTIDY``: set the version of the compiler and the linter. + +For continuous integration we currently use clang version 15.0.0 and it is recommended to use this version locally as well. +A newer version may find issues not currently covered by CI tests (checks are opt-in), while older versions may not find all the issues. + +Here's an example of how to run the script after setting the appropriate environment variables: + +.. code-block:: bash + + export WARPX_TOOLS_LINTER_PARALLEL=12 + export CLANG=clang-15 + export CLANGXX=clang++-15 + export CLANGTIDY=clang-tidy-15 + + ./Tools/Linter/runClangTidy.sh diff --git a/Docs/source/developers/testing.rst b/Docs/source/developers/how_to_test.rst similarity index 51% rename from Docs/source/developers/testing.rst rename to Docs/source/developers/how_to_test.rst index 57194b54642..12dc653ac61 100644 --- a/Docs/source/developers/testing.rst +++ b/Docs/source/developers/how_to_test.rst @@ -1,35 +1,40 @@ .. _developers-testing: -Testing the Code -================ +How to test the code +==================== -When proposing a code change, you want to make sure that +When you propose code changes, you want to make sure that -* the code change does not break the existing code; -* the code change gives correct results (numerics, physics, etc.). +* the code changes do not break the behavior of the rest of the code; +* the code changes give correct results (numerics, physics, etc.). -WarpX follows the continuous integration (CI) software development practice, where automated builds and tests are run after merging code changes into the main branch. - -While the code is tested regularly remotely (on the cloud when commits are pushed to an open PR, and every night on local clusters), it can also be useful to run tests on your custom input file. +Following the continuous integration (CI) software development practice, WarpX runs automated builds and tests after a commit is pushed to an open PR as well as after a PR is merged into the main branch. 
How to run pre-commit tests locally ----------------------------------- -First, when proposing a code change, we perform a couple of automated style and correctness checks. +First, WarpX uses `pre-commit `__ to perform automated style and correctness checks. + +Here is how to install ``pre-commit`` locally: + +#. Install ``pre-commit``: + + .. code-block:: sh + + python -m pip install -U pre-commit -If you install the ``pre-commit`` tool on your local machine via +#. Install the git hook scripts: -.. code-block:: sh + .. code-block:: sh - python -m pip install -U pre-commit - pre-commit install + pre-commit install -the style and correctness checks will run automatically on your local machine, after you commit the change and before you push. +If you install ``pre-commit`` locally, the style and correctness checks will run automatically on your computer, after you commit the code changes and before you push them to the remote repository. -If you do not install the ``pre-commit`` tool on your local machine, these checks will run automatically as part of our CI workflows and a commit containing style and correctness changes might be added automatically to your branch. -In that case, you will need to pull that automated commit before pushing further changes. +If you do not install ``pre-commit`` locally, these checks will run automatically as part of our CI workflows and a commit containing style and correctness changes might be added automatically to your branch after you have pushed your own commit. +In that case, you will need to pull that automated commit before pushing further commits. -See `pre-commit.com `__ and our ``.pre-commit-config.yaml`` file in the repository for more details. +The configuration options for ``pre-commit`` are set in the `pre-commit-config.yaml `__ file. How to configure the automated tests ------------------------------------ @@ -43,11 +48,11 @@ A test that requires a build option that was not configured and built will be sk How to run automated tests locally ---------------------------------- -Once your new feature is ready, there are ways to check that you did not break anything. +Once your code changes are ready, there are ways to check that they do not break the rest of the code. WarpX has automated tests running every time a commit is pushed to an open pull request. The input files and scripts used by the automated tests can be found in the `Examples `__ directory, either under `Physics_applications `__ or `Tests `__. -For easier debugging, it can be convenient to run the tests on your local machine by executing CTest as illustrated in the examples below (where we assume that WarpX was configured and built in the directory ``build``): +For easier debugging, it can be convenient to run the tests on your local computer by executing CTest as illustrated in the examples below (where we assume that WarpX was configured and built in the directory ``build``): * List tests available for the current build options: @@ -101,7 +106,7 @@ For easier debugging, it can be convenient to run the tests on your local machin ctest --test-dir build -R "test_3d_langmuir_multi\..*" Note that filtering with ``-R "test_3d_langmuir_multi"`` would include the additional tests that have the same substring in their name and would not be sufficient to isolate a single test. 
- Note also that the escaping ``\.`` in the regular expression is necessary in order to take into account the fact that each test is automatically appended with the strings ``.run``, ``.analysis`` and possibly ``.cleanup``. + Note also that the escaping ``\.`` in the regular expression is necessary in order to take into account the fact that each test is automatically appended with the strings ``.run``, ``.analysis``, ``.checksum`` and possibly ``.cleanup``. * Run only tests not labeled with the ``slow`` label: @@ -120,74 +125,56 @@ How to add automated tests An automated test typically consists of the following components: * input file or PICMI input script; + * analysis script; -* checksum file. -To learn more about how to use checksums in automated tests, please see the corresponding section :ref:`Checksums on Tests `. +* checksum file. As mentioned above, the input files and scripts used by the automated tests can be found in the `Examples `__ directory, under either `Physics_applications `__ or `Tests `__. Each test directory must contain a file named ``CMakeLists.txt`` where all tests associated with the input files and scripts in that directory must be listed. -A new test can be added by adding a corresponding entry in ``CMakeLists.txt`` as illustrated in the examples below: - -* Add the **regular test** ``test_1d_laser_acceleration``: +A checksum file is a file that contains reference values obtained by computing a chosen checksum for a set of fields. +More precisely, we compute the sums of the absolute values of the arrays corresponding to each field from the results produced by the automated test and compare these checksums with the reference ones stored in the checksum file of that test, with respect to specific tolerances. +This is expected to be sensitive enough to make the automated test fail if the code changes cause significant differences in the final results, thus catching possible bugs. - .. code-block:: cmake +A new test can be added by calling the function ``add_warpx_test`` in ``CMakeLists.txt``. The function has the following signature: - add_warpx_test( - test_1d_laser_acceleration # name - 1 # dims - 2 # nprocs - inputs_test_1d_laser_acceleration # inputs - "analysis.py diags/diag1000100" # analysis - "analysis_default_regression.py --path diags/diag1000100" # checksum - OFF # dependency - ) +.. code-block:: cmake -* Add the **PICMI test** ``test_2d_laser_acceleration_picmi``: + function(add_warpx_test + name # unique test name: + # test_1d_example, test_2d_example_picmi, etc. + dims # dimensionality: 1, 2, 3, RZ + nprocs # number of processes: 1, 2 + inputs # inputs file or PICMI script: + # inputs_test_1d_example, inputs_test_2d_example_picmi.py, "inputs_test_2d_example_picmi.py arg1 arg2", etc. + analysis # custom test analysis command: + # OFF, "analysis.py", "analysis.py arg1 arg2", etc. + checksum # default regression analysis command: + # OFF, "analysis_default_regression.py --path diags/diag1", etc. + dependency # name of base test that must run first (must match name exactly): + # OFF, test_1d_example_prepare, etc. + ) - .. code-block:: cmake +Here's how to add an automated test: - add_warpx_test( - test_2d_laser_acceleration_picmi # name - 2 # dims - 2 # nprocs - inputs_test_2d_laser_acceleration_picmi.py # inputs - "analysis.py diags/diag1000100" # analysis - "analysis_default_regression.py --path diags/diag1000100" # checksum - OFF # dependency - ) +#. Choose the test directory, either an existing one or a new one. 
-* Add the **restart test** ``test_3d_laser_acceleration_restart``: +#. Add an input file or PICMI input script. + The name must follow the naming conventions described in the section :ref:`developers-testing-naming` below. - .. code-block:: cmake +#. Add a Python analysis script to analyze the results of the test. - add_warpx_test( - test_3d_laser_acceleration_restart # name - 3 # dims - 2 # nprocs - inputs_test_3d_laser_acceleration_restart # inputs - "analysis_default_restart.py diags/diag1000100" # analysis - "analysis_default_regression.py --path diags/diag1000100" # checksum - test_3d_laser_acceleration # dependency - ) +#. Add the test to the ``CMakeLists.txt`` file (add such file if you are adding the test in a new test directory) using the function ``add_warpx_test`` mentioned above. - Note that the restart has an explicit dependency, namely it can run only provided that the original test, from which the restart checkpoint files will be read, runs first. +#. If the test directory is new, add the directory with the command ``add_subdirectory`` in `Physics_applications/CMakeLists.txt `__ or `Tests/CMakeLists.txt `__, depending on where the test directory is located. -* A more complex example. Add the **PICMI test** ``test_rz_laser_acceleration_picmi``, with custom command-line arguments ``--test`` and ``dir``, openPMD time series output, and custom command line arguments for the checksum comparison: +#. If the test directory is new, make a symbolic link to the default regression analysis script ``analysis_default_regression.py`` from `Examples/analysis_default_regression.py `__, by running ``ln -s ../../analysis_default_regression.py analysis_default_regression.py`` from the test directory. - .. code-block:: cmake +#. Run the test locally with ``ctest``, after setting the environment variable ``CHECKSUM_RESET=ON``, in order to generate automatically the checksum file. - add_warpx_test( - test_rz_laser_acceleration_picmi # name - RZ # dims - 2 # nprocs - "inputs_test_rz_laser_acceleration_picmi.py --test --dir 1" # inputs - "analysis.py diags/diag1/" # analysis - "analysis_default_regression.py --path diags/diag1/ --skip-particles --rtol 1e-7" # checksum - OFF # dependency - ) +Once you have added the test, run the test locally again, after resetting ``CHECKSUM_RESET=OFF``, to check that everything works as expected. The ``analysis`` and ``checksum`` commands passed as arguments to ``add_warpx_test`` can be set to ``OFF`` if the intention is to skip the respective analysis for a given test. @@ -196,14 +183,6 @@ If you need a new Python package dependency for testing, please add it in `Regre Sometimes two or more tests share a large number of input parameters. The shared input parameters can be collected in a "base" input file that can be passed as a runtime parameter in the actual test input files through the parameter ``FILE``. -If the new test is added in a new directory that did not exist before, please add the name of that directory with the command ``add_subdirectory`` in `Physics_applications/CMakeLists.txt `__ or `Tests/CMakeLists.txt `__, depending on where the new test directory is located. - -If not already present, the default regression analysis script ``analysis_default_regression.py`` in the examples above must be linked from `Examples/analysis_default_regression.py `__, by executing once the following command from the test directory: - - .. 
code-block:: bash - - ln -s ../../analysis_default_regression.py analysis_default_regression.py - Here is the help message of the default regression analysis script, including usage and list of available options and arguments: .. code-block:: bash @@ -216,6 +195,42 @@ Here is the help message of the default regression analysis script, including us --skip-fields skip fields when comparing checksums --skip-particles skip particles when comparing checksums +How to reset checksums locally +------------------------------ + +It is possible to reset a checksum file locally by running the corresponding test with ``ctest`` with the environment variable ``CHECKSUM_RESET=ON``. For example: + + .. code-block:: bash + + CHECKSUM_RESET=ON ctest --test-dir build -R laser_acceleration + +Alternatively, it is also possible to reset multiple checksum files using the output of our Azure pipelines, which can be useful for code changes that result in resetting a large numbers of checksum files. +Here's how to do so: + +#. On the GitHub page of the pull request, find (one of) the pipeline(s) failing due to checksum regressions and click on "Details" (highlighted in blue). + + .. figure:: https://gist.github.com/user-attachments/assets/09db91b9-5711-4250-8b36-c52a6049e38e + +#. In the new page that opens up, click on "View more details on Azure pipelines" (highlighted in blue). + + .. figure:: https://gist.github.com/user-attachments/assets/ab0c9a24-5518-4da7-890f-d79fa1c8de4c + +#. In the new page that opens up, select the group of tests for which you want to reset the checksum files (e.g., ``cartesian_3d``) and click on "View raw log". + + .. figure:: https://gist.github.com/user-attachments/assets/06c1fe27-2c13-4bd3-b6b8-8b8941b37889 + +#. Save the raw log as a text file on your computer. + +#. Go to the directory `Tools/DevUtils `__ and run the Python script `update_benchmarks_from_azure_output.py `__ passing the path of the raw log text file as a command line argument: + + .. code:: bash + + python update_benchmarks_from_azure_output.py path/to/raw_log.txt + + This will update the checksum files for all the tests in the raw log that did not pass the checksum analysis. + +.. _developers-testing-naming: + Naming conventions for automated tests -------------------------------------- @@ -231,40 +246,9 @@ Note that we currently obey the following snake\_case naming conventions for tes #. **Base input files** (that is, files collecting input parameters shared between two or more tests) are typically named ``inputs_base_1d``, ``inputs_base_2d``, ``inputs_base_3d`` or ``inputs_base_rz``, possibly followed by additional strings if need be. -Useful tool for plotfile comparison: ``fcompare`` -------------------------------------------------- - -AMReX provides ``fcompare``, an executable that takes two ``plotfiles`` as input and returns the absolute and relative difference for each field between these two plotfiles. For some changes in the code, it is very convenient to run the same input file with an old and your current version, and ``fcompare`` the plotfiles at the same iteration. To use it: - -.. code-block:: sh - - # Compile the executable - cd /Tools/Plotfile/ # This may change - make -j 8 - # Run the executable to compare old and new versions - /Tools/Plotfile/fcompare.gnu.ex old/plt00200 new/plt00200 - -which should return something like - -.. 
code-block:: sh - - variable name absolute error relative error - (||A - B||) (||A - B||/||A||) - ---------------------------------------------------------------------------- - level = 0 - jx 1.044455105e+11 1.021651316 - jy 4.08631977e+16 7.734299273 - jz 1.877301764e+14 1.073458933 - Ex 4.196315448e+10 1.253551615 - Ey 3.330698083e+12 6.436470137 - Ez 2.598167798e+10 0.6804387128 - Bx 273.8687473 2.340209782 - By 152.3911863 1.10952567 - Bz 37.43212767 2.1977289 - part_per_cell 15 0.9375 - Ex_fp 4.196315448e+10 1.253551615 - Ey_fp 3.330698083e+12 6.436470137 - Ez_fp 2.598167798e+10 0.6804387128 - Bx_fp 273.8687473 2.340209782 - By_fp 152.3911863 1.10952567 - Bz_fp 37.43212767 2.1977289 +Other resources +--------------- + +With regard to testing the code more generally, not necessarily in the context of continuous integration, AMReX provides a number of useful post-processing tools for plotfiles. +The complete list of tools can be found `here `__. +One tool that traditionally stood out as especially useful for core developers and maintainers is `fcompare `__. diff --git a/Docs/source/developers/documentation.rst b/Docs/source/developers/how_to_write_the_docs.rst similarity index 98% rename from Docs/source/developers/documentation.rst rename to Docs/source/developers/how_to_write_the_docs.rst index 5d604bcf9b3..827b5950d80 100644 --- a/Docs/source/developers/documentation.rst +++ b/Docs/source/developers/how_to_write_the_docs.rst @@ -1,7 +1,7 @@ .. _developers-docs: -Documentation -============= +How to write documentation +========================== Doxygen documentation --------------------- diff --git a/Docs/source/developers/run_clang_tidy_locally.rst b/Docs/source/developers/run_clang_tidy_locally.rst deleted file mode 100644 index 3f600019fe7..00000000000 --- a/Docs/source/developers/run_clang_tidy_locally.rst +++ /dev/null @@ -1,55 +0,0 @@ -.. _developers-run_clang_tidy_locally: - -The clang-tidy linter -===================== - -Clang-tidy CI test ------------------- - -WarpX's CI tests include several checks performed with the -`clang-tidy `__ linter -(currently the version 15 of this tool). The complete list of checks -enforced in CI tests can be found in the ``.clang-tidy`` configuration file. - -.. dropdown:: clang-tidy configuration file - :color: light - :icon: info - :animate: fade-in-slide-down - - .. literalinclude:: ../../../.clang-tidy - :language: yaml - -Run clang-tidy linter locally ------------------------------ - -We provide a script to run clang-tidy locally. The script can be run as follows, -provided that all the requirements to compile WarpX are met (see `building from source `). -The script generates a simple wrapper to ensure that `clang-tidy` is only applied to WarpX source files -and compiles WarpX in 1D,2D,3D, and RZ using such wrapper. By default WarpX is compiled in single precision -with PSATD solver, QED module, QED table generator and Embedded boundary in order to find more -potential issues with the `clang-tidy` tool. - -Few optional environment variables can be set to tune the behavior of the script: - -* ``WARPX_TOOLS_LINTER_PARALLEL``: sets the number of cores to be used for the compilation -* ``CLANG``, ``CLANGXX``, and ``CLANGTIDY`` : set the version of the compiler and of the linter - -Note: clang v15 is currently used in CI tests. It is therefore recommended to use this version. -Otherwise, a newer version may find issues not currently covered by CI tests (checks are opt-in) -while older versions may not find all the issues. - -.. 
code-block:: bash - - export WARPX_TOOLS_LINTER_PARALLEL=12 - export CLANG=clang-15 - export CLANGXX=clang++-15 - export CLANGTIDY=clang-tidy-15 - ./Tools/Linter/runClangTidy.sh - -.. dropdown:: Script Details - :color: light - :icon: info - :animate: fade-in-slide-down - - .. literalinclude:: ../../../Tools/Linter/runClangTidy.sh - :language: bash diff --git a/Docs/source/developers/workflows.rst b/Docs/source/developers/workflows.rst deleted file mode 100644 index f7c81ae70d8..00000000000 --- a/Docs/source/developers/workflows.rst +++ /dev/null @@ -1,14 +0,0 @@ -.. _development-workflows: - -Workflows -========= - -.. toctree:: - :maxdepth: 1 - - profiling - testing - checksum - run_clang_tidy_locally - local_compile - documentation diff --git a/Docs/source/index.rst b/Docs/source/index.rst index 9668620976a..dfdeb3b9530 100644 --- a/Docs/source/index.rst +++ b/Docs/source/index.rst @@ -126,7 +126,7 @@ Development developers/developers developers/doxygen developers/gnumake - developers/workflows + developers/how_to_guides developers/faq .. good to have in the future: .. developers/repostructure From 4eccf4dffaab605d6919e8c2b770669eea3d9fc7 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 24 Jan 2025 19:13:24 -0800 Subject: [PATCH 186/278] Doc: Workflows -> How-To (#5608) Use the more understandable name for this section. Follow-up to #5525 --- Docs/source/dataanalysis/workflows.rst | 5 +++-- Docs/source/developers/how_to_guides.rst | 2 +- Docs/source/usage/workflows.rst | 5 +++-- 3 files changed, 7 insertions(+), 5 deletions(-) diff --git a/Docs/source/dataanalysis/workflows.rst b/Docs/source/dataanalysis/workflows.rst index dc611b00e53..ab4ca88ddab 100644 --- a/Docs/source/dataanalysis/workflows.rst +++ b/Docs/source/dataanalysis/workflows.rst @@ -1,7 +1,8 @@ +.. _dataanalysis-how-to: .. _dataanalysis-workflows: -Workflows -========= +How-To Guides +============= This section collects typical user workflows and best practices for data analysis with WarpX. diff --git a/Docs/source/developers/how_to_guides.rst b/Docs/source/developers/how_to_guides.rst index 093641e01b0..25ed4f2d30d 100644 --- a/Docs/source/developers/how_to_guides.rst +++ b/Docs/source/developers/how_to_guides.rst @@ -1,6 +1,6 @@ .. _development-howtoguides: -How-to guides +How-To Guides ============= .. toctree:: diff --git a/Docs/source/usage/workflows.rst b/Docs/source/usage/workflows.rst index 5c5329e18b8..fa2f73b35d0 100644 --- a/Docs/source/usage/workflows.rst +++ b/Docs/source/usage/workflows.rst @@ -1,7 +1,8 @@ +.. _usage-how-to: .. _usage-workflows: -Workflows -========= +How-To Guides +============= This section collects typical user workflows and best practices for WarpX. From 480186ab55a49023730b05886d3a84ee4163479b Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Tue, 28 Jan 2025 02:30:45 +0100 Subject: [PATCH 187/278] WarpX class: make `verboncoeur_axis_correction` a private member variable (#5604) This PR changes `verboncoeur_axis_correction` from a static WarpX class variable to a private member variable (renamed `m_verboncoeur_axis_correction`). This is a small step towards reducing the use of static variables in the WarpX class. 
--- Source/FieldSolver/WarpXPushFieldsEM.cpp | 8 ++++---- Source/WarpX.H | 12 ++++++------ Source/WarpX.cpp | 4 +--- 3 files changed, 11 insertions(+), 13 deletions(-) diff --git a/Source/FieldSolver/WarpXPushFieldsEM.cpp b/Source/FieldSolver/WarpXPushFieldsEM.cpp index 139988b69a2..7e04f1c2b15 100644 --- a/Source/FieldSolver/WarpXPushFieldsEM.cpp +++ b/Source/FieldSolver/WarpXPushFieldsEM.cpp @@ -1331,7 +1331,7 @@ void WarpX::DampFieldsInGuards(const int lev, amrex::MultiFab* mf) // It is faster to apply this on the grid than to do it particle by particle. // It is put here since there isn't another nice place for it. void -WarpX::ApplyInverseVolumeScalingToCurrentDensity (MultiFab* Jx, MultiFab* Jy, MultiFab* Jz, int lev) +WarpX::ApplyInverseVolumeScalingToCurrentDensity (MultiFab* Jx, MultiFab* Jy, MultiFab* Jz, int lev) const { const amrex::IntVect ngJ = Jx->nGrowVect(); const std::array& dx = WarpX::CellSize(lev); @@ -1340,7 +1340,7 @@ WarpX::ApplyInverseVolumeScalingToCurrentDensity (MultiFab* Jx, MultiFab* Jy, Mu constexpr int NODE = amrex::IndexType::NODE; // See Verboncoeur JCP 174, 421-427 (2001) for the modified volume factor - const amrex::Real axis_volume_factor = (verboncoeur_axis_correction ? 1._rt/3._rt : 1._rt/4._rt); + const amrex::Real axis_volume_factor = (m_verboncoeur_axis_correction ? 1._rt/3._rt : 1._rt/4._rt); for ( MFIter mfi(*Jx, TilingIfNotGPU()); mfi.isValid(); ++mfi ) { @@ -1504,7 +1504,7 @@ WarpX::ApplyInverseVolumeScalingToCurrentDensity (MultiFab* Jx, MultiFab* Jy, Mu } void -WarpX::ApplyInverseVolumeScalingToChargeDensity (MultiFab* Rho, int lev) +WarpX::ApplyInverseVolumeScalingToChargeDensity (MultiFab* Rho, int lev) const { const amrex::IntVect ngRho = Rho->nGrowVect(); const std::array& dx = WarpX::CellSize(lev); @@ -1513,7 +1513,7 @@ WarpX::ApplyInverseVolumeScalingToChargeDensity (MultiFab* Rho, int lev) constexpr int NODE = amrex::IndexType::NODE; // See Verboncoeur JCP 174, 421-427 (2001) for the modified volume factor - const amrex::Real axis_volume_factor = (verboncoeur_axis_correction ? 1._rt/3._rt : 1._rt/4._rt); + const amrex::Real axis_volume_factor = (m_verboncoeur_axis_correction ? 1._rt/3._rt : 1._rt/4._rt); Box tilebox; diff --git a/Source/WarpX.H b/Source/WarpX.H index d1f3e585c4f..0efc268ba30 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -333,10 +333,6 @@ public: //! small time steps. static bool galerkin_interpolation; - //! Flag whether the Verboncoeur correction is applied to the current and charge density - //! on the axis when using RZ. - static bool verboncoeur_axis_correction; - //! If true, a bilinear filter is used to smooth charge and currents static bool use_filter; //! If true, the bilinear filtering of charge and currents is done in Fourier space @@ -623,10 +619,10 @@ public: void ApplyInverseVolumeScalingToCurrentDensity(amrex::MultiFab* Jx, amrex::MultiFab* Jy, amrex::MultiFab* Jz, - int lev); + int lev) const; void ApplyInverseVolumeScalingToChargeDensity(amrex::MultiFab* Rho, - int lev); + int lev) const; #endif /** @@ -1478,6 +1474,10 @@ private: // whether to use subcycling bool m_do_subcycling = false; + //! Flag whether the Verboncoeur correction is applied to the current and charge density + //! on the axis when using RZ. 
+ bool m_verboncoeur_axis_correction = true; + // Macroscopic properties std::unique_ptr m_macroscopic_properties; diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 6c2557f71db..cad20f17983 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -153,8 +153,6 @@ int WarpX::current_centering_noz = 2; bool WarpX::use_fdtd_nci_corr = false; bool WarpX::galerkin_interpolation = true; -bool WarpX::verboncoeur_axis_correction = true; - bool WarpX::use_filter = true; bool WarpX::use_kspace_filter = true; bool WarpX::use_filter_compensation = false; @@ -726,7 +724,7 @@ WarpX::ReadParameters () #ifdef WARPX_DIM_RZ const ParmParse pp_boundary("boundary"); - pp_boundary.query("verboncoeur_axis_correction", verboncoeur_axis_correction); + pp_boundary.query("verboncoeur_axis_correction", m_verboncoeur_axis_correction); #endif // Read timestepping options From 65c4a3a2d30c6d59ab9f12142f710f365bf5a208 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 28 Jan 2025 02:09:09 +0000 Subject: [PATCH 188/278] [pre-commit.ci] pre-commit autoupdate (#5610) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.2 → v0.9.3](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.2...v0.9.3) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index fc28ca58869..bb03acf77ca 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.2 + rev: v0.9.3 hooks: # Run the linter - id: ruff From 355d7bec39ca41c14c96be06c62d677262dd276c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Maxence=20Th=C3=A9venet?= Date: Tue, 28 Jan 2025 16:31:06 +0100 Subject: [PATCH 189/278] [mini] Fix stretch factor in Gaussian initialisation w/ spatial and angular chirps (#5494) It seems to me that PR https://github.com/ECP-WarpX/WarpX/pull/2173 changed the actual terms of laser initialization in the presence of chirps. This PR proposes to revert back to the previous behavior, which I think is more correct. In this way, I think the advantage of https://github.com/ECP-WarpX/WarpX/pull/2173 regarding single-precision are preserved. 
--- Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp b/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp index 96d61d920f1..a1162e9a7a5 100644 --- a/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp +++ b/Source/Laser/LaserProfilesImpl/LaserProfileGaussian.cpp @@ -117,8 +117,8 @@ WarpXLaserProfiles::GaussianLaserProfile::fill_amplitude ( // Time stretching due to STCs and phi2 complex envelope // (1 if zeta=0, beta=0, phi2=0) const Complex stretch_factor = 1._rt + 4._rt * - (m_params.zeta+m_params.beta*m_params.focal_distance*inv_tau2) - * (m_params.zeta+m_params.beta*m_params.focal_distance*inv_complex_waist_2) + ((m_params.zeta+m_params.beta*m_params.focal_distance)*inv_tau2) + * ((m_params.zeta+m_params.beta*m_params.focal_distance)*inv_complex_waist_2) + 2._rt*I*(m_params.phi2-m_params.beta*m_params.beta*k0*m_params.focal_distance)*inv_tau2; // Amplitude and monochromatic oscillations From 0b10fca01587887e9588c36a941bf7ee63ba97cc Mon Sep 17 00:00:00 2001 From: Olga Shapoval <30510597+oshapoval@users.noreply.github.com> Date: Tue, 28 Jan 2025 10:39:00 -0800 Subject: [PATCH 190/278] Reduce particle shape when a particle approaches the EB (#5209) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit **Description edited by @RemiLehe** # Overview This PR reduces the particle shape to order 1, when the particle gets closer to the embedded boundary: Screenshot 2025-01-23 at 8 46 34 AM This ensures that the particle does not deposit charge in valid cells, at the time when it is removed, which in turn ensures proper charge conservation with the electromagnetic solver. # Implementation - This PR allocates and initializes a new mask `eb_reduce_particle_shape` (and `iMultiFab`) that indicates in which cells to reduce the particle shape. - The deposition kernels have been modified to use this flag. In order to make sure that this PR does not affect the performance of the deposition kernel in the absence of EB, two versions of the deposition kernel are compiled. # Tests This PR adds tests similar to the ones introduced in https://github.com/ECP-WarpX/WarpX/pull/5562 to check for charge conservation near the embedded boundary, but with higher-order shape factors: - The 2D tests fail on `development` for shape 2 and 3 but pass on this PR. - For some reason, the 3D and RZ tests only fail on `development` for shape 3 ; they do pass for this PR. It is not clear why the tests do not fail on `development` with shape 2. **Note:** For now, this PR only modifies the current deposition (and only the Esirkepov kernel). A follow-up PR will also modify the charge deposition. 
--------- Co-authored-by: Remi Lehe Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .../CMakeLists.txt | 71 ++++++++++++ .../analysis.py | 2 +- ...oundary_em_particle_absorption_sh_factor_2 | 11 ++ ...oundary_em_particle_absorption_sh_factor_3 | 11 ++ ...oundary_em_particle_absorption_sh_factor_2 | 11 ++ ...oundary_em_particle_absorption_sh_factor_3 | 11 ++ ...oundary_em_particle_absorption_sh_factor_2 | 11 ++ ...oundary_em_particle_absorption_sh_factor_3 | 11 ++ ...ry_em_particle_absorption_sh_factor_2.json | 24 ++++ ...ry_em_particle_absorption_sh_factor_3.json | 24 ++++ ...ed_boundary_removal_depth_sh_factor_1.json | 30 +++++ ...ed_boundary_removal_depth_sh_factor_2.json | 30 +++++ ...ed_boundary_removal_depth_sh_factor_3.json | 31 ++++++ ...ry_em_particle_absorption_sh_factor_2.json | 24 ++++ ...ry_em_particle_absorption_sh_factor_3.json | 24 ++++ ...ed_boundary_removal_depth_sh_factor_1.json | 30 +++++ ...ed_boundary_removal_depth_sh_factor_2.json | 30 +++++ ...ed_boundary_removal_depth_sh_factor_3.json | 31 ++++++ ...ry_em_particle_absorption_sh_factor_2.json | 24 ++++ ...ry_em_particle_absorption_sh_factor_3.json | 24 ++++ ...ed_boundary_removal_depth_sh_factor_1.json | 28 +++++ ...ed_boundary_removal_depth_sh_factor_2.json | 28 +++++ ...ed_boundary_removal_depth_sh_factor_3.json | 28 +++++ Source/EmbeddedBoundary/WarpXInitEB.cpp | 91 +++++++++++++++ Source/Initialization/WarpXInitData.cpp | 13 +-- Source/Parallelization/WarpXRegrid.cpp | 1 + .../Particles/Deposition/CurrentDeposition.H | 104 ++++++++++++++++-- Source/Particles/WarpXParticleContainer.cpp | 37 +++++-- Source/WarpX.H | 29 ++++- Source/WarpX.cpp | 6 + 30 files changed, 799 insertions(+), 31 deletions(-) create mode 100644 Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_2d_embedded_boundary_em_particle_absorption_sh_factor_2 create mode 100644 Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_2d_embedded_boundary_em_particle_absorption_sh_factor_3 create mode 100644 Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_3d_embedded_boundary_em_particle_absorption_sh_factor_2 create mode 100644 Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_3d_embedded_boundary_em_particle_absorption_sh_factor_3 create mode 100644 Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_rz_embedded_boundary_em_particle_absorption_sh_factor_2 create mode 100644 Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_rz_embedded_boundary_em_particle_absorption_sh_factor_3 create mode 100644 Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_em_particle_absorption_sh_factor_2.json create mode 100644 Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_em_particle_absorption_sh_factor_3.json create mode 100644 Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_removal_depth_sh_factor_1.json create mode 100644 Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_removal_depth_sh_factor_2.json create mode 100644 Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_removal_depth_sh_factor_3.json create mode 100644 Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_em_particle_absorption_sh_factor_2.json create mode 100644 Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_em_particle_absorption_sh_factor_3.json create mode 100644 Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_removal_depth_sh_factor_1.json create mode 
100644 Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_removal_depth_sh_factor_2.json create mode 100644 Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_removal_depth_sh_factor_3.json create mode 100644 Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_em_particle_absorption_sh_factor_2.json create mode 100644 Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_em_particle_absorption_sh_factor_3.json create mode 100644 Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_removal_depth_sh_factor_1.json create mode 100644 Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_removal_depth_sh_factor_2.json create mode 100644 Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_removal_depth_sh_factor_3.json diff --git a/Examples/Tests/embedded_boundary_em_particle_absorption/CMakeLists.txt b/Examples/Tests/embedded_boundary_em_particle_absorption/CMakeLists.txt index fb5d54c0dbe..0aa5b48b2b7 100644 --- a/Examples/Tests/embedded_boundary_em_particle_absorption/CMakeLists.txt +++ b/Examples/Tests/embedded_boundary_em_particle_absorption/CMakeLists.txt @@ -13,6 +13,29 @@ if(WarpX_EB) ) endif() +if(WarpX_EB) + add_warpx_test( + test_3d_embedded_boundary_em_particle_absorption_sh_factor_2 # name + 3 # dims + 1 # nprocs + inputs_test_3d_embedded_boundary_em_particle_absorption_sh_factor_2 # inputs + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1" # checksum + OFF # dependency + ) +endif() + +if(WarpX_EB) + add_warpx_test( + test_3d_embedded_boundary_em_particle_absorption_sh_factor_3 # name + 3 # dims + 1 # nprocs + inputs_test_3d_embedded_boundary_em_particle_absorption_sh_factor_3 # inputs + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1" # checksum + OFF # dependency + ) +endif() if(WarpX_EB) add_warpx_test( @@ -26,6 +49,30 @@ if(WarpX_EB) ) endif() +if(WarpX_EB) + add_warpx_test( + test_2d_embedded_boundary_em_particle_absorption_sh_factor_2 # name + 2 # dims + 1 # nprocs + inputs_test_2d_embedded_boundary_em_particle_absorption_sh_factor_2 # inputs + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1" # checksum + OFF # dependency + ) +endif() + +if(WarpX_EB) + add_warpx_test( + test_2d_embedded_boundary_em_particle_absorption_sh_factor_3 # name + 2 # dims + 1 # nprocs + inputs_test_2d_embedded_boundary_em_particle_absorption_sh_factor_3 # inputs + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1" # checksum + OFF # dependency + ) +endif() + if(WarpX_EB) add_warpx_test( test_rz_embedded_boundary_em_particle_absorption_sh_factor_1 # name @@ -37,3 +84,27 @@ if(WarpX_EB) OFF # dependency ) endif() + +if(WarpX_EB) + add_warpx_test( + test_rz_embedded_boundary_em_particle_absorption_sh_factor_2 # name + RZ # dims + 1 # nprocs + inputs_test_rz_embedded_boundary_em_particle_absorption_sh_factor_2 # inputs + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1" # checksum + OFF # dependency + ) +endif() + +if(WarpX_EB) + add_warpx_test( + test_rz_embedded_boundary_em_particle_absorption_sh_factor_3 # name + RZ # dims + 1 # nprocs + inputs_test_rz_embedded_boundary_em_particle_absorption_sh_factor_3 # inputs + "analysis.py" # analysis + "analysis_default_regression.py --path diags/diag1" # checksum + OFF # dependency + ) +endif() diff --git a/Examples/Tests/embedded_boundary_em_particle_absorption/analysis.py b/Examples/Tests/embedded_boundary_em_particle_absorption/analysis.py index 
7647c23d846..3ba44a8ac1b 100755 --- a/Examples/Tests/embedded_boundary_em_particle_absorption/analysis.py +++ b/Examples/Tests/embedded_boundary_em_particle_absorption/analysis.py @@ -39,7 +39,7 @@ elif dim == "thetaMode": # In RZ: there are issues with divE on axis # Set the few cells around the axis to 0 for this test - divE_avg[13:19] = 0 + divE_avg[:, 13:19] = 0 tolerance = 4e-12 diff --git a/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_2d_embedded_boundary_em_particle_absorption_sh_factor_2 b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_2d_embedded_boundary_em_particle_absorption_sh_factor_2 new file mode 100644 index 00000000000..4cdbfc5dd5e --- /dev/null +++ b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_2d_embedded_boundary_em_particle_absorption_sh_factor_2 @@ -0,0 +1,11 @@ +# base input parameters +FILE = inputs_base + +geometry.dims = 2 +amr.n_cell = 32 32 +geometry.prob_lo = -10 -10 +geometry.prob_hi = 10 10 +boundary.field_lo = pec absorbing_silver_mueller +boundary.field_hi = pec absorbing_silver_mueller + +algo.particle_shape = 2 diff --git a/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_2d_embedded_boundary_em_particle_absorption_sh_factor_3 b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_2d_embedded_boundary_em_particle_absorption_sh_factor_3 new file mode 100644 index 00000000000..6113f0668fe --- /dev/null +++ b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_2d_embedded_boundary_em_particle_absorption_sh_factor_3 @@ -0,0 +1,11 @@ +# base input parameters +FILE = inputs_base + +geometry.dims = 2 +amr.n_cell = 32 32 +geometry.prob_lo = -10 -10 +geometry.prob_hi = 10 10 +boundary.field_lo = pec absorbing_silver_mueller +boundary.field_hi = pec absorbing_silver_mueller + +algo.particle_shape = 3 diff --git a/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_3d_embedded_boundary_em_particle_absorption_sh_factor_2 b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_3d_embedded_boundary_em_particle_absorption_sh_factor_2 new file mode 100644 index 00000000000..ea977877a2d --- /dev/null +++ b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_3d_embedded_boundary_em_particle_absorption_sh_factor_2 @@ -0,0 +1,11 @@ +# base input parameters +FILE = inputs_base + +geometry.dims = 3 +amr.n_cell = 32 32 32 +geometry.prob_lo = -10 -10 -10 +geometry.prob_hi = 10 10 10 +boundary.field_lo = pec pec absorbing_silver_mueller +boundary.field_hi = pec pec absorbing_silver_mueller + +algo.particle_shape = 1 diff --git a/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_3d_embedded_boundary_em_particle_absorption_sh_factor_3 b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_3d_embedded_boundary_em_particle_absorption_sh_factor_3 new file mode 100644 index 00000000000..553e5e058e7 --- /dev/null +++ b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_3d_embedded_boundary_em_particle_absorption_sh_factor_3 @@ -0,0 +1,11 @@ +# base input parameters +FILE = inputs_base + +geometry.dims = 3 +amr.n_cell = 32 32 32 +geometry.prob_lo = -10 -10 -10 +geometry.prob_hi = 10 10 10 +boundary.field_lo = pec pec absorbing_silver_mueller +boundary.field_hi = pec pec absorbing_silver_mueller + +algo.particle_shape = 3 diff --git a/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_rz_embedded_boundary_em_particle_absorption_sh_factor_2 
b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_rz_embedded_boundary_em_particle_absorption_sh_factor_2 new file mode 100644 index 00000000000..7faf7fd8934 --- /dev/null +++ b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_rz_embedded_boundary_em_particle_absorption_sh_factor_2 @@ -0,0 +1,11 @@ +# base input parameters +FILE = inputs_base + +geometry.dims = RZ +amr.n_cell = 16 32 +geometry.prob_lo = 0 -10 +geometry.prob_hi = 10 10 +boundary.field_lo = none absorbing_silver_mueller +boundary.field_hi = pec absorbing_silver_mueller + +algo.particle_shape = 1 diff --git a/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_rz_embedded_boundary_em_particle_absorption_sh_factor_3 b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_rz_embedded_boundary_em_particle_absorption_sh_factor_3 new file mode 100644 index 00000000000..aad65594c96 --- /dev/null +++ b/Examples/Tests/embedded_boundary_em_particle_absorption/inputs_test_rz_embedded_boundary_em_particle_absorption_sh_factor_3 @@ -0,0 +1,11 @@ +# base input parameters +FILE = inputs_base + +geometry.dims = RZ +amr.n_cell = 16 32 +geometry.prob_lo = 0 -10 +geometry.prob_hi = 10 10 +boundary.field_lo = none absorbing_silver_mueller +boundary.field_hi = pec absorbing_silver_mueller + +algo.particle_shape = 3 diff --git a/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_em_particle_absorption_sh_factor_2.json b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_em_particle_absorption_sh_factor_2.json new file mode 100644 index 00000000000..481aba1f36b --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_em_particle_absorption_sh_factor_2.json @@ -0,0 +1,24 @@ +{ + "lev=0": { + "divE": 2.4521053721245334e-08, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_em_particle_absorption_sh_factor_3.json b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_em_particle_absorption_sh_factor_3.json new file mode 100644 index 00000000000..82b6c6849ac --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_em_particle_absorption_sh_factor_3.json @@ -0,0 +1,24 @@ +{ + "lev=0": { + "divE": 2.2059346534892452e-08, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_removal_depth_sh_factor_1.json b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_removal_depth_sh_factor_1.json new file mode 100644 index 00000000000..632c48df963 --- /dev/null +++ 
b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_removal_depth_sh_factor_1.json @@ -0,0 +1,30 @@ +{ + "lev=0": { + "Bx": 0.0, + "By": 2.3792992316172512e-15, + "Bz": 0.0 , + "Ex": 6.177046470842443e-07, + "Ey": 0.0, + "Ez": 7.259396011803518e-07, + "divE": 2.809306467366024e-07, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_removal_depth_sh_factor_2.json b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_removal_depth_sh_factor_2.json new file mode 100644 index 00000000000..9112c931b55 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_removal_depth_sh_factor_2.json @@ -0,0 +1,30 @@ +{ + "lev=0": { + "Bx": 0.0, + "By": 2.3948084603369097e-15, + "Bz": 0.0, + "Ex": 6.747158562891953e-07, + "Ey": 0.0, + "Ez": 5.541309886315263e-07, + "divE": 2.091715826275267e-07, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_removal_depth_sh_factor_3.json b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_removal_depth_sh_factor_3.json new file mode 100644 index 00000000000..8d6ddf169af --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_embedded_boundary_removal_depth_sh_factor_3.json @@ -0,0 +1,31 @@ +{ + "lev=0": { + "Bx": 0.0, + "By": 2.379299231617251e-15, + "Bz": 0.0, + "Ex": 6.177046470842443e-07, + "Ey": 0.0, + "Ez": 7.259396011803522e-07, + "divE": 2.8093064673660275e-07, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } + } + diff --git a/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_em_particle_absorption_sh_factor_2.json b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_em_particle_absorption_sh_factor_2.json new file mode 100644 index 00000000000..d3e08d9723e --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_em_particle_absorption_sh_factor_2.json @@ -0,0 +1,24 @@ +{ + "lev=0": { + "divE": 4.928354322096152e-07, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + 
"particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_em_particle_absorption_sh_factor_3.json b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_em_particle_absorption_sh_factor_3.json new file mode 100644 index 00000000000..23c03c7e7bc --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_em_particle_absorption_sh_factor_3.json @@ -0,0 +1,24 @@ +{ + "lev=0": { + "divE": 4.3355127342920327e-07, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_removal_depth_sh_factor_1.json b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_removal_depth_sh_factor_1.json new file mode 100644 index 00000000000..109f5fb4d35 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_removal_depth_sh_factor_1.json @@ -0,0 +1,30 @@ +{ + "lev=0": { + "Bx": 3.835377401535272e-15, + "By": 7.634560527952392e-15, + "Bz": 7.670097149513554e-15, + "Ex": 1.837433604148419e-06, + "Ey": 1.4507850267928362e-06, + "Ez": 1.4325637523931794e-06, + "divE": 7.330866223540695e-07, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_removal_depth_sh_factor_2.json b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_removal_depth_sh_factor_2.json new file mode 100644 index 00000000000..9ff91af6550 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_removal_depth_sh_factor_2.json @@ -0,0 +1,30 @@ +{ + "lev=0": { + "Bx": 2.3948084603369097e-15, + "By": 0.0, + "Bz": 6.747158562891953e-07, + "Ex": 0.0, + "Ey": 5.541309886315263e-07, + "Ez": 0.0, + "divE": 2.091715826275267e-07, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_removal_depth_sh_factor_3.json 
b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_removal_depth_sh_factor_3.json new file mode 100644 index 00000000000..0e0cdf5eb0f --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_embedded_boundary_removal_depth_sh_factor_3.json @@ -0,0 +1,31 @@ +{ + "lev=0": { + "Bx": 2.9100687916345874e-15, + "By": 6.121275580503632e-15, + "Bz": 5.9043095451081354e-15, + "Ex": 1.4574231057224582e-06, + "Ey": 1.1648744803916206e-06, + "Ez": 1.16164257835401e-06, + "divE": 6.781029990295743e-07, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } + } + diff --git a/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_em_particle_absorption_sh_factor_2.json b/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_em_particle_absorption_sh_factor_2.json new file mode 100644 index 00000000000..30d7d0ba081 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_em_particle_absorption_sh_factor_2.json @@ -0,0 +1,24 @@ +{ + "lev=0": { + "divE": 1.4599714697029335e-08, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_em_particle_absorption_sh_factor_3.json b/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_em_particle_absorption_sh_factor_3.json new file mode 100644 index 00000000000..76baf73cc3a --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_em_particle_absorption_sh_factor_3.json @@ -0,0 +1,24 @@ +{ + "lev=0": { + "divE": 1.3292471881599093e-08, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_removal_depth_sh_factor_1.json b/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_removal_depth_sh_factor_1.json new file mode 100644 index 00000000000..0376427a4f0 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_removal_depth_sh_factor_1.json @@ -0,0 +1,28 @@ +{ + "lev=0": { + "Br": 0.0, + "Bz": 0.0, + "Er": 1.6208621785146114e-07, + "Ez": 2.805848027148827e-07, + "divE": 5.118824286040605e-08, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, 
+ "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_removal_depth_sh_factor_2.json b/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_removal_depth_sh_factor_2.json new file mode 100644 index 00000000000..0376427a4f0 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_removal_depth_sh_factor_2.json @@ -0,0 +1,28 @@ +{ + "lev=0": { + "Br": 0.0, + "Bz": 0.0, + "Er": 1.6208621785146114e-07, + "Ez": 2.805848027148827e-07, + "divE": 5.118824286040605e-08, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} diff --git a/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_removal_depth_sh_factor_3.json b/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_removal_depth_sh_factor_3.json new file mode 100644 index 00000000000..0376427a4f0 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_rz_embedded_boundary_removal_depth_sh_factor_3.json @@ -0,0 +1,28 @@ +{ + "lev=0": { + "Br": 0.0, + "Bz": 0.0, + "Er": 1.6208621785146114e-07, + "Ez": 2.805848027148827e-07, + "divE": 5.118824286040605e-08, + "rho": 0.0 + }, + "electron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + }, + "positron": { + "particle_position_x": 0.0, + "particle_position_y": 0.0, + "particle_position_z": 0.0, + "particle_momentum_x": 0.0, + "particle_momentum_y": 0.0, + "particle_momentum_z": 0.0, + "particle_weight": 0.0 + } +} diff --git a/Source/EmbeddedBoundary/WarpXInitEB.cpp b/Source/EmbeddedBoundary/WarpXInitEB.cpp index 6a6f594b380..271f12231b0 100644 --- a/Source/EmbeddedBoundary/WarpXInitEB.cpp +++ b/Source/EmbeddedBoundary/WarpXInitEB.cpp @@ -291,6 +291,97 @@ WarpX::ScaleAreas (ablastr::fields::VectorField& face_areas, } } +void +WarpX::MarkReducedShapeCells ( + std::unique_ptr & eb_reduce_particle_shape, + amrex::EBFArrayBoxFactory const & eb_fact, + int const particle_shape_order ) +{ + // Pre-fill array with 0, including in the ghost cells outside of the domain. + // (The guard cells in the domain will be updated by `FillBoundary` at the end of this function.) 
+ eb_reduce_particle_shape->setVal(0, eb_reduce_particle_shape->nGrow()); + + // Extract structures for embedded boundaries + amrex::FabArray const& eb_flag = eb_fact.getMultiEBCellFlagFab(); + +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for (amrex::MFIter mfi(*eb_reduce_particle_shape); mfi.isValid(); ++mfi) { + + const amrex::Box& box = mfi.tilebox(); + amrex::Array4 const & eb_reduce_particle_shape_arr = eb_reduce_particle_shape->array(mfi); + + // Check if the box (including one layer of guard cells) contains a mix of covered and regular cells + const amrex::Box& eb_info_box = mfi.tilebox(amrex::IntVect::TheCellVector()).grow(1); + amrex::FabType const fab_type = eb_flag[mfi].getType( eb_info_box ); + + if (fab_type == amrex::FabType::regular) { // All cells in the box are regular + + // Every cell in box is regular: do not reduce particle shape in any cell + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + eb_reduce_particle_shape_arr(i, j, k) = 0; + }); + + } else if (fab_type == amrex::FabType::covered) { // All cells in the box are covered + + // Every cell in box is fully covered: reduce particle shape + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + eb_reduce_particle_shape_arr(i, j, k) = 1; + }); + + } else { // The box contains a mix of covered and regular cells + + auto const & flag = eb_flag[mfi].array(); + + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + + // Check if any of the neighboring cells over which the particle shape might extend + // are either partially or fully covered. In this case, set eb_reduce_particle_shape_arr + // to one for this cell, to indicate that the particle should use an order 1 shape + // (This ensures that the particle never deposits any charge in a partially or + // fully covered cell, even with higher-order shapes) + // Note: in the code below `particle_shape_order/2` corresponds to the number of neighboring cells + // over which the shape factor could extend, in each direction. + int const i_start = i-particle_shape_order/2; + int const i_end = i+particle_shape_order/2; +#if AMREX_SPACEDIM > 1 + int const j_start = j-particle_shape_order/2; + int const j_end = j+particle_shape_order/2; +#else + int const j_start = j; + int const j_end = j; +#endif +#if AMREX_SPACEDIM > 2 + int const k_start = k-particle_shape_order/2; + int const k_end = k+particle_shape_order/2; +#else + int const k_start = k; + int const k_end = k; +#endif + int reduce_shape = 0; + for (int i_cell = i_start; i_cell <= i_end; ++i_cell) { + for (int j_cell = j_start; j_cell <= j_end; ++j_cell) { + for (int k_cell = k_start; k_cell <= k_end; ++k_cell) { + // `isRegular` returns `false` if the cell is either partially or fully covered. 
+ if ( !flag(i_cell, j_cell, k_cell).isRegular() ) { + reduce_shape = 1; + } + } + } + } + eb_reduce_particle_shape_arr(i, j, k) = reduce_shape; + }); + + } + + } + + // FillBoundary to set the values in the guard cells + eb_reduce_particle_shape->FillBoundary(Geom(0).periodicity()); + +} + void WarpX::MarkUpdateCellsStairCase ( std::array< std::unique_ptr,3> & eb_update, diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 98efb1f5b3f..3d78615fbc3 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1227,20 +1227,12 @@ void WarpX::InitializeEBGridData (int lev) #ifdef AMREX_USE_EB if (lev == maxLevel()) { - // Throw a warning if EB is on and particle_shape > 1 - if ((nox > 1 or noy > 1 or noz > 1) and EB::enabled()) - { - ablastr::warn_manager::WMRecordWarning("Particles", - "when algo.particle_shape > 1, numerical artifacts will be present when\n" - "particles are close to embedded boundaries"); - } + auto const eb_fact = fieldEBFactory(lev); if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD ) { using warpx::fields::FieldType; - auto const eb_fact = fieldEBFactory(lev); - if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { auto edge_lengths_lev = m_fields.get_alldirs(FieldType::edge_lengths, lev); @@ -1261,7 +1253,6 @@ void WarpX::InitializeEBGridData (int lev) MarkUpdateBCellsECT( m_eb_update_B[lev], face_areas_lev, edge_lengths_lev); } else { - // Mark on which grid points E should be updated (stair-case approximation) MarkUpdateCellsStairCase( m_eb_update_E[lev], @@ -1272,12 +1263,12 @@ void WarpX::InitializeEBGridData (int lev) m_eb_update_B[lev], m_fields.get_alldirs(FieldType::Bfield_fp, lev), eb_fact ); - } } ComputeDistanceToEB(); + MarkReducedShapeCells( m_eb_reduce_particle_shape[lev], eb_fact, WarpX::nox ); } #else diff --git a/Source/Parallelization/WarpXRegrid.cpp b/Source/Parallelization/WarpXRegrid.cpp index 79975397196..4c7668aa0e2 100644 --- a/Source/Parallelization/WarpXRegrid.cpp +++ b/Source/Parallelization/WarpXRegrid.cpp @@ -194,6 +194,7 @@ WarpX::RemakeLevel (int lev, Real /*time*/, const BoxArray& ba, const Distributi for (int idim=0; idim < 3; ++idim) { if (eb_enabled) { + RemakeMultiFab( m_eb_reduce_particle_shape[lev] ); if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) { RemakeMultiFab( m_eb_update_E[lev][idim] ); RemakeMultiFab( m_eb_update_B[lev][idim] ); diff --git a/Source/Particles/Deposition/CurrentDeposition.H b/Source/Particles/Deposition/CurrentDeposition.H index cb56c559bc0..bc870257d8f 100644 --- a/Source/Particles/Deposition/CurrentDeposition.H +++ b/Source/Particles/Deposition/CurrentDeposition.H @@ -638,6 +638,9 @@ void doDepositionSharedShapeN (const GetParticlePosition& GetPosition, * \param lo Index lower bounds of domain. * \param q species charge. * \param n_rz_azimuthal_modes Number of azimuthal modes when using RZ geometry. + * \param reduced_particle_shape_mask Array4 of int, Mask that indicates whether a particle + * should use its regular shape factor or a reduced, order-1 shape factor instead in a given cell. 
+ * \param enable_reduced_shape Flag to indicate whether to use the reduced shape factor */ template void doEsirkepovDepositionShapeN (const GetParticlePosition& GetPosition, @@ -656,7 +659,10 @@ void doEsirkepovDepositionShapeN (const GetParticlePosition& GetPosition, const amrex::XDim3 & xyzmin, amrex::Dim3 lo, amrex::Real q, - [[maybe_unused]]int n_rz_azimuthal_modes) + [[maybe_unused]] int n_rz_azimuthal_modes, + const amrex::Array4& reduced_particle_shape_mask, + bool enable_reduced_shape + ) { using namespace amrex; using namespace amrex::literals; @@ -680,9 +686,14 @@ void doEsirkepovDepositionShapeN (const GetParticlePosition& GetPosition, #endif // Loop over particles and deposit into Jx_arr, Jy_arr and Jz_arr - amrex::ParallelFor( - np_to_deposit, - [=] AMREX_GPU_DEVICE (long const ip) { + + // (Compile 2 versions of the kernel: with and without reduced shape) + enum eb_flags : int { has_reduced_shape, no_reduced_shape }; + const int reduce_shape_runtime_flag = (enable_reduced_shape && (depos_order>1))? has_reduced_shape : no_reduced_shape; + + amrex::ParallelFor( TypeList>{}, + {reduce_shape_runtime_flag}, + np_to_deposit, [=] AMREX_GPU_DEVICE (long ip, auto reduce_shape_control) { // --- Get particle quantities Real const gaminv = 1.0_rt/std::sqrt(1.0_rt + uxp[ip]*uxp[ip]*clightsq + uyp[ip]*uyp[ip]*clightsq @@ -735,6 +746,43 @@ void doEsirkepovDepositionShapeN (const GetParticlePosition& GetPosition, double const z_new = (zp - xyzmin.z + (relative_time + 0.5_rt*dt)*uzp[ip]*gaminv)*dinv.z; double const z_old = z_new - dt*dinv.z*uzp[ip]*gaminv; + // Check whether the particle is close to the EB at the old and new position + bool reduce_shape_old, reduce_shape_new; +#ifdef AMREX_USE_CUDA + amrex::ignore_unused(reduced_particle_shape_mask, lo); // Needed to avoid compilation error with nvcc +#endif + if constexpr (reduce_shape_control == has_reduced_shape) { +#if defined(WARPX_DIM_3D) + reduce_shape_old = reduced_particle_shape_mask( + lo.x + int(amrex::Math::floor(x_old)), + lo.y + int(amrex::Math::floor(y_old)), + lo.z + int(amrex::Math::floor(z_old))); + reduce_shape_new = reduced_particle_shape_mask( + lo.x + int(amrex::Math::floor(x_new)), + lo.y + int(amrex::Math::floor(y_new)), + lo.z + int(amrex::Math::floor(z_new))); +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + reduce_shape_old = reduced_particle_shape_mask( + lo.x + int(amrex::Math::floor(x_old)), + lo.y + int(amrex::Math::floor(z_old)), + 0); + reduce_shape_new = reduced_particle_shape_mask( + lo.x + int(amrex::Math::floor(x_new)), + lo.y + int(amrex::Math::floor(z_new)), + 0); +#elif defined(WARPX_DIM_1D_Z) + reduce_shape_old = reduced_particle_shape_mask( + lo.x + int(amrex::Math::floor(z_old)), + 0, 0); + reduce_shape_new = reduced_particle_shape_mask( + lo.x + int(amrex::Math::floor(z_new)), + 0, 0); +#endif + } else { + reduce_shape_old = false; + reduce_shape_new = false; + } + #if defined(WARPX_DIM_RZ) Real const vy = (-uxp[ip]*sintheta_mid + uyp[ip]*costheta_mid)*gaminv; #elif defined(WARPX_DIM_XZ) @@ -749,6 +797,9 @@ void doEsirkepovDepositionShapeN (const GetParticlePosition& GetPosition, // [ijk]_new: leftmost grid point that the particle touches const Compute_shape_factor< depos_order > compute_shape_factor; const Compute_shifted_shape_factor< depos_order > compute_shifted_shape_factor; + // In cells marked by reduced_particle_shape_mask, we need order 1 deposition + const Compute_shifted_shape_factor< 1 > compute_shifted_shape_factor_order1; + 
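+            // The order-1 functor above is only exercised in the kernel variant compiled
+            // with `has_reduced_shape`; the `ignore_unused` call below silences the
+            // corresponding unused-variable warning in the `no_reduced_shape` variant.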
amrex::ignore_unused(compute_shifted_shape_factor_order1); // unused for `no_reduced_shape` // Shape factor arrays // Note that there are extra values above and below @@ -758,19 +809,58 @@ void doEsirkepovDepositionShapeN (const GetParticlePosition& GetPosition, #if !defined(WARPX_DIM_1D_Z) double sx_new[depos_order + 3] = {0.}; double sx_old[depos_order + 3] = {0.}; - const int i_new = compute_shape_factor(sx_new+1, x_new); + const int i_new = compute_shape_factor(sx_new+1, x_new ); const int i_old = compute_shifted_shape_factor(sx_old, x_old, i_new); + // If particle is close to the embedded boundary, recompute deposition with order 1 shape + if constexpr (reduce_shape_control == has_reduced_shape) { + if (reduce_shape_new) { + for (int i=0; i eb_reduce_particle_shape; + if (EB::enabled()) { + eb_reduce_particle_shape = (*warpx.GetEBReduceParticleShapeFlag()[lev])[pti].array(); + } + + if (WarpX::nox == 1){ doEsirkepovDepositionShapeN<1>( - GetPosition, wp.dataPtr() + offset, uxp.dataPtr() + offset, - uyp.dataPtr() + offset, uzp.dataPtr() + offset, ion_lev, - jx_arr, jy_arr, jz_arr, np_to_deposit, dt, relative_time, dinv, xyzmin, lo, q, - WarpX::n_rz_azimuthal_modes); + GetPosition, wp.dataPtr() + offset, uxp.dataPtr() + offset, + uyp.dataPtr() + offset, uzp.dataPtr() + offset, ion_lev, + jx_arr, jy_arr, jz_arr, + np_to_deposit, dt, relative_time, dinv, xyzmin, lo, q, + WarpX::n_rz_azimuthal_modes, + eb_reduce_particle_shape, EB::enabled() ); } else if (WarpX::nox == 2){ doEsirkepovDepositionShapeN<2>( GetPosition, wp.dataPtr() + offset, uxp.dataPtr() + offset, uyp.dataPtr() + offset, uzp.dataPtr() + offset, ion_lev, - jx_arr, jy_arr, jz_arr, np_to_deposit, dt, relative_time, dinv, xyzmin, lo, q, - WarpX::n_rz_azimuthal_modes); + jx_arr, jy_arr, jz_arr, + np_to_deposit, dt, relative_time, dinv, xyzmin, lo, q, + WarpX::n_rz_azimuthal_modes, + eb_reduce_particle_shape, EB::enabled() ); } else if (WarpX::nox == 3){ doEsirkepovDepositionShapeN<3>( GetPosition, wp.dataPtr() + offset, uxp.dataPtr() + offset, uyp.dataPtr() + offset, uzp.dataPtr() + offset, ion_lev, - jx_arr, jy_arr, jz_arr, np_to_deposit, dt, relative_time, dinv, xyzmin, lo, q, - WarpX::n_rz_azimuthal_modes); + jx_arr, jy_arr, jz_arr, + np_to_deposit, dt, relative_time, dinv, xyzmin, lo, q, + WarpX::n_rz_azimuthal_modes, + eb_reduce_particle_shape, EB::enabled() ); } else if (WarpX::nox == 4){ doEsirkepovDepositionShapeN<4>( GetPosition, wp.dataPtr() + offset, uxp.dataPtr() + offset, uyp.dataPtr() + offset, uzp.dataPtr() + offset, ion_lev, - jx_arr, jy_arr, jz_arr, np_to_deposit, dt, relative_time, dinv, xyzmin, lo, q, - WarpX::n_rz_azimuthal_modes); + jx_arr, jy_arr, jz_arr, + np_to_deposit, dt, relative_time, dinv, xyzmin, lo, q, + WarpX::n_rz_azimuthal_modes, + eb_reduce_particle_shape, EB::enabled() ); } + } else if (push_type == PushType::Implicit) { #if (AMREX_SPACEDIM >= 2) auto& xp_n = pti.GetAttribs(particle_comps["x_n"]); diff --git a/Source/WarpX.H b/Source/WarpX.H index 0efc268ba30..f500347febc 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -162,9 +162,9 @@ public: HybridPICModel& GetHybridPICModel () { return *m_hybrid_pic_model; } [[nodiscard]] HybridPICModel * get_pointer_HybridPICModel () const { return m_hybrid_pic_model.get(); } MultiDiagnostics& GetMultiDiags () {return *multi_diags;} - ParticleBoundaryBuffer& GetParticleBoundaryBuffer () { return *m_particle_boundary_buffer; } amrex::Vector,3 > >& GetEBUpdateEFlag() { return m_eb_update_E; } + amrex::Vector< std::unique_ptr > const & 
    GetEBReduceParticleShapeFlag() const { return m_eb_reduce_particle_shape; }

     static void shiftMF (amrex::MultiFab& mf, const amrex::Geometry& geom,
                          int num_shift, int dir, int lev, bool update_cost_flag,
@@ -982,6 +982,26 @@ public:
     void InitEB ();

 #ifdef AMREX_USE_EB
+
+    /** \brief Set a flag to indicate in which cells a particle should deposit charge/current
+     * with a reduced, order 1 shape.
+     *
+     * More specifically, the flag is set to 1 if any of the neighboring cells over which the
+     * particle shape might extend are either partially or fully covered by an embedded boundary.
+     * This ensures that a particle in this cell deposits with an order 1 shape, which in turn
+     * makes sure that the particle never deposits any charge in a partially or fully covered cell.
+     *
+     * \param[in] eb_reduce_particle_shape multifab to be filled with 1s and 0s
+     * \param[in] eb_fact EB factory
+     * \param[in] particle_shape_order order of the particle shape function
+     */
+    void MarkReducedShapeCells (
+        std::unique_ptr<amrex::iMultiFab> & eb_reduce_particle_shape,
+        amrex::EBFArrayBoxFactory const & eb_fact,
+        int particle_shape_order );
+
     /** \brief Set a flag to indicate on which grid points the field `field`
      * should be updated, depending on their position relative to the embedded boundary.
      *
@@ -1405,6 +1425,13 @@ private:
     amrex::Vector< std::array< std::unique_ptr<amrex::iMultiFab>,3 > > m_eb_update_E;
     amrex::Vector< std::array< std::unique_ptr<amrex::iMultiFab>,3 > > m_eb_update_B;

+    /** EB: Mask that indicates whether a particle should use its regular shape factor (mask set to 0)
+     * or a reduced, order-1 shape factor instead (mask set to 1) in a given cell, when depositing charge/current.
+     * The flag is typically set to 1 in cells that are close to the embedded boundary, in order to avoid
+     * errors in charge conservation when a particle is too close to the embedded boundary.
+     */
+    amrex::Vector< std::unique_ptr<amrex::iMultiFab> > m_eb_reduce_particle_shape;
+
     /** EB: for every mesh face flag_info_face contains a:
      *  * 0 if the face needs to be extended
      *  * 1 if the face is large enough to lend area to other faces
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index cad20f17983..9442fed0596 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -332,6 +332,8 @@ WarpX::WarpX ()
     m_eb_update_E.resize(nlevs_max);
     m_eb_update_B.resize(nlevs_max);

+    m_eb_reduce_particle_shape.resize(nlevs_max);
+
     m_flag_info_face.resize(nlevs_max);
     m_flag_ext_face.resize(nlevs_max);
     m_borrowing.resize(nlevs_max);
@@ -2318,9 +2320,13 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm
         amrex::IntVect const ng_ls(2); //EB level set
         m_fields.alloc_init(FieldType::distance_to_eb, lev, amrex::convert(ba, IntVect::TheNodeVector()), dm, nc_ls, ng_ls, 0.0_rt);

+        // Whether to reduce the particle shape to order 1 when close to the EB
+        AllocInitMultiFab(m_eb_reduce_particle_shape[lev], amrex::convert(ba, IntVect::TheCellVector()), dm, ncomps,
+                          ngRho, lev, "m_eb_reduce_particle_shape");

         // EB info are needed only at the finest level
         if (lev == maxLevel()) {
+
             if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) {

                 AllocInitMultiFab(m_eb_update_E[lev][0], amrex::convert(ba, Ex_nodal_flag), dm, ncomps,

From 0e1df8438d54fd522a5557f62436901cbfa2a28d Mon Sep 17 00:00:00 2001
From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>
Date: Tue, 28 Jan 2025 20:46:28 -0800
Subject: [PATCH 191/278] CDash: fix variables in CTestConfig.cmake (#5611)

Try using CTest module variables, instead of CTest script variables,
given that we are not using a CTest script, to fix fields displayed in
the CDash dashboard.
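As a minimal sketch of the distinction (values taken from this repository;
comments are editorial, and this assumes the file is consumed via
`include(CTest)` from the top-level CMakeLists.txt rather than by a
dashboard script):

```cmake
# CTestConfig.cmake -- read by include(CTest), not by a `ctest -S` script

# Dashboard submission settings (unchanged by this patch)
set(CTEST_SUBMIT_URL https://my.cdash.org/submit.php?project=WarpX)
set(CTEST_DROP_SITE_CDASH TRUE)

# CTest *module* variables: these populate the "Site" and "Build Name"
# fields on CDash when tests are driven through include(CTest)
set(SITE "Azure-Pipelines")
set(BUILDNAME "CI-Development")

# The script-mode counterparts, CTEST_SITE and CTEST_BUILD_NAME, only take
# effect when a `ctest -S <script>.cmake` dashboard script drives the build.
```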
Examples of guidance from the official documentation (https://cmake.org/cmake/help/v3.24/manual/ctest.1.html): ![Screenshot from 2025-01-28 11-04-12](https://github.com/user-attachments/assets/a59e088d-8da6-4e2d-a0f4-d5dd508047a4) ![Screenshot from 2025-01-28 11-08-45](https://github.com/user-attachments/assets/f677da64-dfc4-4b98-85dc-79e360f1f915) However, this seems to be true for all other variables in CTestConfig.cmake. Should we try to update them all? --- CTestConfig.cmake | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/CTestConfig.cmake b/CTestConfig.cmake index 938d2a4f518..8af4cb36ac1 100644 --- a/CTestConfig.cmake +++ b/CTestConfig.cmake @@ -13,6 +13,8 @@ set(CTEST_SUBMIT_URL https://my.cdash.org/submit.php?project=WarpX) set(CTEST_DROP_SITE_CDASH TRUE) -# Additional settings -set(CTEST_SITE "Azure-Pipelines") -set(CTEST_BUILD_NAME "CI-Development") +# Set site and build names +# - CTest script variables: CTEST_SITE, CTEST_BUILD_NAME +# - CTest module variables: SITE, BUILDNAME +set(SITE "Azure-Pipelines") +set(BUILDNAME "CI-Development") From 547bfbbce076db2222a91264590cda27d677ff37 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Wed, 29 Jan 2025 18:42:27 +0100 Subject: [PATCH 192/278] Clang-tidy CI test: bump version from 15 to 16 (#5592) This PR bumps the version used for `clang-tidy` CI tests from 15 to 16. It also addresses all the issues found with the upgraded tool. Most of the issues are related to this newly introduced check: - [cppcoreguidelines-avoid-const-or-ref-data-members](https://releases.llvm.org/16.0.0/tools/clang/tools/extra/docs/clang-tidy/checks/cppcoreguidelines/avoid-const-or-ref-data-members.html) The check enforces [CppCoreGuidelines about constant and reference data members]( https://github.com/isocpp/CppCoreGuidelines/blob/master/CppCoreGuidelines.md#c12-dont-make-data-members-const-or-references) . In general, I understand the argument against using constant or reference data members. ~~There is however one case in which I am not fully convinced by the suggestion of the tool: in [PML.H](https://github.com/ECP-WarpX/WarpX/pull/5592/files#diff-f1e020ebe3cd2222f399d50ff05769d0c70482f0e12bbe29b498e9ab2d0f4a53) `const amrex::BoxArray& m_grids;` becomes `amrex::BoxArray m_grids;` and I am wondering if this can be an issue for performances. Maybe we could consider using a (possibly smart) pointer to the `BoxArray`, instead of making a copy.~~ (we are now using a pointer for `amrex::BoxArray m_grids;`). Few issues were instead related to these checks: - [modernize-loop-convert](https://releases.llvm.org/16.0.0/tools/clang/tools/extra/docs/clang-tidy/checks/modernize/loop-convert.html) This check was already enabled, but `clang-tidy-16` broadens its scope with respect to previous versions. - [modernize-use-auto](https://releases.llvm.org/16.0.0/tools/clang/tools/extra/docs/clang-tidy/checks/modernize/use-auto.html) Only one case. I am a bit confused because this should have been found also by version 15 of the tool. - [misc-use-anonymous-namespace](https://releases.llvm.org/16.0.0/tools/clang/tools/extra/docs/clang-tidy/checks/misc/use-anonymous-namespace.html) This is a new check. Actually, the issues found with this check were false positives, but they disappeared when I properly set `misc-use-anonymous-namespace.HeaderFileExtensions` in the `.clang-tidy` configuration file to recognize `.H` files as headers. 
- [misc-misplaced-const](https://releases.llvm.org/16.0.0/tools/clang/tools/extra/docs/clang-tidy/checks/misc/misplaced-const.html) Only one case. I am a bit confused because this should have been found also by version 15 of the tool. - [readability-misleading-indentation](https://releases.llvm.org/16.0.0/tools/clang/tools/extra/docs/clang-tidy/checks/readability/misleading-indentation.html) [**NOW DISABLED DUE TO FALSE POSITIVES**] This check was already enabled. However, with this newer version of the tool, it seems to me that it generates some false positives. Therefore, I would like to propose to **disable** it. We may try to re-enable it when we will bump the version from 16 to 17. --- .clang-tidy | 3 ++ .github/workflows/clang_tidy.yml | 8 +++--- Source/BoundaryConditions/PML.H | 14 +++++----- Source/BoundaryConditions/PML.cpp | 6 ++-- Source/BoundaryConditions/PML_RZ.H | 4 +-- Source/Diagnostics/BTDiagnostics.H | 2 +- .../BackTransformFunctor.H | 6 ++-- .../ComputeDiagFunctors/CellCenterFunctor.H | 2 +- .../ComputeDiagFunctors/DivBFunctor.H | 2 +- .../ComputeDiagFunctors/DivEFunctor.H | 2 +- .../ComputeDiagFunctors/JFunctor.H | 2 +- .../ComputeDiagFunctors/PartPerCellFunctor.H | 2 +- .../ComputeDiagFunctors/PartPerGridFunctor.H | 2 +- .../ParticleReductionFunctor.H | 8 +++--- .../ComputeDiagFunctors/RhoFunctor.H | 4 +-- .../ComputeDiagFunctors/TemperatureFunctor.H | 4 +-- .../FlushFormats/FlushFormatInSitu.cpp | 6 ++-- Source/Diagnostics/FullDiagnostics.cpp | 10 +++---- .../ReducedDiags/LoadBalanceCosts.H | 4 +-- Source/Diagnostics/WarpXOpenPMD.cpp | 28 +++++++++---------- .../FieldAccessorFunctors.H | 4 +-- .../HybridPICModel/HybridPICModel.cpp | 2 +- Source/Particles/AddPlasmaUtilities.H | 4 +-- .../ElementaryProcess/QEDPairGeneration.H | 2 +- .../ElementaryProcess/QEDPhotonEmission.H | 6 ++-- .../ElementaryProcess/QEDSchwingerProcess.H | 10 +++---- Source/Particles/Filter/FilterFunctors.H | 16 +++++------ Source/Particles/MultiParticleContainer.cpp | 4 +-- Source/Particles/ParticleBoundaryBuffer.cpp | 12 ++++---- .../Particles/ParticleCreation/SmartCreate.H | 2 +- Source/Particles/Sorting/SortingUtils.H | 4 +-- Source/ablastr/fields/Interpolate.H | 6 ++-- Source/ablastr/utils/SignalHandling.cpp | 2 +- Source/ablastr/utils/msg_logger/MsgLogger.H | 6 ++-- Tools/Linter/runClangTidy.sh | 8 +++--- 35 files changed, 104 insertions(+), 103 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index 04d1419c5c7..efb60a001d0 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -44,6 +44,7 @@ Checks: ' -readability-implicit-bool-conversion, -readability-isolate-declaration, -readability-magic-numbers, + -readability-misleading-indentation, -readability-named-parameter, -readability-uppercase-literal-suffix ' @@ -55,6 +56,8 @@ CheckOptions: value: "H," - key: modernize-pass-by-value.ValuesOnly value: "true" +- key: misc-use-anonymous-namespace.HeaderFileExtensions + value: "H," HeaderFilterRegex: 'Source[a-z_A-Z0-9\/]+\.H$' diff --git a/.github/workflows/clang_tidy.yml b/.github/workflows/clang_tidy.yml index dda7f2185f5..e6816b1c1a9 100644 --- a/.github/workflows/clang_tidy.yml +++ b/.github/workflows/clang_tidy.yml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@v4 - name: install dependencies run: | - .github/workflows/dependencies/clang.sh 15 + .github/workflows/dependencies/clang.sh 16 - name: set up cache uses: actions/cache@v4 with: @@ -43,8 +43,8 @@ jobs: export CCACHE_LOGFILE=${{ github.workspace }}/ccache.log.txt ccache -z - export CXX=$(which clang++-15) - export CC=$(which clang-15) + 
export CXX=$(which clang++-16) + export CC=$(which clang-16) cmake -S . -B build_clang_tidy \ -DCMAKE_VERBOSE_MAKEFILE=ON \ @@ -62,7 +62,7 @@ jobs: ${{github.workspace}}/.github/workflows/source/makeMakefileForClangTidy.py --input ${{github.workspace}}/ccache.log.txt make -j4 --keep-going -f clang-tidy-ccache-misses.mak \ - CLANG_TIDY=clang-tidy-15 \ + CLANG_TIDY=clang-tidy-16 \ CLANG_TIDY_ARGS="--config-file=${{github.workspace}}/.clang-tidy --warnings-as-errors=*" ccache -s diff --git a/Source/BoundaryConditions/PML.H b/Source/BoundaryConditions/PML.H index 9e7dbc0034c..6be9600b9d9 100644 --- a/Source/BoundaryConditions/PML.H +++ b/Source/BoundaryConditions/PML.H @@ -81,10 +81,10 @@ class SigmaBoxFactory : public amrex::FabFactory { public: - SigmaBoxFactory (const amrex::BoxArray& grid_ba, const amrex::Real* dx, + SigmaBoxFactory (const amrex::BoxArray* grid_ba, const amrex::Real* dx, const amrex::IntVect& ncell, const amrex::IntVect& delta, const amrex::Box& regular_domain, const amrex::Real v_sigma_sb) - : m_grids(grid_ba), m_dx(dx), m_ncell(ncell), m_delta(delta), m_regdomain(regular_domain), m_v_sigma_sb(v_sigma_sb) {} + : m_grids{grid_ba}, m_dx(dx), m_ncell(ncell), m_delta(delta), m_regdomain(regular_domain), m_v_sigma_sb(v_sigma_sb) {} ~SigmaBoxFactory () override = default; SigmaBoxFactory (const SigmaBoxFactory&) = default; @@ -97,7 +97,7 @@ public: [[nodiscard]] SigmaBox* create (const amrex::Box& box, int /*ncomps*/, const amrex::FabInfo& /*info*/, int /*box_index*/) const final { - return new SigmaBox(box, m_grids, m_dx, m_ncell, m_delta, m_regdomain, m_v_sigma_sb); + return new SigmaBox(box, *m_grids, m_dx, m_ncell, m_delta, m_regdomain, m_v_sigma_sb); } void destroy (SigmaBox* fab) const final @@ -112,7 +112,7 @@ public: } private: - const amrex::BoxArray& m_grids; + const amrex::BoxArray* m_grids; const amrex::Real* m_dx; amrex::IntVect m_ncell; amrex::IntVect m_delta; @@ -125,7 +125,7 @@ class MultiSigmaBox { public: MultiSigmaBox(const amrex::BoxArray& ba, const amrex::DistributionMapping& dm, - const amrex::BoxArray& grid_ba, const amrex::Real* dx, + const amrex::BoxArray* grid_ba, const amrex::Real* dx, const amrex::IntVect& ncell, const amrex::IntVect& delta, const amrex::Box& regular_domain, amrex::Real v_sigma_sb); void ComputePMLFactorsB (const amrex::Real* dx, amrex::Real dt); @@ -204,8 +204,8 @@ private: bool m_dive_cleaning; bool m_divb_cleaning; - const amrex::IntVect m_fill_guards_fields; - const amrex::IntVect m_fill_guards_current; + amrex::IntVect m_fill_guards_fields; + amrex::IntVect m_fill_guards_current; const amrex::Geometry* m_geom; const amrex::Geometry* m_cgeom; diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index f45ca222e69..390a09a34c3 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -506,7 +506,7 @@ SigmaBox::ComputePMLFactorsE (const Real* a_dx, Real dt) } MultiSigmaBox::MultiSigmaBox (const BoxArray& ba, const DistributionMapping& dm, - const BoxArray& grid_ba, const Real* dx, + const BoxArray* grid_ba, const Real* dx, const IntVect& ncell, const IntVect& delta, const amrex::Box& regular_domain, const amrex::Real v_sigma_sb) : FabArray(ba,dm,1,0,MFInfo(), @@ -764,7 +764,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, Box single_domain_box = is_single_box_domain ? domain0 : Box(); // Empty box (i.e., Box()) means it's not a single box domain. 
- sigba_fp = std::make_unique(ba, dm, grid_ba_reduced, geom->CellSize(), + sigba_fp = std::make_unique(ba, dm, &grid_ba_reduced, geom->CellSize(), IntVect(ncell), IntVect(delta), single_domain_box, v_sigma_sb); if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { @@ -879,7 +879,7 @@ PML::PML (const int lev, const BoxArray& grid_ba, warpx.m_fields.alloc_init(FieldType::pml_j_cp, Direction{2}, lev, cba_jz, cdm, 1, ngb, 0.0_rt, false, false); single_domain_box = is_single_box_domain ? cdomain : Box(); - sigba_cp = std::make_unique(cba, cdm, grid_cba_reduced, cgeom->CellSize(), + sigba_cp = std::make_unique(cba, cdm, &grid_cba_reduced, cgeom->CellSize(), cncells, cdelta, single_domain_box, v_sigma_sb); if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::PSATD) { diff --git a/Source/BoundaryConditions/PML_RZ.H b/Source/BoundaryConditions/PML_RZ.H index 20c7d360fc7..5508836a171 100644 --- a/Source/BoundaryConditions/PML_RZ.H +++ b/Source/BoundaryConditions/PML_RZ.H @@ -53,8 +53,8 @@ public: private: - const int m_ncell; - const int m_do_pml_in_domain; + int m_ncell; + int m_do_pml_in_domain; const amrex::Geometry* m_geom; // The MultiFabs pml_E_fp and pml_B_fp are setup using the registry. diff --git a/Source/Diagnostics/BTDiagnostics.H b/Source/Diagnostics/BTDiagnostics.H index c7137f45c9d..f4f118892a8 100644 --- a/Source/Diagnostics/BTDiagnostics.H +++ b/Source/Diagnostics/BTDiagnostics.H @@ -242,7 +242,7 @@ private: * will be used by all snapshots to obtain lab-frame data at the respective * z slice location. */ - std::string const m_cell_centered_data_name; + std::string m_cell_centered_data_name; /** Vector of pointers to compute cell-centered data, per level, per component * using the coarsening-ratio provided by the user. 
*/ diff --git a/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.H index bef40ae1ce0..c4410b0a722 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/BackTransformFunctor.H @@ -100,11 +100,11 @@ public: amrex::Real beta_boost) const; private: /** pointer to source multifab (cell-centered multi-component multifab) */ - amrex::MultiFab const * const m_mf_src = nullptr; + const amrex::MultiFab* m_mf_src = nullptr; /** level at which m_mf_src is defined */ - int const m_lev; + int m_lev; /** Number of buffers or snapshots */ - int const m_num_buffers; + int m_num_buffers; /** Vector of amrex::Box with index-space in the lab-frame */ amrex::Vector m_buffer_box; /** Vector of current z co-ordinate in the boosted-frame for each buffer */ diff --git a/Source/Diagnostics/ComputeDiagFunctors/CellCenterFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/CellCenterFunctor.H index dd5bb239ecf..6f0818b180e 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/CellCenterFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/CellCenterFunctor.H @@ -36,7 +36,7 @@ public: void operator()(amrex::MultiFab& mf_dst, int dcomp, int /*i_buffer=0*/) const override; private: /** pointer to source multifab (can be multi-component) */ - amrex::MultiFab const * const m_mf_src = nullptr; + const amrex::MultiFab* m_mf_src = nullptr; int m_lev; /**< level on which mf_src is defined (used in cylindrical) */ /**< (for cylindrical) whether to average all modes into 1 comp */ bool m_convertRZmodes2cartesian; diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H index 1d36b434ae2..347c40e0338 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/DivBFunctor.H @@ -42,7 +42,7 @@ public: private: /** Vector of pointer to source multifab Bx, By, Bz */ ablastr::fields::VectorField m_arr_mf_src; - int const m_lev; /**< level on which mf_src is defined (used in cylindrical) */ + int m_lev; /**< level on which mf_src is defined (used in cylindrical) */ /**< (for cylindrical) whether to average all modes into 1 comp */ bool m_convertRZmodes2cartesian; }; diff --git a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.H index e7691187f3a..3874ebeb6c6 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/DivEFunctor.H @@ -41,7 +41,7 @@ public: private: /** Vector of pointer to source multifab Bx, By, Bz */ ablastr::fields::VectorField m_arr_mf_src; - int const m_lev; /**< level on which mf_src is defined (used in cylindrical) */ + int m_lev; /**< level on which mf_src is defined (used in cylindrical) */ /**< (for cylindrical) whether to average all modes into 1 comp */ bool m_convertRZmodes2cartesian; }; diff --git a/Source/Diagnostics/ComputeDiagFunctors/JFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/JFunctor.H index d9f9a1e82e0..21e0d3f5034 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/JFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/JFunctor.H @@ -39,7 +39,7 @@ public: void operator()(amrex::MultiFab& mf_dst, int dcomp, int /*i_buffer=0*/) const override; private: /** direction of the current density to save */ - const int m_dir; + int m_dir; /** level on which mf_src is defined */ int m_lev; /** (for cylindrical) whether to average all modes into 
1 comp */ diff --git a/Source/Diagnostics/ComputeDiagFunctors/PartPerCellFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/PartPerCellFunctor.H index 1b8785af7b7..491cd2cfe37 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/PartPerCellFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/PartPerCellFunctor.H @@ -30,7 +30,7 @@ public: */ void operator()(amrex::MultiFab& mf_dst, int dcomp, int /*i_buffer=0*/) const override; private: - int const m_lev; /**< level on which mf_src is defined */ + int m_lev; /**< level on which mf_src is defined */ }; #endif // WARPX_PARTPERCELLFUNCTOR_H_ diff --git a/Source/Diagnostics/ComputeDiagFunctors/PartPerGridFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/PartPerGridFunctor.H index 9718c9c7163..b0c3f28ab90 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/PartPerGridFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/PartPerGridFunctor.H @@ -30,7 +30,7 @@ public: */ void operator()(amrex::MultiFab& mf_dst, int dcomp, int /*i_buffer=0*/) const override; private: - int const m_lev; /**< level on which mf_src is defined */ + int m_lev; /**< level on which mf_src is defined */ }; #endif // WARPX_PARTPERGRIDFUNCTOR_H_ diff --git a/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.H index 7de9844a99e..33211900553 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/ParticleReductionFunctor.H @@ -43,10 +43,10 @@ public: */ void operator()(amrex::MultiFab& mf_dst, int dcomp, int /*i_buffer=0*/) const override; private: - int const m_lev; /**< level on which mf_src is defined */ - int const m_ispec; /**< index of species to average over */ - bool const m_do_average; /**< Whether to calculate the average of the data */ - bool const m_do_filter; /**< whether to apply #m_filter_fn */ + int m_lev; /**< level on which mf_src is defined */ + int m_ispec; /**< index of species to average over */ + bool m_do_average; /**< Whether to calculate the average of the data */ + bool m_do_filter; /**< whether to apply #m_filter_fn */ /** Parser function to be averaged by the functor. Arguments: x, y, z, ux, uy, uz */ std::unique_ptr m_map_fn_parser; /** Parser function to filter particles before pass to map. 
Arguments: x, y, z, ux, uy, uz */ diff --git a/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.H index bc0c8b9f270..073e6476110 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/RhoFunctor.H @@ -43,13 +43,13 @@ public: private: // Level on which source MultiFab mf_src is defined in RZ geometry - int const m_lev; + int m_lev; // Whether to apply k-space filtering of charge density in the diagnostics output in RZ PSATD bool m_apply_rz_psatd_filter; // Species index to dump rho per species - const int m_species_index; + int m_species_index; // Whether to average all modes into one component in RZ geometry bool m_convertRZmodes2cartesian; diff --git a/Source/Diagnostics/ComputeDiagFunctors/TemperatureFunctor.H b/Source/Diagnostics/ComputeDiagFunctors/TemperatureFunctor.H index f6c425e74d5..1716ab61652 100644 --- a/Source/Diagnostics/ComputeDiagFunctors/TemperatureFunctor.H +++ b/Source/Diagnostics/ComputeDiagFunctors/TemperatureFunctor.H @@ -28,8 +28,8 @@ public: */ void operator()(amrex::MultiFab& mf_dst, int dcomp, int /*i_buffer=0*/) const override; private: - int const m_lev; /**< level on which mf_src is defined */ - int const m_ispec; /**< index of species to average over */ + int m_lev; /**< level on which mf_src is defined */ + int m_ispec; /**< index of species to average over */ }; #endif // WARPX_TEMPERATUREFUNCTOR_H_ diff --git a/Source/Diagnostics/FlushFormats/FlushFormatInSitu.cpp b/Source/Diagnostics/FlushFormats/FlushFormatInSitu.cpp index be3047d7ab6..d5313d71727 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatInSitu.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatInSitu.cpp @@ -27,14 +27,14 @@ FlushFormatInSitu::WriteParticles(const amrex::Vector& particle_di // we prefix the fields with "particle_{species_name}" b/c we // want to to uniquely name all the fields that can be plotted - for (unsigned i = 0, n = particle_diags.size(); i < n; ++i) { + for (const auto & particle_diag : particle_diags) { Vector particle_varnames; Vector particle_int_varnames; - std::string prefix = "particle_" + particle_diags[i].getSpeciesName(); + std::string prefix = "particle_" + particle_diag.getSpeciesName(); // Get pc for species // auto& pc = mypc->GetParticleContainer(i); - WarpXParticleContainer* pc = particle_diags[i].getParticleContainer(); + WarpXParticleContainer* pc = particle_diag.getParticleContainer(); // get names of real comps std::map real_comps_map = pc->getParticleComps(); diff --git a/Source/Diagnostics/FullDiagnostics.cpp b/Source/Diagnostics/FullDiagnostics.cpp index 946178fd1b5..8e2ebd3886a 100644 --- a/Source/Diagnostics/FullDiagnostics.cpp +++ b/Source/Diagnostics/FullDiagnostics.cpp @@ -565,12 +565,10 @@ FullDiagnostics::AddRZModesToDiags (int lev) // Check if divE is requested // If so, all components will be written out - bool divE_requested = false; - for (int comp = 0; comp < m_varnames.size(); comp++) { - if ( m_varnames[comp] == "divE" ) { - divE_requested = true; - } - } + const bool divE_requested = std::any_of( + std::begin(m_varnames), + std::end(m_varnames), + [](const auto& varname) { return varname == "divE"; }); // If rho is requested, all components will be written out const bool rho_requested = utils::algorithms::is_in( m_varnames, "rho" ); diff --git a/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.H b/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.H index cfba804d0f0..b30d6b0bb6e 100644 --- 
a/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.H +++ b/Source/Diagnostics/ReducedDiags/LoadBalanceCosts.H @@ -40,9 +40,9 @@ public: * (cost, processor, level, i_low, j_low, k_low, gpu_ID [if GPU run], num_cells, num_macro_particles * note: the hostname per box is stored separately (in m_data_string) */ #ifdef AMREX_USE_GPU - const int m_nDataFields = 9; + static const int m_nDataFields = 9; #else - const int m_nDataFields = 8; + static const int m_nDataFields = 8; #endif /** used to keep track of max number of boxes over all timesteps; this allows diff --git a/Source/Diagnostics/WarpXOpenPMD.cpp b/Source/Diagnostics/WarpXOpenPMD.cpp index e38ae8c8300..2fac8ede452 100644 --- a/Source/Diagnostics/WarpXOpenPMD.cpp +++ b/Source/Diagnostics/WarpXOpenPMD.cpp @@ -531,10 +531,10 @@ WarpXOpenPMDPlot::WriteOpenPMDParticles (const amrex::Vector& part { WARPX_PROFILE("WarpXOpenPMDPlot::WriteOpenPMDParticles()"); -for (unsigned i = 0, n = particle_diags.size(); i < n; ++i) { +for (const auto & particle_diag : particle_diags) { - WarpXParticleContainer* pc = particle_diags[i].getParticleContainer(); - PinnedMemoryParticleContainer* pinned_pc = particle_diags[i].getPinnedParticleContainer(); + WarpXParticleContainer* pc = particle_diag.getParticleContainer(); + PinnedMemoryParticleContainer* pinned_pc = particle_diag.getPinnedParticleContainer(); if (isBTD || use_pinned_pc) { if (!pinned_pc->isDefined()) { continue; // Skip to the next particle container @@ -546,17 +546,17 @@ for (unsigned i = 0, n = particle_diags.size(); i < n; ++i) { pc->make_alike(); const auto mass = pc->AmIA() ? PhysConst::m_e : pc->getMass(); - RandomFilter const random_filter(particle_diags[i].m_do_random_filter, - particle_diags[i].m_random_fraction); - UniformFilter const uniform_filter(particle_diags[i].m_do_uniform_filter, - particle_diags[i].m_uniform_stride); - ParserFilter parser_filter(particle_diags[i].m_do_parser_filter, + RandomFilter const random_filter(particle_diag.m_do_random_filter, + particle_diag.m_random_fraction); + UniformFilter const uniform_filter(particle_diag.m_do_uniform_filter, + particle_diag.m_uniform_stride); + ParserFilter parser_filter(particle_diag.m_do_parser_filter, utils::parser::compileParser - (particle_diags[i].m_particle_filter_parser.get()), + (particle_diag.m_particle_filter_parser.get()), pc->getMass(), time); parser_filter.m_units = InputUnits::SI; - GeometryFilter const geometry_filter(particle_diags[i].m_do_geom_filter, - particle_diags[i].m_diag_domain); + GeometryFilter const geometry_filter(particle_diag.m_do_geom_filter, + particle_diag.m_diag_domain); if (isBTD || use_pinned_pc) { particlesConvertUnits(ConvertDirection::WarpX_to_SI, pinned_pc, mass); @@ -587,7 +587,7 @@ for (unsigned i = 0, n = particle_diags.size(); i < n; ++i) { } // Gather the electrostatic potential (phi) on the macroparticles - if ( particle_diags[i].m_plot_phi ) { + if ( particle_diag.m_plot_phi ) { storePhiOnParticles( tmp, WarpX::electrostatic_solver_id, !use_pinned_pc ); } @@ -619,7 +619,7 @@ for (unsigned i = 0, n = particle_diags.size(); i < n; ++i) { real_names[x.second+PIdx::nattribs] = detail::snakeToCamel(x.first); } // plot any "extra" fields by default - real_flags = particle_diags[i].m_plot_flags; + real_flags = particle_diag.m_plot_flags; real_flags.resize(tmp.NumRealComps(), 1); // and the names int_names.resize(tmp.NumIntComps()); @@ -634,7 +634,7 @@ for (unsigned i = 0, n = particle_diags.size(); i < n; ++i) { // real_names contains a list of all real particle attributes. 
// real_flags is 1 or 0, whether quantity is dumped or not. DumpToFile(&tmp, - particle_diags.at(i).getSpeciesName(), + particle_diag.getSpeciesName(), m_CurrentStep, real_flags, int_flags, diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/FieldAccessorFunctors.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/FieldAccessorFunctors.H index 05b1db1fe94..ba94eab0b66 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/FieldAccessorFunctors.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceAlgorithms/FieldAccessorFunctors.H @@ -42,9 +42,9 @@ struct FieldAccessorMacroscopic } private: /** Array4 of the source field to be scaled and returned by the operator() */ - amrex::Array4 const m_field; + amrex::Array4 m_field; /** Array4 of the macroscopic parameter used to divide m_field in the operator() */ - amrex::Array4 const m_parameter; + amrex::Array4 m_parameter; }; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index ec3742d1ff8..ba6bb0e042c 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -339,7 +339,7 @@ void HybridPICModel::HybridPICSolveE ( auto& warpx = WarpX::GetInstance(); ablastr::fields::VectorField current_fp_plasma = warpx.m_fields.get_alldirs(FieldType::hybrid_current_fp_plasma, lev); - const ablastr::fields::ScalarField electron_pressure_fp = warpx.m_fields.get(FieldType::hybrid_electron_pressure_fp, lev); + auto* const electron_pressure_fp = warpx.m_fields.get(FieldType::hybrid_electron_pressure_fp, lev); // Solve E field in regular cells warpx.get_pointer_fdtd_solver_fp(lev)->HybridPICSolveE( diff --git a/Source/Particles/AddPlasmaUtilities.H b/Source/Particles/AddPlasmaUtilities.H index bb05d7be3c8..824e3e10955 100644 --- a/Source/Particles/AddPlasmaUtilities.H +++ b/Source/Particles/AddPlasmaUtilities.H @@ -334,8 +334,8 @@ struct QEDHelper amrex::ParticleReal* p_optical_depth_QSR = nullptr; amrex::ParticleReal* p_optical_depth_BW = nullptr; - const bool has_quantum_sync; - const bool has_breit_wheeler; + bool has_quantum_sync; + bool has_breit_wheeler; QuantumSynchrotronGetOpticalDepth quantum_sync_get_opt; BreitWheelerGetOpticalDepth breit_wheeler_get_opt; diff --git a/Source/Particles/ElementaryProcess/QEDPairGeneration.H b/Source/Particles/ElementaryProcess/QEDPairGeneration.H index 180fdf0fb35..f1beb8363a7 100644 --- a/Source/Particles/ElementaryProcess/QEDPairGeneration.H +++ b/Source/Particles/ElementaryProcess/QEDPairGeneration.H @@ -172,7 +172,7 @@ public: private: - const BreitWheelerGeneratePairs + BreitWheelerGeneratePairs m_generate_functor; /*!< A copy of the functor to generate pairs. It contains only pointers to the lookup tables.*/ GetParticlePosition m_get_position; diff --git a/Source/Particles/ElementaryProcess/QEDPhotonEmission.H b/Source/Particles/ElementaryProcess/QEDPhotonEmission.H index 514526374bd..0b6836a38bc 100644 --- a/Source/Particles/ElementaryProcess/QEDPhotonEmission.H +++ b/Source/Particles/ElementaryProcess/QEDPhotonEmission.H @@ -178,12 +178,12 @@ public: } private: - const QuantumSynchrotronGetOpticalDepth + QuantumSynchrotronGetOpticalDepth m_opt_depth_functor; /*!< A copy of the functor to initialize the optical depth of the source species. 
*/ - const int m_opt_depth_runtime_comp = 0; /*!< Index of the optical depth component of source species*/ + int m_opt_depth_runtime_comp = 0; /*!< Index of the optical depth component of source species*/ - const QuantumSynchrotronPhotonEmission + QuantumSynchrotronPhotonEmission m_emission_functor; /*!< A copy of the functor to generate photons. It contains only pointers to the lookup tables.*/ GetParticlePosition m_get_position; diff --git a/Source/Particles/ElementaryProcess/QEDSchwingerProcess.H b/Source/Particles/ElementaryProcess/QEDSchwingerProcess.H index 32b58dc50dc..e7eb7e8be04 100644 --- a/Source/Particles/ElementaryProcess/QEDSchwingerProcess.H +++ b/Source/Particles/ElementaryProcess/QEDSchwingerProcess.H @@ -17,9 +17,9 @@ */ struct SchwingerFilterFunc { - const int m_threshold_poisson_gaussian; - const amrex::Real m_dV; - const amrex::Real m_dt; + int m_threshold_poisson_gaussian; + amrex::Real m_dV; + amrex::Real m_dt; /** Get the number of created pairs in a given cell at a given timestep. * @@ -59,8 +59,8 @@ struct SchwingerFilterFunc */ struct SchwingerTransformFunc { - const amrex::Real m_y_size; - const int m_weight_index; + amrex::Real m_y_size; + int m_weight_index; /** Assign a weight to particles created via the Schwinger process. * diff --git a/Source/Particles/Filter/FilterFunctors.H b/Source/Particles/Filter/FilterFunctors.H index 982eeb0d23a..9d2b5f67a64 100644 --- a/Source/Particles/Filter/FilterFunctors.H +++ b/Source/Particles/Filter/FilterFunctors.H @@ -50,8 +50,8 @@ struct RandomFilter return ( (!m_is_active) || (amrex::Random(engine) < m_fraction) ); } private: - const bool m_is_active; //! select all particles if false - const amrex::Real m_fraction = 1.0; //! range: [0.0:1.0] where 0 is no & 1 is all particles + bool m_is_active; //! select all particles if false + amrex::Real m_fraction = 1.0; //! range: [0.0:1.0] where 0 is no & 1 is all particles }; /** @@ -77,8 +77,8 @@ struct UniformFilter return ( (!m_is_active) || ( p.id()%m_stride == 0 ) ); } private: - const bool m_is_active; //! select all particles if false - const int m_stride = 0; //! selection of every n-th particle + bool m_is_active; //! select all particles if false + int m_stride = 0; //! selection of every n-th particle }; /** @@ -134,10 +134,10 @@ struct ParserFilter } private: /** Whether this diagnostics is activated. Select all particles if false */ - const bool m_is_active; + bool m_is_active; public: /** Parser function with 7 input variables, t,x,y,z,ux,uy,uz */ - amrex::ParserExecutor<7> const m_function_partparser; + amrex::ParserExecutor<7> m_function_partparser; /** Mass of particle species */ amrex::ParticleReal m_mass; /** Store physical time on the coarsest level. */ @@ -171,9 +171,9 @@ struct GeometryFilter } private: /** Whether this diagnostics is activated. 
Select all particles if false */ - const bool m_is_active; + bool m_is_active; /** Physical extent of the axis-aligned region used for particle check */ - const amrex::RealBox m_domain; + amrex::RealBox m_domain; }; #endif // WARPX_FILTERFUNCTORS_H diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp index 619b54ed7ad..c6724b5185a 100644 --- a/Source/Particles/MultiParticleContainer.cpp +++ b/Source/Particles/MultiParticleContainer.cpp @@ -88,7 +88,7 @@ namespace /** A little collection to transport six Array4 that point to the EM fields */ struct MyFieldList { - Array4< amrex::Real const > const Ex, Ey, Ez, Bx, By, Bz; + Array4< amrex::Real const > Ex, Ey, Ez, Bx, By, Bz; }; } @@ -223,7 +223,7 @@ MultiParticleContainer::ReadParameters () pp_particles, "repeated_plasma_lens_lengths", h_repeated_plasma_lens_lengths); - const int n_lenses = static_cast(h_repeated_plasma_lens_starts.size()); + const auto n_lenses = static_cast(h_repeated_plasma_lens_starts.size()); d_repeated_plasma_lens_starts.resize(n_lenses); d_repeated_plasma_lens_lengths.resize(n_lenses); amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, diff --git a/Source/Particles/ParticleBoundaryBuffer.cpp b/Source/Particles/ParticleBoundaryBuffer.cpp index a1f1c46d894..dbe5dea7085 100644 --- a/Source/Particles/ParticleBoundaryBuffer.cpp +++ b/Source/Particles/ParticleBoundaryBuffer.cpp @@ -50,11 +50,11 @@ struct IsOutsideDomainBoundary { }; struct FindEmbeddedBoundaryIntersection { - const int m_step_index; - const int m_delta_index; - const int m_normal_index; - const int m_step; - const amrex::Real m_dt; + int m_step_index; + int m_delta_index; + int m_normal_index; + int m_step; + amrex::Real m_dt; amrex::Array4 m_phiarr; amrex::GpuArray m_dxi; amrex::GpuArray m_plo; @@ -173,7 +173,7 @@ struct CopyAndTimestamp { int m_delta_index; int m_normal_index; int m_step; - const amrex::Real m_dt; + amrex::Real m_dt; int m_idim; int m_iside; diff --git a/Source/Particles/ParticleCreation/SmartCreate.H b/Source/Particles/ParticleCreation/SmartCreate.H index fe4cb5929e0..d93624b6433 100644 --- a/Source/Particles/ParticleCreation/SmartCreate.H +++ b/Source/Particles/ParticleCreation/SmartCreate.H @@ -35,7 +35,7 @@ struct SmartCreate { const InitializationPolicy* m_policy_real; const InitializationPolicy* m_policy_int; - const int m_weight_index = 0; + int m_weight_index = 0; template AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE diff --git a/Source/Particles/Sorting/SortingUtils.H b/Source/Particles/Sorting/SortingUtils.H index ba7761bf48a..49366e888ae 100644 --- a/Source/Particles/Sorting/SortingUtils.H +++ b/Source/Particles/Sorting/SortingUtils.H @@ -174,9 +174,9 @@ class fillBufferFlagRemainingParticles amrex::GpuArray m_inv_cell_size; amrex::Box m_domain; int* m_inexflag_ptr; - WarpXParticleContainer::ParticleTileType::ConstParticleTileDataType const m_ptd; + WarpXParticleContainer::ParticleTileType::ConstParticleTileDataType m_ptd; amrex::Array4 m_buffer_mask; - int const m_start_index; + int m_start_index; int const* m_indices_ptr; }; diff --git a/Source/ablastr/fields/Interpolate.H b/Source/ablastr/fields/Interpolate.H index a9f0a7fc75f..e5121215393 100644 --- a/Source/ablastr/fields/Interpolate.H +++ b/Source/ablastr/fields/Interpolate.H @@ -46,9 +46,9 @@ namespace ablastr::fields::details { 0, m_refratio); } - amrex::Array4 const m_phi_fp_arr; - amrex::Array4 const m_phi_cp_arr; - amrex::IntVect const m_refratio; + amrex::Array4 m_phi_fp_arr; + amrex::Array4 m_phi_cp_arr; + amrex::IntVect 
m_refratio; }; } // namespace ablastr::fields::details diff --git a/Source/ablastr/utils/SignalHandling.cpp b/Source/ablastr/utils/SignalHandling.cpp index 5eeaeec259f..bf4874b4536 100644 --- a/Source/ablastr/utils/SignalHandling.cpp +++ b/Source/ablastr/utils/SignalHandling.cpp @@ -37,7 +37,7 @@ SignalHandling::parseSignalNameToNumber (const std::string &str) #if defined(__linux__) || defined(__APPLE__) const struct { const char* abbrev; - const int value; + int value; } signals_to_parse[] = { {"ABRT", SIGABRT}, {"ALRM", SIGALRM}, diff --git a/Source/ablastr/utils/msg_logger/MsgLogger.H b/Source/ablastr/utils/msg_logger/MsgLogger.H index 401432f5dda..2497bdcfae7 100644 --- a/Source/ablastr/utils/msg_logger/MsgLogger.H +++ b/Source/ablastr/utils/msg_logger/MsgLogger.H @@ -280,9 +280,9 @@ namespace ablastr::utils::msg_logger #endif - const int m_rank /*! MPI rank of the current process*/; - const int m_num_procs /*! Number of MPI ranks*/; - const int m_io_rank /*! Rank of the I/O process*/; + int m_rank /*! MPI rank of the current process*/; + int m_num_procs /*! Number of MPI ranks*/; + int m_io_rank /*! Rank of the I/O process*/; std::map m_messages /*! This stores a map to associate warning messages with the corresponding counters*/; }; diff --git a/Tools/Linter/runClangTidy.sh b/Tools/Linter/runClangTidy.sh index 046c72d7b27..262d713cac6 100755 --- a/Tools/Linter/runClangTidy.sh +++ b/Tools/Linter/runClangTidy.sh @@ -55,13 +55,13 @@ ${CTIDY} --version echo echo "This can be overridden by setting the environment" echo "variables CLANG, CLANGXX, and CLANGTIDY e.g.: " -echo "$ export CLANG=clang-15" -echo "$ export CLANGXX=clang++-15" -echo "$ export CTIDCLANGTIDYY=clang-tidy-15" +echo "$ export CLANG=clang-16" +echo "$ export CLANGXX=clang++-16" +echo "$ export CTIDCLANGTIDYY=clang-tidy-16" echo "$ ./Tools/Linter/runClangTidy.sh" echo echo "******************************************************" -echo "* Warning: clang v15 is currently used in CI tests. *" +echo "* Warning: clang v16 is currently used in CI tests. *" echo "* It is therefore recommended to use this version. *" echo "* Otherwise, a newer version may find issues not *" echo "* currently covered by CI tests while older versions *" From 962829d1781f80c05a8a3dc14e8e5ae0ad43da54 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Wed, 29 Jan 2025 18:43:00 +0100 Subject: [PATCH 193/278] WarpX class: evolve_scheme no longer a static variable (#5616) This PR contributes to reducing the usage of static variables in the WarpX class. --- Source/WarpX.H | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Source/WarpX.H b/Source/WarpX.H index f500347febc..3d7835de58b 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -197,7 +197,7 @@ public: //! Integer that corresponds to the type of Maxwell solver (Yee, CKC, PSATD, ECT) static inline auto electromagnetic_solver_id = ElectromagneticSolverAlgo::Default; //! Integer that corresponds to the evolve scheme (explicit, semi_implicit_em, theta_implicit_em) - static inline auto evolve_scheme = EvolveScheme::Default; + EvolveScheme evolve_scheme = EvolveScheme::Default; //! Maximum iterations used for self-consistent particle update in implicit particle-suppressed evolve schemes static int max_particle_its_in_implicit_scheme; //! 
Relative tolerance used for self-consistent particle update in implicit particle-suppressed evolve schemes From 9f2d0f94b54835cb1fa88bc6a9ea8ef76899e398 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Wed, 29 Jan 2025 18:43:29 +0100 Subject: [PATCH 194/278] WarpX class: ProjectionCleanDivB no longer static (#5615) This PR contributes to reducing the usage of static member functions and static variables in the WarpX class. --- Source/Python/WarpX.cpp | 4 ++-- Source/WarpX.H | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index 01ab2d3e48f..870a3a87c91 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -266,8 +266,8 @@ The physical fields in WarpX have the following naming: py::arg("potential"), "Sets the EB potential string and updates the function parser." ) - .def_static("run_div_cleaner", - [] () { WarpX::ProjectionCleanDivB(); }, + .def("run_div_cleaner", + [] (WarpX& wx) { wx.ProjectionCleanDivB(); }, "Executes projection based divergence cleaner on loaded Bfield_fp_external." ) .def("synchronize", diff --git a/Source/WarpX.H b/Source/WarpX.H index 3d7835de58b..fec12affecd 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -849,7 +849,7 @@ public: void ComputeDivE(amrex::MultiFab& divE, int lev); - static void ProjectionCleanDivB (); + void ProjectionCleanDivB (); [[nodiscard]] amrex::IntVect getngEB() const { return guard_cells.ng_alloc_EB; } [[nodiscard]] amrex::IntVect getngF() const { return guard_cells.ng_alloc_F; } From fc37679567f417b3e53c8d99073ef666867d63ae Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Wed, 29 Jan 2025 18:43:56 +0100 Subject: [PATCH 195/278] Remove unused SliceDiagnostic.H/cpp (#5617) The functions defined in `SliceDiagnostic.H` and implemented in ` SliceDiagnostic.cpp` are never used in WarpX. Therefore, this PR removes these two source files (and updates `CMakeLists.txt` and `Make.package` accordingly) --- Source/Diagnostics/CMakeLists.txt | 1 - Source/Diagnostics/Make.package | 1 - Source/Diagnostics/SliceDiagnostic.H | 38 -- Source/Diagnostics/SliceDiagnostic.cpp | 526 ------------------------- 4 files changed, 566 deletions(-) delete mode 100644 Source/Diagnostics/SliceDiagnostic.H delete mode 100644 Source/Diagnostics/SliceDiagnostic.cpp diff --git a/Source/Diagnostics/CMakeLists.txt b/Source/Diagnostics/CMakeLists.txt index 376487dc94a..d899bd5e155 100644 --- a/Source/Diagnostics/CMakeLists.txt +++ b/Source/Diagnostics/CMakeLists.txt @@ -7,7 +7,6 @@ foreach(D IN LISTS WarpX_DIMS) FullDiagnostics.cpp MultiDiagnostics.cpp ParticleIO.cpp - SliceDiagnostic.cpp WarpXIO.cpp WarpXOpenPMD.cpp BTDiagnostics.cpp diff --git a/Source/Diagnostics/Make.package b/Source/Diagnostics/Make.package index 75b41fba5e8..28afdb35290 100644 --- a/Source/Diagnostics/Make.package +++ b/Source/Diagnostics/Make.package @@ -4,7 +4,6 @@ CEXE_sources += FullDiagnostics.cpp CEXE_sources += WarpXIO.cpp CEXE_sources += ParticleIO.cpp CEXE_sources += FieldIO.cpp -CEXE_sources += SliceDiagnostic.cpp CEXE_sources += BTDiagnostics.cpp CEXE_sources += BoundaryScrapingDiagnostics.cpp CEXE_sources += BTD_Plotfile_Header_Impl.cpp diff --git a/Source/Diagnostics/SliceDiagnostic.H b/Source/Diagnostics/SliceDiagnostic.H deleted file mode 100644 index 570f86d5384..00000000000 --- a/Source/Diagnostics/SliceDiagnostic.H +++ /dev/null @@ -1,38 +0,0 @@ -/* Copyright 2019 Revathi Jambunathan - * - * This file is part of WarpX. 
- * - * License: BSD-3-Clause-LBNL - */ -#ifndef WARPX_SliceDiagnostic_H_ -#define WARPX_SliceDiagnostic_H_ - -#include - -#include - -#include - -std::unique_ptr CreateSlice( const amrex::MultiFab& mf, - const amrex::Vector &dom_geom, - amrex::RealBox &slice_realbox, - amrex::IntVect &slice_cr_ratio ); - -void CheckSliceInput( amrex::RealBox real_box, - amrex::RealBox &slice_cc_nd_box, amrex::RealBox &slice_realbox, - amrex::IntVect &slice_cr_ratio, amrex::Vector dom_geom, - amrex::IntVect SliceType, amrex::IntVect &slice_lo, - amrex::IntVect &slice_hi, amrex::IntVect &interp_lo); - -void InterpolateSliceValues( amrex::MultiFab& smf, - amrex::IntVect interp_lo, amrex::RealBox slice_realbox, - const amrex::Vector& geom, int ncomp, int nghost, - amrex::IntVect slice_lo, amrex::IntVect slice_hi, - amrex::IntVect SliceType, amrex::RealBox real_box); - -void InterpolateLo( const amrex::Box& bx, amrex::FArrayBox &fabox, - amrex::IntVect slice_lo, amrex::Vector geom, - int idir, amrex::IntVect IndType, amrex::RealBox slice_realbox, - int srccomp, int ncomp, int nghost, amrex::RealBox real_box); - -#endif diff --git a/Source/Diagnostics/SliceDiagnostic.cpp b/Source/Diagnostics/SliceDiagnostic.cpp deleted file mode 100644 index bcb6070abdf..00000000000 --- a/Source/Diagnostics/SliceDiagnostic.cpp +++ /dev/null @@ -1,526 +0,0 @@ -/* Copyright 2019-2020 Luca Fedeli, Revathi Jambunathan, Weiqun Zhang - * - * - * This file is part of WarpX. - * - * License: BSD-3-Clause-LBNL - */ -#include "SliceDiagnostic.H" - -#include "Fields.H" -#include "Utils/TextMsg.H" -#include "WarpX.H" - -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include - -using namespace amrex; -using warpx::fields::FieldType; - -/* \brief - * The functions creates the slice for diagnostics based on the user-input. - * The slice can be 1D, 2D, or 3D and it inherits the index type of the underlying data. - * The implementation assumes that the slice is aligned with the coordinate axes. - * The input parameters are modified if the user-input does not comply with requirements of coarsenability or if the slice extent is not contained within the simulation domain. - * First a slice multifab (smf) with cell size equal to that of the simulation grid is created such that it extends from slice.dim_lo to slice.dim_hi and shares the same index space as the source multifab (mf) - * The values are copied from src mf to dst smf using amrex::ParallelCopy - * If interpolation is required, then on the smf, using data points stored in the ghost cells, the data in interpolated. - * If coarsening is required, then a coarse slice multifab is generated (cs_mf) and the - * values of the refined slice (smf) is averaged down to obtain the coarse slice. - * \param mf is the source multifab containing the field data - * \param dom_geom is the geometry of the domain and used in the function to obtain the - * CellSize of the underlying grid. 
- * \param slice_realbox defines the extent of the slice - * \param slice_cr_ratio provides the coarsening ratio for diagnostics - */ - -std::unique_ptr -CreateSlice( const MultiFab& mf, const Vector &dom_geom, - RealBox &slice_realbox, IntVect &slice_cr_ratio ) -{ - std::unique_ptr smf; - std::unique_ptr cs_mf; - - Vector slice_ncells(AMREX_SPACEDIM); - const int nghost = 1; - auto nlevels = static_cast(dom_geom.size()); - const int ncomp = (mf).nComp(); - - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( nlevels==1, - "Slice diagnostics does not work with mesh refinement yet (TO DO)."); - - const auto conversionType = (mf).ixType(); - IntVect SliceType(AMREX_D_DECL(0,0,0)); - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim ) - { - SliceType[idim] = conversionType.nodeCentered(idim); - } - - const RealBox& real_box = dom_geom[0].ProbDomain(); - RealBox slice_cc_nd_box; - const int default_grid_size = 32; - int slice_grid_size = default_grid_size; - - bool interpolate = false; - bool coarsen = false; - - // same index space as domain // - IntVect slice_lo(AMREX_D_DECL(0,0,0)); - IntVect slice_hi(AMREX_D_DECL(1,1,1)); - IntVect interp_lo(AMREX_D_DECL(0,0,0)); - - CheckSliceInput(real_box, slice_cc_nd_box, slice_realbox, slice_cr_ratio, - dom_geom, SliceType, slice_lo, - slice_hi, interp_lo); - int configuration_dim = 0; - // Determine if interpolation is required and number of cells in slice // - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - - // Flag for interpolation if required // - if ( interp_lo[idim] == 1) { - interpolate = true; - } - - // For the case when a dimension is reduced // - if ( ( slice_hi[idim] - slice_lo[idim]) == 1) { - slice_ncells[idim] = 1; - } - else { - slice_ncells[idim] = ( slice_hi[idim] - slice_lo[idim] + 1 ) - / slice_cr_ratio[idim]; - - const int refined_ncells = slice_hi[idim] - slice_lo[idim] + 1 ; - if ( slice_cr_ratio[idim] > 1) { - coarsen = true; - - // modify slice_grid_size if >= refines_cells // - if ( slice_grid_size >= refined_ncells ) { - slice_grid_size = refined_ncells - 1; - } - - } - configuration_dim += 1; - } - } - if (configuration_dim==1) { - ablastr::warn_manager::WMRecordWarning("Diagnostics", - "The slice configuration is 1D and cannot be visualized using yt."); - } - - // Slice generation with index type inheritance // - const Box slice(slice_lo, slice_hi); - - Vector sba(1); - sba[0].define(slice); - sba[0].maxSize(slice_grid_size); - - // Distribution mapping for slice can be different from that of domain // - Vector sdmap(1); - sdmap[0] = DistributionMapping{sba[0]}; - - smf = std::make_unique(amrex::convert(sba[0],SliceType), sdmap[0], - ncomp, nghost); - - // Copy data from domain to slice that has same cell size as that of // - // the domain mf. 
src and dst have the same number of ghost cells // - const amrex::IntVect nghost_vect(AMREX_D_DECL(nghost, nghost, nghost)); - ablastr::utils::communication::ParallelCopy(*smf, mf, 0, 0, ncomp, nghost_vect, nghost_vect, WarpX::do_single_precision_comms); - - // interpolate if required on refined slice // - if (interpolate) { - InterpolateSliceValues( *smf, interp_lo, slice_cc_nd_box, dom_geom, - ncomp, nghost, slice_lo, slice_hi, SliceType, real_box); - } - - - if (!coarsen) { - return smf; - } - else { - Vector crse_ba(1); - crse_ba[0] = sba[0]; - crse_ba[0].coarsen(slice_cr_ratio); - - AMREX_ALWAYS_ASSERT(crse_ba[0].size() == sba[0].size()); - - cs_mf = std::make_unique(amrex::convert(crse_ba[0],SliceType), - sdmap[0], ncomp,nghost); - - const MultiFab& mfSrc = *smf; - MultiFab& mfDst = *cs_mf; - - auto & warpx = WarpX::GetInstance(); - - using ablastr::fields::Direction; - - MFIter mfi_dst(mfDst); - for (MFIter mfi(mfSrc); mfi.isValid(); ++mfi) { - - Array4 const& Src_fabox = mfSrc.const_array(mfi); - - const Box& Dst_bx = mfi_dst.validbox(); - Array4 const& Dst_fabox = mfDst.array(mfi_dst); - - const int scomp = 0; - const int dcomp = 0; - - const IntVect cctype(AMREX_D_DECL(0,0,0)); - if( SliceType==cctype ) { - amrex::amrex_avgdown(Dst_bx, Dst_fabox, Src_fabox, dcomp, scomp, - ncomp, slice_cr_ratio); - } - const IntVect ndtype(AMREX_D_DECL(1,1,1)); - if( SliceType == ndtype ) { - amrex::amrex_avgdown_nodes(Dst_bx, Dst_fabox, Src_fabox, dcomp, - scomp, ncomp, slice_cr_ratio); - } - if( SliceType == warpx.m_fields.get(FieldType::Efield_aux, Direction{0}, 0)->ixType().toIntVect() ) { - amrex::amrex_avgdown_edges(Dst_bx, Dst_fabox, Src_fabox, dcomp, - scomp, ncomp, slice_cr_ratio, 0); - } - if( SliceType == warpx.m_fields.get(FieldType::Efield_aux, Direction{1}, 0)->ixType().toIntVect() ) { - amrex::amrex_avgdown_edges(Dst_bx, Dst_fabox, Src_fabox, dcomp, - scomp, ncomp, slice_cr_ratio, 1); - } - if( SliceType == warpx.m_fields.get(FieldType::Efield_aux, Direction{2}, 0)->ixType().toIntVect() ) { - amrex::amrex_avgdown_edges(Dst_bx, Dst_fabox, Src_fabox, dcomp, - scomp, ncomp, slice_cr_ratio, 2); - } - if( SliceType == warpx.m_fields.get(FieldType::Bfield_aux, Direction{0}, 0)->ixType().toIntVect() ) { - amrex::amrex_avgdown_faces(Dst_bx, Dst_fabox, Src_fabox, dcomp, - scomp, ncomp, slice_cr_ratio, 0); - } - if( SliceType == warpx.m_fields.get(FieldType::Bfield_aux, Direction{1}, 0)->ixType().toIntVect() ) { - amrex::amrex_avgdown_faces(Dst_bx, Dst_fabox, Src_fabox, dcomp, - scomp, ncomp, slice_cr_ratio, 1); - } - if( SliceType == warpx.m_fields.get(FieldType::Bfield_aux, Direction{2}, 0)->ixType().toIntVect() ) { - amrex::amrex_avgdown_faces(Dst_bx, Dst_fabox, Src_fabox, dcomp, - scomp, ncomp, slice_cr_ratio, 2); - } - - if ( mfi_dst.isValid() ) { - ++mfi_dst; - } - - } - return cs_mf; - } -} - - -/* \brief - * This function modifies the slice input parameters under certain conditions. - * The coarsening ratio, slice_cr_ratio is modified if the input is not an exponent of 2. - * for example, if the coarsening ratio is 3, 5 or 6, which is not an exponent of 2, - * then the value of coarsening ratio is modified to the nearest exponent of 2. - * The default value for coarsening ratio is 1. - * slice_realbox.lo and slice_realbox.hi are set equal to the simulation domain lo and hi - * if for the user-input for the slice lo and hi coordinates are outside the domain. 
- * If the slice_realbox.lo and slice_realbox.hi coordinates do not align with the data - * points and the number of cells in that dimension is greater than 1, and if the extent of - * the slice in that dimension is not coarsenable, then the value lo and hi coordinates are - * shifted to the nearest coarsenable point to include some extra data points in the slice. - * If slice_realbox.lo==slice_realbox.hi, then that dimension has only one cell and no - * modifications are made to the value. If the lo and hi do not align with a data point, - * then it is flagged for interpolation. - * \param real_box a Real box defined for the underlying domain. - * \param slice_realbox a Real box for defining the slice dimension. - * \param slice_cc_nd_box a Real box for defining the modified lo and hi of the slice - * such that the coordinates align with the underlying data points. - * If the dimension is reduced to have only one cell, the slice_realbox is not modified and * instead the values are interpolated to the coordinate from the nearest data points. - * \param slice_cr_ratio contains values of the coarsening ratio which may be modified - * if the input values do not satisfy coarsenability conditions. - * \param slice_lo and slice_hi are the index values of the slice - * \param interp_lo are set to 0 or 1 if they are flagged for interpolation. - * The slice shares the same index space as that of the simulation domain. - */ - - -void -CheckSliceInput( const RealBox real_box, RealBox &slice_cc_nd_box, - RealBox &slice_realbox, IntVect &slice_cr_ratio, - Vector dom_geom, IntVect const SliceType, - IntVect &slice_lo, IntVect &slice_hi, IntVect &interp_lo) -{ - - IntVect slice_lo2(AMREX_D_DECL(0,0,0)); - for ( int idim = 0; idim < AMREX_SPACEDIM; ++idim) - { - // Modify coarsening ratio if the input value is not an exponent of 2 for AMR // - if ( slice_cr_ratio[idim] > 0 ) { - const int log_cr_ratio = - static_cast(floor ( log2( double(slice_cr_ratio[idim])))); - slice_cr_ratio[idim] = - static_cast (exp2( double(log_cr_ratio) )); - } - - //// Default coarsening ratio is 1 // - // Modify lo if input is out of bounds // - if ( slice_realbox.lo(idim) < real_box.lo(idim) ) { - slice_realbox.setLo( idim, real_box.lo(idim)); - std::stringstream warnMsg; - warnMsg << " slice lo is out of bounds. " << - " Modified it in dimension " << idim << - " to be aligned with the domain box."; - ablastr::warn_manager::WMRecordWarning("Diagnostics", - warnMsg.str(), ablastr::warn_manager::WarnPriority::low); - } - - // Modify hi if input in out od bounds // - if ( slice_realbox.hi(idim) > real_box.hi(idim) ) { - slice_realbox.setHi( idim, real_box.hi(idim)); - std::stringstream warnMsg; - warnMsg << " slice hi is out of bounds. 
" << - " Modified it in dimension " << idim << - " to be aligned with the domain box."; - ablastr::warn_manager::WMRecordWarning("Diagnostics", - warnMsg.str(), ablastr::warn_manager::WarnPriority::low); - } - - const auto very_small_number = 1E-10; - - // Factor to ensure index values computation depending on index type // - const double fac = ( 1.0 - SliceType[idim] )*dom_geom[0].CellSize(idim)*0.5; - // if dimension is reduced to one cell length // - if ( slice_realbox.hi(idim) - slice_realbox.lo(idim) <= 0) - { - slice_cc_nd_box.setLo( idim, slice_realbox.lo(idim) ); - slice_cc_nd_box.setHi( idim, slice_realbox.hi(idim) ); - - if ( slice_cr_ratio[idim] > 1) { slice_cr_ratio[idim] = 1; } - - // check for interpolation -- compute index lo with floor and ceil - if ( slice_cc_nd_box.lo(idim) - real_box.lo(idim) >= fac ) { - slice_lo[idim] = static_cast( - floor( ( (slice_cc_nd_box.lo(idim) - - (real_box.lo(idim) + fac ) ) - / dom_geom[0].CellSize(idim)) + fac * very_small_number) ); - slice_lo2[idim] = static_cast( - ceil( ( (slice_cc_nd_box.lo(idim) - - (real_box.lo(idim) + fac) ) - / dom_geom[0].CellSize(idim)) - fac * very_small_number) ); - } - else { - slice_lo[idim] = static_cast( - std::round( (slice_cc_nd_box.lo(idim) - - (real_box.lo(idim) ) ) - / dom_geom[0].CellSize(idim)) ); - slice_lo2[idim] = static_cast( - std::ceil((slice_cc_nd_box.lo(idim) - - (real_box.lo(idim) ) ) - / dom_geom[0].CellSize(idim) ) ); - } - - // flag for interpolation -- if reduced dimension location // - // does not align with data point // - if ( slice_lo[idim] == slice_lo2[idim]) { - if ( slice_cc_nd_box.lo(idim) - real_box.lo(idim) < fac ) { - interp_lo[idim] = 1; - } - } - else { - interp_lo[idim] = 1; - } - - // ncells = 1 if dimension is reduced // - slice_hi[idim] = slice_lo[idim] + 1; - } - else - { - // moving realbox.lo and realbox.hi to nearest coarsenable grid point // - auto index_lo = static_cast(floor(((slice_realbox.lo(idim) + very_small_number - - (real_box.lo(idim))) / dom_geom[0].CellSize(idim))) ); - auto index_hi = static_cast(ceil(((slice_realbox.hi(idim) - very_small_number - - (real_box.lo(idim))) / dom_geom[0].CellSize(idim))) ); - - bool modify_cr = true; - - while ( modify_cr ) { - int lo_new = index_lo; - int hi_new = index_hi; - const int mod_lo = index_lo % slice_cr_ratio[idim]; - const int mod_hi = index_hi % slice_cr_ratio[idim]; - modify_cr = false; - - // To ensure that the index.lo is coarsenable // - if ( mod_lo > 0) { - lo_new = index_lo - mod_lo; - } - // To ensure that the index.hi is coarsenable // - if ( mod_hi > 0) { - hi_new = index_hi + (slice_cr_ratio[idim] - mod_hi); - } - - // If modified index.hi is > baselinebox.hi, move the point // - // to the previous coarsenable point // - const auto small_number = 0.01; - if ( (hi_new * dom_geom[0].CellSize(idim)) - > real_box.hi(idim) - real_box.lo(idim) + dom_geom[0].CellSize(idim)*small_number) - { - hi_new = index_hi - mod_hi; - } - - if ( (hi_new - lo_new) == 0 ){ - std::stringstream warnMsg; - warnMsg << " Coarsening ratio " << slice_cr_ratio[idim] << " in dim "<< idim << - "is leading to zero cells for slice." 
<< " Thus reducing cr_ratio by half.\n"; - - ablastr::warn_manager::WMRecordWarning("Diagnostics", - warnMsg.str()); - - slice_cr_ratio[idim] = slice_cr_ratio[idim]/2; - modify_cr = true; - } - - if ( !modify_cr ) { - index_lo = lo_new; - index_hi = hi_new; - } - slice_lo[idim] = index_lo; - slice_hi[idim] = index_hi - 1; // since default is cell-centered - } - slice_realbox.setLo( idim, index_lo * dom_geom[0].CellSize(idim) - + real_box.lo(idim) ); - slice_realbox.setHi( idim, index_hi * dom_geom[0].CellSize(idim) - + real_box.lo(idim) ); - slice_cc_nd_box.setLo( idim, slice_realbox.lo(idim) + Real(fac) ); - slice_cc_nd_box.setHi( idim, slice_realbox.hi(idim) - Real(fac) ); - } - } -} - - -/* \brief - * This function is called if the coordinates of the slice do not align with data points - * \param interp_lo is an IntVect which is flagged as 1, if interpolation - is required in the dimension. - */ -void -InterpolateSliceValues(MultiFab& smf, IntVect interp_lo, RealBox slice_realbox, - const Vector& geom, int ncomp, int nghost, - IntVect slice_lo, IntVect /*slice_hi*/, IntVect SliceType, - const RealBox real_box) -{ - for (MFIter mfi(smf); mfi.isValid(); ++mfi) - { - const Box& bx = mfi.tilebox(); - FArrayBox& fabox = smf[mfi]; - - for ( int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - if ( interp_lo[idim] == 1 ) { - InterpolateLo( bx, fabox, slice_lo, geom, idim, SliceType, - slice_realbox, 0, ncomp, nghost, real_box); - } - } - } - -} - -void -InterpolateLo(const Box& bx, FArrayBox &fabox, IntVect slice_lo, - Vector geom, int idir, IntVect IndType, - RealBox slice_realbox, int srccomp, int ncomp, - int /*nghost*/, const RealBox real_box ) -{ - auto fabarr = fabox.array(); - const auto lo = amrex::lbound(bx); - const auto hi = amrex::ubound(bx); - const double fac = ( 1.0-IndType[idir] )*geom[0].CellSize(idir) * 0.5; - const int imin = slice_lo[idir]; - const double minpos = imin*geom[0].CellSize(idir) + fac + real_box.lo(idir); - const double maxpos = (imin+1)*geom[0].CellSize(idir) + fac + real_box.lo(idir); - const double slice_minpos = slice_realbox.lo(idir) ; - - switch (idir) { - case 0: - { - if ( imin >= lo.x && imin <= lo.x) { - for (int n = srccomp; n < srccomp + ncomp; ++n) { - for (int k = lo.z; k <= hi.z; ++k) { - for (int j = lo.y; j <= hi.y; ++j) { - for (int i = lo.x; i <= hi.x; ++i) { - const double minval = fabarr(i,j,k,n); - const double maxval = fabarr(i+1,j,k,n); - const double ratio = (maxval - minval) / (maxpos - minpos); - const double xdiff = slice_minpos - minpos; - const double newval = minval + xdiff * ratio; - fabarr(i,j,k,n) = static_cast(newval); - } - } - } - } - } - break; - } - case 1: - { - if ( imin >= lo.y && imin <= lo.y) { - for (int n = srccomp; n < srccomp+ncomp; ++n) { - for (int k = lo.z; k <= hi.z; ++k) { - for (int j = lo.y; j <= hi.y; ++j) { - for (int i = lo.x; i <= hi.x; ++i) { - const double minval = fabarr(i,j,k,n); - const double maxval = fabarr(i,j+1,k,n); - const double ratio = (maxval - minval) / (maxpos - minpos); - const double xdiff = slice_minpos - minpos; - const double newval = minval + xdiff * ratio; - fabarr(i,j,k,n) = static_cast(newval); - } - } - } - } - } - break; - } - case 2: - { - if ( imin >= lo.z && imin <= lo.z) { - for (int n = srccomp; n < srccomp+ncomp; ++n) { - for (int k = lo.z; k <= hi.z; ++k) { - for (int j = lo.y; j <= hi.y; ++j) { - for (int i = lo.x; i <= hi.x; ++i) { - const double minval = fabarr(i,j,k,n); - const double maxval = fabarr(i,j,k+1,n); - const double ratio = (maxval - minval) / (maxpos - 
minpos); - const double xdiff = slice_minpos - minpos; - const double newval = minval + xdiff * ratio; - fabarr(i,j,k,n) = static_cast(newval); - } - } - } - } - } - break; - } - - } - -} From 554a2fff6ee4ce9793827d07d2f76d98f5f027c4 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Wed, 29 Jan 2025 18:45:09 +0100 Subject: [PATCH 196/278] WarpX class: AllocInitMultiFab and imultifab_map no longer static (#5614) This PR contributes to reducing the usage of static member functions and static variables in the WarpX class. **Note:** I have observed a [failure](https://dev.azure.com/ECP-WarpX/WarpX/_build/results?buildId=20508&view=logs&jobId=5dcb75fd-7a98-5ebf-88d6-c1115a1d979a&j=5dcb75fd-7a98-5ebf-88d6-c1115a1d979a&t=f00e0ae1-a8d3-5558-a3f3-078bee0de0f0) of the test `test_2d_embedded_circle` . This failure does not seem to be related to the PR. I have observed that sometimes `embedded_circle` tests fail for apparently random reasons. We should look into that, since it might be a race condition or an undefined behavior issue. --- Source/WarpX.H | 4 ++-- Source/WarpX.cpp | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/Source/WarpX.H b/Source/WarpX.H index fec12affecd..995d7edd891 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -404,7 +404,7 @@ public: * \param[in] name The name of the iMultiFab to use in the map * \param[in] initial_value The optional initial value */ - static void AllocInitMultiFab ( + void AllocInitMultiFab ( std::unique_ptr& mf, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm, @@ -417,7 +417,7 @@ public: // Maps of all of the iMultiFabs used (this can include MFs from other classes) // This is a convenience for the Python interface, allowing all iMultiFabs // to be easily referenced from Python. - static std::map imultifab_map; + std::map imultifab_map; /** * \brief diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 9442fed0596..a1eac8d6080 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -176,8 +176,6 @@ bool WarpX::do_dynamic_scheduling = true; bool WarpX::do_multi_J = false; int WarpX::do_multi_J_n_depositions; -std::map WarpX::imultifab_map; - IntVect WarpX::filter_npass_each_dir(1); int WarpX::n_field_gather_buffer = -1; From 4f0bc75fff56c712b4155a6c269d64d786178b5d Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 29 Jan 2025 10:18:43 -0800 Subject: [PATCH 197/278] CMake/CTest: Opt-in Disable Signal Handling (#5550) In IDEs, we want to attach debuggers to CTest runs. This needs an option to [disable signal handling from AMReX](https://amrex-codes.github.io/amrex/docs_html/Debugging.html#breaking-into-debuggers) to work. --------- Co-authored-by: Edoardo Zoni --- .azure-pipelines.yml | 2 +- CMakeLists.txt | 19 ++++++++++--------- Docs/source/install/cmake.rst | 4 ++++ Examples/CMakeLists.txt | 3 +++ 4 files changed, 18 insertions(+), 10 deletions(-) diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index badedcb994c..77cc75a0264 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -141,7 +141,7 @@ jobs: df -h # configure export AMReX_CMAKE_FLAGS="-DAMReX_ASSERTIONS=ON -DAMReX_TESTING=ON" - export WARPX_TEST_FLAGS="-DWarpX_TEST_CLEANUP=ON -DWarpX_TEST_FPETRAP=ON -DWarpX_TEST_DEBUG=ON" + export WARPX_TEST_FLAGS="-DWarpX_TEST_CLEANUP=ON -DWarpX_TEST_FPETRAP=ON -DWarpX_BACKTRACE_INFO=ON" cmake -S . 
-B build \ ${AMReX_CMAKE_FLAGS} \ ${WARPX_CMAKE_FLAGS} \ diff --git a/CMakeLists.txt b/CMakeLists.txt index f1dcece8ce1..24e9338982e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -81,18 +81,19 @@ option(WarpX_QED_TABLE_GEN "QED table generation (requires PICSAR and Boost)" option(WarpX_QED_TOOLS "Build external tool to generate QED lookup tables (requires PICSAR and Boost)" OFF) -# Advanced option to automatically clean up CI test directories -option(WarpX_TEST_CLEANUP "Clean up CI test directories" OFF) +# Advanced option to run tests +option(WarpX_TEST_CLEANUP "Clean up automated test directories" OFF) +option(WarpX_TEST_DEBUGGER "Run automated tests without AMReX signal handling (to attach debuggers)" OFF) +option(WarpX_TEST_FPETRAP "Run automated tests with FPE-trapping runtime parameters" OFF) mark_as_advanced(WarpX_TEST_CLEANUP) - -# Advanced option to run CI tests with FPE-trapping runtime parameters -option(WarpX_TEST_FPETRAP "Run CI tests with FPE-trapping runtime parameters" OFF) +mark_as_advanced(WarpX_TEST_DEBUGGER) mark_as_advanced(WarpX_TEST_FPETRAP) -# Advanced option to run CI tests with the -g compile option -option(WarpX_TEST_DEBUG "Run CI tests with the -g compile option" OFF) -mark_as_advanced(WarpX_TEST_DEBUG) -if(WarpX_TEST_DEBUG) +# Advanced option to compile with the -g1 option for minimal debug symbols +# (useful to see, e.g., line numbers in backtraces) +option(WarpX_BACKTRACE_INFO "Compile with -g1 for minimal debug symbols (currently used in CI tests)" OFF) +mark_as_advanced(WarpX_BACKTRACE_INFO) +if(WarpX_BACKTRACE_INFO) set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g1") endif() diff --git a/Docs/source/install/cmake.rst b/Docs/source/install/cmake.rst index 5c02fb03b9e..fbdc6809853 100644 --- a/Docs/source/install/cmake.rst +++ b/Docs/source/install/cmake.rst @@ -143,6 +143,10 @@ CMake Option Default & Values Des ``WarpX_pybind11_repo`` ``https://github.com/pybind/pybind11.git`` Repository URI to pull and build pybind11 from ``WarpX_pybind11_branch`` *we set and maintain a compatible commit* Repository branch for ``WarpX_pybind11_repo`` ``WarpX_pybind11_internal`` **ON**/OFF Needs a pre-installed pybind11 library if set to ``OFF`` +``WarpX_TEST_CLEANUP`` ON/**OFF** Clean up automated test directories +``WarpX_TEST_DEBUGGER`` ON/**OFF** Run automated tests without AMReX signal handling (to attach debuggers) +``WarpX_TEST_FPETRAP`` ON/**OFF** Run automated tests with FPE-trapping runtime parameters +``WarpX_BACKTRACE_INFO`` ON/**OFF** Compile with -g1 for minimal debug symbols (currently used in CI tests) ============================= ============================================== =========================================================== For example, one can also build against a local AMReX copy. diff --git a/Examples/CMakeLists.txt b/Examples/CMakeLists.txt index c4303aaee0b..b77a3790c36 100644 --- a/Examples/CMakeLists.txt +++ b/Examples/CMakeLists.txt @@ -159,6 +159,9 @@ function(add_warpx_test "amrex.fpe_trap_zero = 1" ) endif() + if(WarpX_TEST_DEBUGGER) + set(runtime_params_fpetrap "amrex.signal_handling = 0") + endif() add_test( NAME ${name}.run COMMAND From ceb172eaf708afe0f6e7c12d833e161756188fb6 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 29 Jan 2025 10:19:19 -0800 Subject: [PATCH 198/278] Doc: Update Spack Instructions (#5587) Update the Spack instructions to reflect our early 2024 change to include the Python bindings as a variant of the `warpx` package and remove the `py-warpx` package. 
Close #5563 --- Docs/source/install/users.rst | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/Docs/source/install/users.rst b/Docs/source/install/users.rst index 47378bbf6d6..650cbacd4d0 100644 --- a/Docs/source/install/users.rst +++ b/Docs/source/install/users.rst @@ -79,7 +79,7 @@ Using the Spack Package ----------------------- Packages for WarpX are available via the `Spack `__ package manager. -The package ``warpx`` installs executables and the package ``py-warpx`` includes Python bindings, i.e. `PICMI `_. +The package ``warpx`` installs executables and the variant ``warpx +python`` also includes Python bindings, i.e. `PICMI `__. .. code-block:: bash @@ -88,11 +88,11 @@ The package ``warpx`` installs executables and the package ``py-warpx`` includes spack buildcache keys --install --trust # see `spack info py-warpx` for build options. - # optional arguments: -mpi ^warpx dims=2 compute=cuda - spack install py-warpx - spack load py-warpx + # optional arguments: -mpi compute=cuda + spack install warpx +python + spack load warpx +python -See ``spack info warpx`` or ``spack info py-warpx`` and `the official Spack tutorial `__ for more information. +See ``spack info warpx`` and `the official Spack tutorial `__ for more information. .. _install-pypi: From 98a14f2fda757a8b9f482e532b9a297c9705fee8 Mon Sep 17 00:00:00 2001 From: Weiqun Zhang Date: Thu, 30 Jan 2025 16:05:59 -0800 Subject: [PATCH 199/278] Flux injection from EB: Pick a random point instead of the centroid (#5493) Co-authored-by: Remi Lehe --- Examples/Tests/flux_injection/CMakeLists.txt | 12 ++--- .../analysis_flux_injection_from_eb.py | 3 +- .../Tests/flux_injection/inputs_base_from_eb | 4 +- .../inputs_test_2d_flux_injection_from_eb | 2 +- .../inputs_test_3d_flux_injection_from_eb | 2 +- .../inputs_test_rz_flux_injection_from_eb | 2 +- .../test_2d_flux_injection_from_eb.json | 12 ++--- .../test_3d_flux_injection_from_eb.json | 16 +++---- .../test_rz_flux_injection_from_eb.json | 16 +++---- Source/Particles/AddPlasmaUtilities.H | 39 ++++++++-------- .../Particles/PhysicalParticleContainer.cpp | 46 ++++++++----------- 11 files changed, 72 insertions(+), 82 deletions(-) diff --git a/Examples/Tests/flux_injection/CMakeLists.txt b/Examples/Tests/flux_injection/CMakeLists.txt index 000d5c74917..390c76ec58e 100644 --- a/Examples/Tests/flux_injection/CMakeLists.txt +++ b/Examples/Tests/flux_injection/CMakeLists.txt @@ -26,8 +26,8 @@ add_warpx_test( 3 # dims 2 # nprocs inputs_test_3d_flux_injection_from_eb # inputs - "analysis_flux_injection_from_eb.py diags/diag1000010" # analysis - "analysis_default_regression.py --path diags/diag1000010" # checksum + "analysis_flux_injection_from_eb.py diags/diag1000020" # analysis + "analysis_default_regression.py --path diags/diag1000020" # checksum OFF # dependency ) @@ -36,8 +36,8 @@ add_warpx_test( RZ # dims 2 # nprocs inputs_test_rz_flux_injection_from_eb # inputs - "analysis_flux_injection_from_eb.py diags/diag1000010" # analysis - "analysis_default_regression.py --path diags/diag1000010" # checksum + "analysis_flux_injection_from_eb.py diags/diag1000020" # analysis + "analysis_default_regression.py --path diags/diag1000020" # checksum OFF # dependency ) @@ -46,7 +46,7 @@ add_warpx_test( 2 # dims 2 # nprocs inputs_test_2d_flux_injection_from_eb # inputs - "analysis_flux_injection_from_eb.py diags/diag1000010" # analysis - "analysis_default_regression.py --path diags/diag1000010" # checksum + "analysis_flux_injection_from_eb.py diags/diag1000020" # analysis + 
"analysis_default_regression.py --path diags/diag1000020" # checksum OFF # dependency ) diff --git a/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py b/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py index 0f2a37eea71..96488fd7e71 100755 --- a/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py +++ b/Examples/Tests/flux_injection/analysis_flux_injection_from_eb.py @@ -147,7 +147,8 @@ def compare_gaussian_flux(u, w, u_th, u_m, label=""): wy = nz * vx - nx * vz wz = nx * vy - ny * vx u_perp2 = ux * wx + uy * wy + uz * wz -compare_gaussian(u_perp2, w, u_th=0.01, label="u_perp") +compare_gaussian(u_perp2, w, u_th=0.01, label="u_perp2") +plt.legend() plt.tight_layout() plt.savefig("Distribution.png") diff --git a/Examples/Tests/flux_injection/inputs_base_from_eb b/Examples/Tests/flux_injection/inputs_base_from_eb index 87b9c32592b..618fd1c941a 100644 --- a/Examples/Tests/flux_injection/inputs_base_from_eb +++ b/Examples/Tests/flux_injection/inputs_base_from_eb @@ -1,5 +1,5 @@ # Maximum number of time steps -max_step = 10 +max_step = 20 # The lo and hi ends of grids are multipliers of blocking factor amr.blocking_factor = 8 @@ -13,7 +13,7 @@ amr.max_level = 0 # Deactivate Maxwell solver algo.maxwell_solver = none -warpx.const_dt = 1e-9 +warpx.const_dt = 0.5e-9 # Embedded boundary warpx.eb_implicit_function = "-(x**2+y**2+z**2-2**2)" diff --git a/Examples/Tests/flux_injection/inputs_test_2d_flux_injection_from_eb b/Examples/Tests/flux_injection/inputs_test_2d_flux_injection_from_eb index f2e6f177887..291ef329ad6 100644 --- a/Examples/Tests/flux_injection/inputs_test_2d_flux_injection_from_eb +++ b/Examples/Tests/flux_injection/inputs_test_2d_flux_injection_from_eb @@ -1,7 +1,7 @@ FILE = inputs_base_from_eb # number of grid points -amr.n_cell = 16 16 +amr.n_cell = 32 32 # Geometry geometry.dims = 2 diff --git a/Examples/Tests/flux_injection/inputs_test_3d_flux_injection_from_eb b/Examples/Tests/flux_injection/inputs_test_3d_flux_injection_from_eb index 81ddc039977..59db133e484 100644 --- a/Examples/Tests/flux_injection/inputs_test_3d_flux_injection_from_eb +++ b/Examples/Tests/flux_injection/inputs_test_3d_flux_injection_from_eb @@ -1,7 +1,7 @@ FILE = inputs_base_from_eb # number of grid points -amr.n_cell = 16 16 16 +amr.n_cell = 32 32 32 # Geometry geometry.dims = 3 diff --git a/Examples/Tests/flux_injection/inputs_test_rz_flux_injection_from_eb b/Examples/Tests/flux_injection/inputs_test_rz_flux_injection_from_eb index 4c970257f57..c206a154646 100644 --- a/Examples/Tests/flux_injection/inputs_test_rz_flux_injection_from_eb +++ b/Examples/Tests/flux_injection/inputs_test_rz_flux_injection_from_eb @@ -1,7 +1,7 @@ FILE = inputs_base_from_eb # number of grid points -amr.n_cell = 8 16 +amr.n_cell = 16 32 # Geometry geometry.dims = RZ diff --git a/Regression/Checksum/benchmarks_json/test_2d_flux_injection_from_eb.json b/Regression/Checksum/benchmarks_json/test_2d_flux_injection_from_eb.json index da993c9ef4b..d4fe12f759f 100644 --- a/Regression/Checksum/benchmarks_json/test_2d_flux_injection_from_eb.json +++ b/Regression/Checksum/benchmarks_json/test_2d_flux_injection_from_eb.json @@ -1,11 +1,11 @@ { "lev=0": {}, "electron": { - "particle_momentum_x": 3.4911323396038835e-19, - "particle_momentum_y": 2.680312173420972e-20, - "particle_momentum_z": 3.4918430443688734e-19, - "particle_position_x": 17950.08139982036, - "particle_position_y": 17949.47183079554, - "particle_weight": 6.269e-08 + "particle_momentum_x": 1.4013860393698154e-18, + 
"particle_momentum_y": 1.0934049057929508e-19, + "particle_momentum_z": 1.4066623146535866e-18, + "particle_position_x": 72129.9049362857, + "particle_position_y": 72178.76505490103, + "particle_weight": 6.279375e-08 } } diff --git a/Regression/Checksum/benchmarks_json/test_3d_flux_injection_from_eb.json b/Regression/Checksum/benchmarks_json/test_3d_flux_injection_from_eb.json index 15b6c7b602c..c1c888ff808 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_flux_injection_from_eb.json +++ b/Regression/Checksum/benchmarks_json/test_3d_flux_injection_from_eb.json @@ -1,12 +1,12 @@ { "lev=0": {}, "electron": { - "particle_momentum_x": 2.1855512033870577e-18, - "particle_momentum_y": 2.1826030840183147e-18, - "particle_momentum_z": 2.181852403122796e-18, - "particle_position_x": 111042.81925863726, - "particle_position_y": 111012.52928910403, - "particle_position_z": 111015.90903542604, - "particle_weight": 2.4775750000000003e-07 + "particle_momentum_x": 1.7587772989573373e-17, + "particle_momentum_y": 1.7608560965806728e-17, + "particle_momentum_z": 1.7596701993624562e-17, + "particle_position_x": 902783.9285213391, + "particle_position_y": 902981.7980528818, + "particle_position_z": 902777.1246066706, + "particle_weight": 2.503818749999996e-07 } -} \ No newline at end of file +} diff --git a/Regression/Checksum/benchmarks_json/test_rz_flux_injection_from_eb.json b/Regression/Checksum/benchmarks_json/test_rz_flux_injection_from_eb.json index fb7142afed0..f8043c5c3e2 100644 --- a/Regression/Checksum/benchmarks_json/test_rz_flux_injection_from_eb.json +++ b/Regression/Checksum/benchmarks_json/test_rz_flux_injection_from_eb.json @@ -1,12 +1,12 @@ { "lev=0": {}, "electron": { - "particle_momentum_x": 3.3665608248716305e-19, - "particle_momentum_y": 3.392690322852239e-19, - "particle_momentum_z": 5.254577143779578e-19, - "particle_position_x": 26933.772112044953, - "particle_position_y": 26926.994273876346, - "particle_theta": 29492.77423173835, - "particle_weight": 2.4953304765944705e-07 + "particle_momentum_x": 1.3547613622259754e-18, + "particle_momentum_y": 1.3539614160696825e-18, + "particle_momentum_z": 2.102305484242805e-18, + "particle_position_x": 108281.74349700565, + "particle_position_y": 108222.91506078152, + "particle_theta": 118597.06004310239, + "particle_weight": 2.5087578786544294e-07 } -} \ No newline at end of file +} diff --git a/Source/Particles/AddPlasmaUtilities.H b/Source/Particles/AddPlasmaUtilities.H index 824e3e10955..7b8e4e58105 100644 --- a/Source/Particles/AddPlasmaUtilities.H +++ b/Source/Particles/AddPlasmaUtilities.H @@ -111,28 +111,24 @@ AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE amrex::Real compute_scale_fac_area_eb ( const amrex::GpuArray& dx, const amrex::Real num_ppc_real, - amrex::Array4 const& eb_bnd_normal_arr, - int i, int j, int k ) { + AMREX_D_DECL(const amrex::Real n0, + const amrex::Real n1, + const amrex::Real n2)) { using namespace amrex::literals; // Scale particle weight by the area of the emitting surface, within one cell // By definition, eb_bnd_area_arr is normalized (unitless). // Here we undo the normalization (i.e. 
multiply by the surface used for normalization in amrex: // see https://amrex-codes.github.io/amrex/docs_html/EB.html#embedded-boundary-data-structures) #if defined(WARPX_DIM_3D) - const amrex::Real nx = eb_bnd_normal_arr(i,j,k,0); - const amrex::Real ny = eb_bnd_normal_arr(i,j,k,1); - const amrex::Real nz = eb_bnd_normal_arr(i,j,k,2); - amrex::Real scale_fac = std::sqrt(amrex::Math::powi<2>(nx*dx[1]*dx[2]) + - amrex::Math::powi<2>(ny*dx[0]*dx[2]) + - amrex::Math::powi<2>(nz*dx[0]*dx[1])); + amrex::Real scale_fac = std::sqrt(amrex::Math::powi<2>(n0*dx[1]*dx[2]) + + amrex::Math::powi<2>(n1*dx[0]*dx[2]) + + amrex::Math::powi<2>(n2*dx[0]*dx[1])); #elif defined(WARPX_DIM_RZ) || defined(WARPX_DIM_XZ) - const amrex::Real nx = eb_bnd_normal_arr(i,j,k,0); - const amrex::Real nz = eb_bnd_normal_arr(i,j,k,1); - amrex::Real scale_fac = std::sqrt(amrex::Math::powi<2>(nx*dx[1]) + - amrex::Math::powi<2>(nz*dx[0])); + amrex::Real scale_fac = std::sqrt(amrex::Math::powi<2>(n0*dx[1]) + + amrex::Math::powi<2>(n1*dx[0])); #else - amrex::ignore_unused(dx, eb_bnd_normal_arr, i, j, k); + amrex::ignore_unused(dx, AMREX_D_DECL(n0,n1,n2)); amrex::Real scale_fac = 0.0_rt; #endif // Do not multiply by eb_bnd_area_arr(i,j,k) here because this @@ -159,8 +155,9 @@ amrex::Real compute_scale_fac_area_eb ( AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void rotate_momentum_eb ( PDim3 & pu, - amrex::Array4 const& eb_bnd_normal_arr, - int i, int j, int k ) + AMREX_D_DECL(const amrex::Real n0, + const amrex::Real n1, + const amrex::Real n2)) { using namespace amrex::literals; @@ -168,9 +165,9 @@ void rotate_momentum_eb ( // The minus sign below takes into account the fact that eb_bnd_normal_arr // points towards the covered region, while particles are to be emitted // *away* from the covered region. - amrex::Real const nx = -eb_bnd_normal_arr(i,j,k,0); - amrex::Real const ny = -eb_bnd_normal_arr(i,j,k,1); - amrex::Real const nz = -eb_bnd_normal_arr(i,j,k,2); + amrex::Real const nx = -n0; + amrex::Real const ny = -n1; + amrex::Real const nz = -n2; // Rotate the momentum in theta and phi amrex::Real const cos_theta = nz; @@ -194,14 +191,14 @@ void rotate_momentum_eb ( // The minus sign below takes into account the fact that eb_bnd_normal_arr // points towards the covered region, while particles are to be emitted // *away* from the covered region. 
- amrex::Real const sin_theta = -eb_bnd_normal_arr(i,j,k,0); - amrex::Real const cos_theta = -eb_bnd_normal_arr(i,j,k,1); + amrex::Real const sin_theta = -n0; + amrex::Real const cos_theta = -n1; amrex::Real const uz = pu.z*cos_theta - pu.x*sin_theta; amrex::Real const ux = pu.x*cos_theta + pu.z*sin_theta; pu.x = ux; pu.z = uz; #else - amrex::ignore_unused(pu, eb_bnd_normal_arr, i, j, k); + amrex::ignore_unused(pu, AMREX_D_DECL(n0,n1,n2)); #endif } #endif //AMREX_USE_EB diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index baac138dd38..9bf24e659e0 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -1351,16 +1351,11 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, #ifdef AMREX_USE_EB bool const inject_from_eb = plasma_injector.m_inject_from_eb; // whether to inject from EB or from a plane // Extract data structures for embedded boundaries + amrex::EBFArrayBoxFactory const* eb_factory = nullptr; amrex::FabArray const* eb_flag = nullptr; - amrex::MultiCutFab const* eb_bnd_area = nullptr; - amrex::MultiCutFab const* eb_bnd_normal = nullptr; - amrex::MultiCutFab const* eb_bnd_cent = nullptr; if (inject_from_eb) { - amrex::EBFArrayBoxFactory const& eb_box_factory = WarpX::GetInstance().fieldEBFactory(0); - eb_flag = &eb_box_factory.getMultiEBCellFlagFab(); - eb_bnd_area = &eb_box_factory.getBndryArea(); - eb_bnd_normal = &eb_box_factory.getBndryNormal(); - eb_bnd_cent = &eb_box_factory.getBndryCent(); + eb_factory = &(WarpX::GetInstance().fieldEBFactory(0)); + eb_flag = &(eb_factory->getMultiEBCellFlagFab()); } #endif @@ -1456,17 +1451,8 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, } #ifdef AMREX_USE_EB - // Extract data structures for embedded boundaries - amrex::Array4::value_type> eb_flag_arr; - amrex::Array4 eb_bnd_area_arr; - amrex::Array4 eb_bnd_normal_arr; - amrex::Array4 eb_bnd_cent_arr; - if (inject_from_eb) { - eb_flag_arr = eb_flag->array(mfi); - eb_bnd_area_arr = eb_bnd_area->array(mfi); - eb_bnd_normal_arr = eb_bnd_normal->array(mfi); - eb_bnd_cent_arr = eb_bnd_cent->array(mfi); - } + auto eb_flag_arr = eb_flag ? eb_flag->const_array(mfi) : Array4{}; + auto eb_data = eb_factory ? 
eb_factory->getEBData(mfi) : EBData{}; #endif amrex::ParallelForRNG(overlap_box, [=] AMREX_GPU_DEVICE (int i, int j, int k, amrex::RandomEngine const& engine) noexcept @@ -1482,7 +1468,7 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, // Skip cells that are not partially covered by the EB if (eb_flag_arr(i,j,k).isRegular() || eb_flag_arr(i,j,k).isCovered()) { return; } // Scale by the (normalized) area of the EB surface in this cell - num_ppc_real_in_this_cell *= eb_bnd_area_arr(i,j,k); + num_ppc_real_in_this_cell *= eb_data.get(i,j,k); } #else amrex::Real const num_ppc_real_in_this_cell = num_ppc_real; // user input: number of macroparticles per cell @@ -1574,7 +1560,10 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, Real scale_fac; #ifdef AMREX_USE_EB if (inject_from_eb) { - scale_fac = compute_scale_fac_area_eb(dx, num_ppc_real, eb_bnd_normal_arr, i, j, k ); + scale_fac = compute_scale_fac_area_eb(dx, num_ppc_real, + AMREX_D_DECL(eb_data.get(i,j,k,0), + eb_data.get(i,j,k,1), + eb_data.get(i,j,k,2))); } else #endif { @@ -1595,14 +1584,15 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, XDim3 r; #ifdef AMREX_USE_EB if (inject_from_eb) { + auto const& pt = eb_data.randomPointOnEB(i,j,k,engine); #if defined(WARPX_DIM_3D) - pos.x = overlap_corner[0] + (iv[0] + 0.5_rt + eb_bnd_cent_arr(i,j,k,0))*dx[0]; - pos.y = overlap_corner[1] + (iv[1] + 0.5_rt + eb_bnd_cent_arr(i,j,k,1))*dx[1]; - pos.z = overlap_corner[2] + (iv[2] + 0.5_rt + eb_bnd_cent_arr(i,j,k,2))*dx[2]; + pos.x = overlap_corner[0] + (iv[0] + 0.5_rt + pt[0])*dx[0]; + pos.y = overlap_corner[1] + (iv[1] + 0.5_rt + pt[1])*dx[1]; + pos.z = overlap_corner[2] + (iv[2] + 0.5_rt + pt[2])*dx[2]; #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - pos.x = overlap_corner[0] + (iv[0] + 0.5_rt + eb_bnd_cent_arr(i,j,k,0))*dx[0]; + pos.x = overlap_corner[0] + (iv[0] + 0.5_rt + pt[0])*dx[0]; pos.y = 0.0_rt; - pos.z = overlap_corner[1] + (iv[1] + 0.5_rt + eb_bnd_cent_arr(i,j,k,1))*dx[1]; + pos.z = overlap_corner[1] + (iv[1] + 0.5_rt + pt[1])*dx[1]; #endif } else #endif @@ -1661,7 +1651,9 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, // Injection from EB: rotate momentum according to the normal of the EB surface // (The above code initialized the momentum by assuming that z is the direction // normal to the EB surface. Thus we need to rotate from z to the normal.) - rotate_momentum_eb(pu, eb_bnd_normal_arr, i, j , k); + rotate_momentum_eb(pu, AMREX_D_DECL(eb_data.get(i,j,k,0), + eb_data.get(i,j,k,1), + eb_data.get(i,j,k,2))); } #endif From 28d8b23ab1e5f31430ba272d9f4ae670af3171cf Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Fri, 31 Jan 2025 09:10:16 -0800 Subject: [PATCH 200/278] Add reference for new article using WarpX [Tyushev (2025)] (#5627) Just adding a new article that uses WarpX to the Science Highlights section. Signed-off-by: roelof-groenewald --- Docs/source/highlights.rst | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst index 300d94149f8..2e8eeffbef2 100644 --- a/Docs/source/highlights.rst +++ b/Docs/source/highlights.rst @@ -199,6 +199,11 @@ Related works using WarpX: Nuclear Fusion and Plasma Confinement ************************************* +#. Tyushev M., Papahn Zadeh M., Chopra N. S., Raitses Y., Romadanov I., Likhanskii A., Fubiani G., Garrigues L., Groenewald R. 
and Smolyakov A. + **Mode transitions and spoke structures in E×B Penning discharge**. + Physics of Plasmas **32**, 013511, 2025. + `DOI:10.1063/5.0238577 `__ + #. Scheffel J. and Jäderberg J. and Bendtz K. and Holmberg R. and Lindvall K., **Axial Confinement in the Novatron Mirror Machine**. arXiv 2410.20134 From 43d5aa6f5b751b8fa2b0272fc5bf789e70e17abb Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 31 Jan 2025 18:10:51 +0100 Subject: [PATCH 201/278] WarpX class: remove unused static variable (#5626) `static bool do_device_synchronize;` is unused. Therefore this PR removes it from the `WarpX.H` header. --- Source/WarpX.H | 2 -- 1 file changed, 2 deletions(-) diff --git a/Source/WarpX.H b/Source/WarpX.H index 995d7edd891..ee49be787a9 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -371,8 +371,6 @@ public: static bool do_multi_J; static int do_multi_J_n_depositions; - static bool do_device_synchronize; - //! With mesh refinement, particles located inside a refinement patch, but within //! #n_field_gather_buffer cells of the edge of the patch, will gather the fields //! from the lower refinement level instead of the refinement patch itself From 2996dd0fa2bb992199589fcf55c280a56e0b2e6e Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 31 Jan 2025 09:27:30 -0800 Subject: [PATCH 202/278] AMReX/pyAMReX/PICSAR: Weekly Update (#5613) Weekly update to latest AMReX. Weekly update to latest pyAMReX. Weekly update to latest PICSAR. ```console ./Tools/Release/updateAMReX.py ./Tools/Release/updatepyAMReX.py ./Tools/Release/updatePICSAR.py ``` --------- Signed-off-by: Axel Huebl --- .github/workflows/cuda.yml | 2 +- cmake/dependencies/AMReX.cmake | 2 +- cmake/dependencies/PICSAR.cmake | 2 +- cmake/dependencies/pyAMReX.cmake | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 0d8ad0e0566..12a68d327f7 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -127,7 +127,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 0f46a1615c17f0bbeaedb20c27a97c9f6e439781 && cd - + cd ../amrex && git checkout --detach 69f1ac884c6aba4d9881260819ade3bb25ed8aad && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index d529712534b..9c8907e835b 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -294,7 +294,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "0f46a1615c17f0bbeaedb20c27a97c9f6e439781" +set(WarpX_amrex_branch "69f1ac884c6aba4d9881260819ade3bb25ed8aad" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/PICSAR.cmake b/cmake/dependencies/PICSAR.cmake index 067ea464d88..9eb9162238a 100644 --- a/cmake/dependencies/PICSAR.cmake +++ b/cmake/dependencies/PICSAR.cmake @@ -109,7 +109,7 @@ if(WarpX_QED) set(WarpX_picsar_repo "https://github.com/ECP-WarpX/picsar.git" CACHE STRING "Repository URI to pull and build PICSAR from if(WarpX_picsar_internal)") - set(WarpX_picsar_branch "47b393993f860943e387b4b5d79407ee7f52d1ab" + set(WarpX_picsar_branch "24.09" CACHE STRING "Repository branch for WarpX_picsar_repo if(WarpX_picsar_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index 3cb849587dc..257bc264f21 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "6d9b9da849f5941777555ec9c9619be299d04912" +set(WarpX_pyamrex_branch "458c9ae7ab3cd4ca4e4e9736e82c60f9a7e7606c" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") From 958a39463c9e3fea0bbe1da0306104ccf9a2164c Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 31 Jan 2025 09:27:59 -0800 Subject: [PATCH 203/278] CI: Clang-Tidy 250min RZ runs reached the 220min mark on fresh cache. --- .github/workflows/clang_tidy.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/clang_tidy.yml b/.github/workflows/clang_tidy.yml index e6816b1c1a9..3caa11e1885 100644 --- a/.github/workflows/clang_tidy.yml +++ b/.github/workflows/clang_tidy.yml @@ -20,7 +20,7 @@ jobs: dim: [1, 2, RZ, 3] name: clang-tidy-${{ matrix.dim }}D runs-on: ubuntu-22.04 - timeout-minutes: 220 + timeout-minutes: 250 if: github.event.pull_request.draft == false steps: - uses: actions/checkout@v4 From 69a8a11d3d7c395fe4e8ba650b059f8865ec89b5 Mon Sep 17 00:00:00 2001 From: Olga Shapoval <30510597+oshapoval@users.noreply.github.com> Date: Fri, 31 Jan 2025 09:59:37 -0800 Subject: [PATCH 204/278] Added CI to test secondary ion emission in RZ. (#5576) This PR adds secondary ion emission through a callback function, allowing secondary electrons to be emitted when an ion hits the embedded boundary. In the following CI test, the random seed was fixed to ensure consistent emission of secondary electrons for reproducibility. We used a secondary electron emission yield (SEY) of 0.4. 
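For context, the yield model exercised here is the Nascap-like fit that the test's `sigma_nascap` helper implements (see the Python in the diff below): `sigma(E) = delta_H * (E_HMax + 1) / (E_HMax + E) * sqrt(E)`, with `E` in keV. A minimal standalone sketch (C++ here purely for illustration; the test itself defines this in Python) evaluated at the test parameters shows where the expected electron count comes from:

```cpp
#include <cmath>
#include <cstdio>

// Nascap-like secondary emission yield, mirroring the Python sigma_nascap
// in the test below. energy_keV: incident ion energy in keV;
// delta_H, E_HMax: model parameters (0.4 and 250 in the test).
double sigma_nascap (double energy_keV, double delta_H, double E_HMax)
{
    if (energy_keV <= 0.0) { return 0.0; }
    return delta_H * (E_HMax + 1.0) / (E_HMax + energy_keV)
           * std::sqrt(energy_keV);
}

int main ()
{
    // At 1 keV: 0.4 * 251/251 * sqrt(1) = 0.4 secondary electrons per ion
    // on average, so ~1.6 expected from the 4 test ions; with the fixed
    // seed, the analysis script observes exactly 2.
    std::printf("sigma(1 keV) = %g\n", sigma_nascap(1.0, 0.4, 250.0));
    return 0;
}
```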
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Remi Lehe --- Examples/Tests/CMakeLists.txt | 1 + .../secondary_ion_emission/CMakeLists.txt | 14 + .../Tests/secondary_ion_emission/analysis.py | 58 ++++ .../analysis_default_regression.py | 1 + ...ts_test_rz_secondary_ion_emission_picmi.py | 269 ++++++++++++++++++ .../test_rz_secondary_ion_emission_picmi.json | 26 ++ 6 files changed, 369 insertions(+) create mode 100644 Examples/Tests/secondary_ion_emission/CMakeLists.txt create mode 100644 Examples/Tests/secondary_ion_emission/analysis.py create mode 120000 Examples/Tests/secondary_ion_emission/analysis_default_regression.py create mode 100644 Examples/Tests/secondary_ion_emission/inputs_test_rz_secondary_ion_emission_picmi.py create mode 100644 Regression/Checksum/benchmarks_json/test_rz_secondary_ion_emission_picmi.json diff --git a/Examples/Tests/CMakeLists.txt b/Examples/Tests/CMakeLists.txt index d9e9404ae3e..5ff1d4a9a70 100644 --- a/Examples/Tests/CMakeLists.txt +++ b/Examples/Tests/CMakeLists.txt @@ -71,6 +71,7 @@ add_subdirectory(resampling) add_subdirectory(restart) add_subdirectory(restart_eb) add_subdirectory(rigid_injection) +add_subdirectory(secondary_ion_emission) add_subdirectory(scraping) add_subdirectory(effective_potential_electrostatic) add_subdirectory(silver_mueller) diff --git a/Examples/Tests/secondary_ion_emission/CMakeLists.txt b/Examples/Tests/secondary_ion_emission/CMakeLists.txt new file mode 100644 index 00000000000..e6e38138a08 --- /dev/null +++ b/Examples/Tests/secondary_ion_emission/CMakeLists.txt @@ -0,0 +1,14 @@ +# Add tests (alphabetical order) ############################################## +# + +if(WarpX_EB) + add_warpx_test( + test_rz_secondary_ion_emission_picmi # name + RZ # dims + 1 # nprocs + inputs_test_rz_secondary_ion_emission_picmi.py # inputs + "analysis.py diags/diag1/" # analysis + "analysis_default_regression.py --path diags/diag1/" # checksum + OFF # dependency + ) +endif() diff --git a/Examples/Tests/secondary_ion_emission/analysis.py b/Examples/Tests/secondary_ion_emission/analysis.py new file mode 100644 index 00000000000..8c2ed5b4af6 --- /dev/null +++ b/Examples/Tests/secondary_ion_emission/analysis.py @@ -0,0 +1,58 @@ +#!/usr/bin/env python +""" +This script checks that electron secondary emission (implemented by a callback function) works as intended. + +In this test, four ions hit a spherical embedded boundary, and produce secondary +electrons with a probability of `0.4`. We thus expect ~2 electrons to be produced. +This script tests the number of electrons emitted and checks that their position is +close to the embedded boundary. 
+""" + +import sys + +import numpy as np +import yt +from openpmd_viewer import OpenPMDTimeSeries + +yt.funcs.mylog.setLevel(0) + +# Open plotfile specified in command line +filename = sys.argv[1] +ts = OpenPMDTimeSeries(filename) + +it = ts.iterations +x, y, z = ts.get_particle(["x", "y", "z"], species="electrons", iteration=it[-1]) + +x_analytic = [-0.091696, 0.011599] +y_analytic = [-0.002282, -0.0111624] +z_analytic = [-0.200242, -0.201728] + +N_sec_e = np.size(z) # number of the secondary electrons + +assert N_sec_e == 2, ( + "Test did not pass: for this set up we expect 2 secondary electrons emitted" +) + +tolerance = 1e-3 + +for i in range(0, N_sec_e): + print("\n") + print(f"Electron # {i}:") + print("NUMERICAL coordinates of the emitted electrons:") + print(f"x={x[i]:5.5f}, y={y[i]:5.5f}, z={z[i]:5.5f}") + print("\n") + print("ANALYTICAL coordinates of the point of contact:") + print(f"x={x_analytic[i]:5.5f}, y={y_analytic[i]:5.5f}, z={z_analytic[i]:5.5f}") + + rel_err_x = np.abs((x[i] - x_analytic[i]) / x_analytic[i]) + rel_err_y = np.abs((y[i] - y_analytic[i]) / y_analytic[i]) + rel_err_z = np.abs((z[i] - z_analytic[i]) / z_analytic[i]) + + print("\n") + print(f"Relative percentage error for x = {rel_err_x * 100:5.4f} %") + print(f"Relative percentage error for y = {rel_err_y * 100:5.4f} %") + print(f"Relative percentage error for z = {rel_err_z * 100:5.4f} %") + + assert ( + (rel_err_x < tolerance) and (rel_err_y < tolerance) and (rel_err_z < tolerance) + ), "Test particle_boundary_interaction did not pass" diff --git a/Examples/Tests/secondary_ion_emission/analysis_default_regression.py b/Examples/Tests/secondary_ion_emission/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/secondary_ion_emission/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/secondary_ion_emission/inputs_test_rz_secondary_ion_emission_picmi.py b/Examples/Tests/secondary_ion_emission/inputs_test_rz_secondary_ion_emission_picmi.py new file mode 100644 index 00000000000..5b6248da33c --- /dev/null +++ b/Examples/Tests/secondary_ion_emission/inputs_test_rz_secondary_ion_emission_picmi.py @@ -0,0 +1,269 @@ +#!/usr/bin/env python3 +# This is the script that tests secondary ion emission when ions hit an embedded boundary +# with a specified secondary emission yield of delta_H = 0.4. Specifically, a callback +# function at each time step ensures that the correct number of secondary electrons is +# emitted when ions impact the embedded boundary, following the given secondary emission +# model defined in sigma_nescap function. This distribution depends on the ion's energy and +# suggests that for an ion incident with 1 keV energy, an average of 0.4 secondary +# electrons will be emitted. +# Simulation is initialized with four ions with i_dist distribution and spherical +# embedded boundary given by implicit function. 
+import numpy as np +from scipy.constants import e, elementary_charge, m_e, proton_mass + +from pywarpx import callbacks, particle_containers, picmi + +########################## +# numerics parameters +########################## + +dt = 0.000000075 + +# --- Nb time steps +Te = 0.0259 # in eV +dist_th = np.sqrt(Te * elementary_charge / m_e) + +max_steps = 3 +diagnostic_interval = 1 + +# --- grid +nr = 64 +nz = 64 + +rmin = 0.0 +rmax = 2 +zmin = -2 +zmax = 2 +delta_H = 0.4 +E_HMax = 250 + +np.random.seed(10025015) +########################## +# numerics components +########################## + +grid = picmi.CylindricalGrid( + number_of_cells=[nr, nz], + n_azimuthal_modes=1, + lower_bound=[rmin, zmin], + upper_bound=[rmax, zmax], + lower_boundary_conditions=["none", "dirichlet"], + upper_boundary_conditions=["dirichlet", "dirichlet"], + lower_boundary_conditions_particles=["none", "reflecting"], + upper_boundary_conditions_particles=["absorbing", "reflecting"], +) + +solver = picmi.ElectrostaticSolver( + grid=grid, method="Multigrid", warpx_absolute_tolerance=1e-7 +) + +embedded_boundary = picmi.EmbeddedBoundary( + implicit_function="-(x**2+y**2+z**2-radius**2)", radius=0.2 +) + +########################## +# physics components +########################## +i_dist = picmi.ParticleListDistribution( + x=[ + 0.025, + 0.0, + -0.1, + -0.14, + ], + y=[0.0, 0.0, 0.0, 0], + z=[-0.26, -0.29, -0.25, -0.23], + ux=[0.18e6, 0.1e6, 0.15e6, 0.21e6], + uy=[0.0, 0.0, 0.0, 0.0], + uz=[8.00e5, 7.20e5, 6.40e5, 5.60e5], + weight=[1, 1, 1, 1], +) + +electrons = picmi.Species( + particle_type="electron", # Specify the particle type + name="electrons", # Name of the species +) + +ions = picmi.Species( + name="ions", + particle_type="proton", + charge=e, + initial_distribution=i_dist, + warpx_save_particles_at_eb=1, +) + +########################## +# diagnostics +########################## + +field_diag = picmi.FieldDiagnostic( + name="diag1", + grid=grid, + period=diagnostic_interval, + data_list=["Er", "Ez", "phi", "rho"], + warpx_format="openpmd", +) + +part_diag = picmi.ParticleDiagnostic( + name="diag1", + period=diagnostic_interval, + species=[ions, electrons], + warpx_format="openpmd", +) + +########################## +# simulation setup +########################## + +sim = picmi.Simulation( + solver=solver, + time_step_size=dt, + max_steps=max_steps, + warpx_embedded_boundary=embedded_boundary, + warpx_amrex_the_arena_is_managed=1, +) + +sim.add_species( + electrons, + layout=picmi.GriddedLayout(n_macroparticle_per_cell=[0, 0, 0], grid=grid), +) + +sim.add_species( + ions, + layout=picmi.GriddedLayout(n_macroparticle_per_cell=[10, 1, 1], grid=grid), +) + +sim.add_diagnostic(part_diag) +sim.add_diagnostic(field_diag) + +sim.initialize_inputs() +sim.initialize_warpx() + +########################## +# python particle data access +########################## + + +def concat(list_of_arrays): + if len(list_of_arrays) == 0: + # Return a 1d array of size 0 + return np.empty(0) + else: + return np.concatenate(list_of_arrays) + + +def sigma_nascap(energy_kEv, delta_H, E_HMax): + """ + Compute sigma_nascap for each element in the energy array using a loop. 
+ + Parameters: + - energy: ndarray or list, energy values in KeV + - delta_H: float, parameter for the formula + - E_HMax: float, parameter for the formula in KeV + + Returns: + - numpy array, computed probability sigma_nascap + """ + sigma_nascap = np.array([]) + # Loop through each energy value + for energy in energy_kEv: + if energy > 0.0: + sigma = ( + delta_H + * (E_HMax + 1.0) + / (E_HMax * 1.0 + energy) + * np.sqrt(energy / 1.0) + ) + else: + sigma = 0.0 + sigma_nascap = np.append(sigma_nascap, sigma) + return sigma_nascap + + +def secondary_emission(): + buffer = particle_containers.ParticleBoundaryBufferWrapper() # boundary buffer + # STEP 1: extract the different parameters of the boundary buffer (normal, time, position) + lev = 0 # level 0 (no mesh refinement here) + n = buffer.get_particle_boundary_buffer_size("ions", "eb") + elect_pc = particle_containers.ParticleContainerWrapper("electrons") + + if n != 0: + r = concat(buffer.get_particle_boundary_buffer("ions", "eb", "x", lev)) + theta = concat(buffer.get_particle_boundary_buffer("ions", "eb", "theta", lev)) + z = concat(buffer.get_particle_boundary_buffer("ions", "eb", "z", lev)) + x = r * np.cos(theta) # from RZ coordinates to 3D coordinates + y = r * np.sin(theta) + ux = concat(buffer.get_particle_boundary_buffer("ions", "eb", "ux", lev)) + uy = concat(buffer.get_particle_boundary_buffer("ions", "eb", "uy", lev)) + uz = concat(buffer.get_particle_boundary_buffer("ions", "eb", "uz", lev)) + w = concat(buffer.get_particle_boundary_buffer("ions", "eb", "w", lev)) + nx = concat(buffer.get_particle_boundary_buffer("ions", "eb", "nx", lev)) + ny = concat(buffer.get_particle_boundary_buffer("ions", "eb", "ny", lev)) + nz = concat(buffer.get_particle_boundary_buffer("ions", "eb", "nz", lev)) + delta_t = concat( + buffer.get_particle_boundary_buffer("ions", "eb", "deltaTimeScraped", lev) + ) + energy_ions = 0.5 * proton_mass * w * (ux**2 + uy**2 + uz**2) + energy_ions_in_kEv = energy_ions / (e * 1000) + sigma_nascap_ions = sigma_nascap(energy_ions_in_kEv, delta_H, E_HMax) + # Loop over all ions in the EB buffer + for i in range(0, n): + sigma = sigma_nascap_ions[i] + # Ne_sec is number of the secondary electrons to be emitted + Ne_sec = int(sigma + np.random.uniform()) + for _ in range(Ne_sec): + xe = np.array([]) + ye = np.array([]) + ze = np.array([]) + we = np.array([]) + delta_te = np.array([]) + uxe = np.array([]) + uye = np.array([]) + uze = np.array([]) + + # Random thermal momenta distribution + ux_th = np.random.normal(0, dist_th) + uy_th = np.random.normal(0, dist_th) + uz_th = np.random.normal(0, dist_th) + + un_th = nx[i] * ux_th + ny[i] * uy_th + nz[i] * uz_th + + if un_th < 0: + ux_th_reflect = ( + -2 * un_th * nx[i] + ux_th + ) # for a "mirror reflection" u(sym)=-2(u.n)n+u + uy_th_reflect = -2 * un_th * ny[i] + uy_th + uz_th_reflect = -2 * un_th * nz[i] + uz_th + + uxe = np.append(uxe, ux_th_reflect) + uye = np.append(uye, uy_th_reflect) + uze = np.append(uze, uz_th_reflect) + else: + uxe = np.append(uxe, ux_th) + uye = np.append(uye, uy_th) + uze = np.append(uze, uz_th) + + xe = np.append(xe, x[i]) + ye = np.append(ye, y[i]) + ze = np.append(ze, z[i]) + we = np.append(we, w[i]) + delta_te = np.append(delta_te, delta_t[i]) + + elect_pc.add_particles( + x=xe + (dt - delta_te) * uxe, + y=ye + (dt - delta_te) * uye, + z=ze + (dt - delta_te) * uze, + ux=uxe, + uy=uye, + uz=uze, + w=we, + ) + buffer.clear_buffer() # reinitialise the boundary buffer + + +# using the new particle container modified at the last step 
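+# Note (assumption on intent): installafterstep registers secondary_emission()
+# to run after every time step, so ions scraped into the boundary buffer during
+# one step seed the replacement electrons before the next step begins.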
+callbacks.installafterstep(secondary_emission)
+##########################
+# simulation run
+##########################
+sim.step(max_steps)  # the whole process is done "max_steps" times
diff --git a/Regression/Checksum/benchmarks_json/test_rz_secondary_ion_emission_picmi.json b/Regression/Checksum/benchmarks_json/test_rz_secondary_ion_emission_picmi.json
new file mode 100644
index 00000000000..cfc84819e97
--- /dev/null
+++ b/Regression/Checksum/benchmarks_json/test_rz_secondary_ion_emission_picmi.json
@@ -0,0 +1,26 @@
+{
+    "electrons": {
+        "particle_momentum_x": 5.621885683102775e-26,
+        "particle_momentum_y": 1.2079178196118306e-25,
+        "particle_momentum_z": 1.2496342823828099e-25,
+        "particle_position_x": 0.10329568998704057,
+        "particle_position_y": 0.013444257249267193,
+        "particle_position_z": 0.4019696082583948,
+        "particle_weight": 2.0
+    },
+    "ions": {
+        "particle_momentum_x": 0.0,
+        "particle_momentum_y": 0.0,
+        "particle_momentum_z": 0.0,
+        "particle_position_x": 0.0,
+        "particle_position_y": 0.0,
+        "particle_position_z": 0.0,
+        "particle_weight": 0.0
+    },
+    "lev=0": {
+        "Er": 1.772547702166409e-06,
+        "Ez": 2.2824957684716966e-06,
+        "phi": 4.338168233265556e-07,
+        "rho": 1.933391680367631e-15
+    }
+}
\ No newline at end of file

From a50cc40204a2c0b78c9dce88dc3753f1d2fa8d51 Mon Sep 17 00:00:00 2001
From: Luca Fedeli
Date: Fri, 31 Jan 2025 20:36:42 +0100
Subject: [PATCH 205/278] Embedded Boundary: take some EB-related methods out
 of WarpX class (#5625)

`ComputeEdgeLengths`, `ComputeFaceAreas`, `ScaleAreas`, and `ScaleEdges` are
pure functions that can be easily taken out of the WarpX class, in order to
make it simpler.

This PR places these four functions under a newly created namespace
`warpx::embedded_boundary`, inside the files `EmbeddedBoundary/EmbeddedBoundary.H/.cpp`.
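For reference, call sites change as in the following minimal sketch (condensed
from the `PML.cpp` hunk in this diff; `t_pml_edge_lengths`, `eb_fact`, and
`lev` are assumed to be in scope):

    // before: static member functions of the WarpX class
    //   WarpX::ComputeEdgeLengths(t_pml_edge_lengths, eb_fact);
    //   WarpX::ScaleEdges(t_pml_edge_lengths, WarpX::CellSize(lev));

    // after: free functions in the new warpx::embedded_boundary namespace
    warpx::embedded_boundary::ComputeEdgeLengths(t_pml_edge_lengths, eb_fact);
    warpx::embedded_boundary::ScaleEdges(t_pml_edge_lengths, WarpX::CellSize(lev));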
---
 Source/BoundaryConditions/PML.cpp            |   7 +-
 Source/EmbeddedBoundary/CMakeLists.txt       |   2 +-
 Source/EmbeddedBoundary/EmbeddedBoundary.H   |  55 +++++
 Source/EmbeddedBoundary/EmbeddedBoundary.cpp | 200 +++++++++++++++++++
 Source/EmbeddedBoundary/Make.package         |   2 +
 Source/EmbeddedBoundary/WarpXInitEB.cpp      | 166 ---------------
 Source/Initialization/WarpXInitData.cpp      |  11 +-
 Source/WarpX.H                               |  25 +--
 8 files changed, 271 insertions(+), 197 deletions(-)
 create mode 100644 Source/EmbeddedBoundary/EmbeddedBoundary.H
 create mode 100644 Source/EmbeddedBoundary/EmbeddedBoundary.cpp

diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp
index 390a09a34c3..90518dc432f 100644
--- a/Source/BoundaryConditions/PML.cpp
+++ b/Source/BoundaryConditions/PML.cpp
@@ -11,6 +11,9 @@
 #include "BoundaryConditions/PML.H"
 #include "BoundaryConditions/PMLComponent.H"
 #include "Fields.H"
+#ifdef AMREX_USE_EB
+#   include "EmbeddedBoundary/EmbeddedBoundary.H"
+#endif
 #ifdef WARPX_USE_FFT
 #   include "FieldSolver/SpectralSolver/SpectralFieldData.H"
 #endif
@@ -738,8 +741,8 @@ PML::PML (const int lev, const BoxArray& grid_ba,
         auto const eb_fact = fieldEBFactory();

         ablastr::fields::VectorField t_pml_edge_lengths = warpx.m_fields.get_alldirs(FieldType::pml_edge_lengths, lev);

-        WarpX::ComputeEdgeLengths(t_pml_edge_lengths, eb_fact);
-        WarpX::ScaleEdges(t_pml_edge_lengths, WarpX::CellSize(lev));
+        warpx::embedded_boundary::ComputeEdgeLengths(t_pml_edge_lengths, eb_fact);
+        warpx::embedded_boundary::ScaleEdges(t_pml_edge_lengths, WarpX::CellSize(lev));
     }
 }
diff --git a/Source/EmbeddedBoundary/CMakeLists.txt b/Source/EmbeddedBoundary/CMakeLists.txt
index 2fa5e3e602b..75f9bbdaa04 100644
--- a/Source/EmbeddedBoundary/CMakeLists.txt
+++ b/Source/EmbeddedBoundary/CMakeLists.txt
@@ -2,10 +2,10 @@ foreach(D IN LISTS WarpX_DIMS)
     warpx_set_suffix_dims(SD ${D})
     target_sources(lib_${SD}
       PRIVATE
+        EmbeddedBoundary.cpp
         Enabled.cpp
         WarpXInitEB.cpp
         WarpXFaceExtensions.cpp
         WarpXFaceInfoBox.H
-        Enabled.cpp
     )
 endforeach()
diff --git a/Source/EmbeddedBoundary/EmbeddedBoundary.H b/Source/EmbeddedBoundary/EmbeddedBoundary.H
new file mode 100644
index 00000000000..fc02667246b
--- /dev/null
+++ b/Source/EmbeddedBoundary/EmbeddedBoundary.H
@@ -0,0 +1,55 @@
+/* Copyright 2021-2025 Lorenzo Giacomel, Luca Fedeli
+ *
+ * This file is part of WarpX.
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+
+#ifndef WARPX_EMBEDDED_BOUNDARY_EMBEDDED_BOUNDARY_H_
+#define WARPX_EMBEDDED_BOUNDARY_EMBEDDED_BOUNDARY_H_
+
+#include "Enabled.H"
+
+#ifdef AMREX_USE_EB
+
+#include
+
+#include
+#include
+
+#include
+
+namespace warpx::embedded_boundary
+{
+    /**
+     * \brief Compute the length of the mesh edges. Here the length is a value in [0, 1].
+     * An edge of length 0 is fully covered.
+     */
+    void ComputeEdgeLengths (
+        ablastr::fields::VectorField& edge_lengths,
+        const amrex::EBFArrayBoxFactory& eb_fact);
+    /**
+     * \brief Compute the area of the mesh faces. Here the area is a value in [0, 1].
+     * A face of area 0 is fully covered.
+     */
+    void ComputeFaceAreas (
+        ablastr::fields::VectorField& face_areas,
+        const amrex::EBFArrayBoxFactory& eb_fact);
+
+    /**
+     * \brief Scale the edge lengths by the mesh width to obtain the real lengths.
+     */
+    void ScaleEdges (
+        ablastr::fields::VectorField& edge_lengths,
+        const std::array& cell_size);
+    /**
+     * \brief Scale the face areas by the mesh widths to obtain the real areas.
+ */ + void ScaleAreas ( + ablastr::fields::VectorField& face_areas, + const std::array& cell_size); +} + +#endif + +#endif //WARPX_EMBEDDED_BOUNDARY_EMBEDDED_BOUNDARY_H_ diff --git a/Source/EmbeddedBoundary/EmbeddedBoundary.cpp b/Source/EmbeddedBoundary/EmbeddedBoundary.cpp new file mode 100644 index 00000000000..9c3d53aefeb --- /dev/null +++ b/Source/EmbeddedBoundary/EmbeddedBoundary.cpp @@ -0,0 +1,200 @@ +/* Copyright 2021-2025 Lorenzo Giacomel, Luca Fedeli + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ + +#include "Enabled.H" + +#ifdef AMREX_USE_EB + +#include "EmbeddedBoundary.H" + +#include "Utils/TextMsg.H" + +#include +#include +#include +#include +#include +#include + +namespace web = warpx::embedded_boundary; + +void +web::ComputeEdgeLengths ( + ablastr::fields::VectorField& edge_lengths, + const amrex::EBFArrayBoxFactory& eb_fact) +{ + BL_PROFILE("ComputeEdgeLengths"); + +#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) + WARPX_ABORT_WITH_MESSAGE("ComputeEdgeLengths only implemented in 2D and 3D"); +#endif + + auto const &flags = eb_fact.getMultiEBCellFlagFab(); + auto const &edge_centroid = eb_fact.getEdgeCent(); + for (int idim = 0; idim < 3; ++idim){ +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + if (idim == 1) { + edge_lengths[1]->setVal(0.); + continue; + } +#endif + for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi){ + amrex::Box const box = mfi.tilebox(edge_lengths[idim]->ixType().toIntVect(), + edge_lengths[idim]->nGrowVect()); + amrex::FabType const fab_type = flags[mfi].getType(box); + auto const &edge_lengths_dim = edge_lengths[idim]->array(mfi); + + if (fab_type == amrex::FabType::regular) { + // every cell in box is all regular + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + edge_lengths_dim(i, j, k) = 1.; + }); + } else if (fab_type == amrex::FabType::covered) { + // every cell in box is all covered + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + edge_lengths_dim(i, j, k) = 0.; + }); + } else { +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + int idim_amrex = idim; + if (idim == 2) { idim_amrex = 1; } + auto const &edge_cent = edge_centroid[idim_amrex]->const_array(mfi); +#elif defined(WARPX_DIM_3D) + auto const &edge_cent = edge_centroid[idim]->const_array(mfi); +#endif + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + if (edge_cent(i, j, k) == amrex::Real(-1.0)) { + // This edge is all covered + edge_lengths_dim(i, j, k) = 0.; + } else if (edge_cent(i, j, k) == amrex::Real(1.0)) { + // This edge is all open + edge_lengths_dim(i, j, k) = 1.; + } else { + // This edge is cut. + edge_lengths_dim(i, j, k) = 1 - amrex::Math::abs(amrex::Real(2.0) + * edge_cent(i, j, k)); + } + + }); + } + } + } +} + + +void +web::ComputeFaceAreas ( + ablastr::fields::VectorField& face_areas, + const amrex::EBFArrayBoxFactory& eb_fact) +{ + BL_PROFILE("ComputeFaceAreas"); + +#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) + WARPX_ABORT_WITH_MESSAGE("ComputeFaceAreas only implemented in 2D and 3D"); +#endif + + auto const &flags = eb_fact.getMultiEBCellFlagFab(); +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + //In 2D the volume frac is actually the area frac. 
+ auto const &area_frac = eb_fact.getVolFrac(); +#elif defined(WARPX_DIM_3D) + auto const &area_frac = eb_fact.getAreaFrac(); +#endif + + for (int idim = 0; idim < 3; ++idim) { +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + if (idim == 0 || idim == 2) { + face_areas[idim]->setVal(0.); + continue; + } +#endif + for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi) { + amrex::Box const box = mfi.tilebox(face_areas[idim]->ixType().toIntVect(), + face_areas[idim]->nGrowVect()); + amrex::FabType const fab_type = flags[mfi].getType(box); + auto const &face_areas_dim = face_areas[idim]->array(mfi); + if (fab_type == amrex::FabType::regular) { + // every cell in box is all regular + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + face_areas_dim(i, j, k) = amrex::Real(1.); + }); + } else if (fab_type == amrex::FabType::covered) { + // every cell in box is all covered + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + face_areas_dim(i, j, k) = amrex::Real(0.); + }); + } else { +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + auto const &face = area_frac.const_array(mfi); +#elif defined(WARPX_DIM_3D) + auto const &face = area_frac[idim]->const_array(mfi); +#endif + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + face_areas_dim(i, j, k) = face(i, j, k); + }); + } + } + } +} + +void +web::ScaleEdges ( + ablastr::fields::VectorField& edge_lengths, + const std::array& cell_size) +{ + BL_PROFILE("ScaleEdges"); + +#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) + WARPX_ABORT_WITH_MESSAGE("ScaleEdges only implemented in 2D and 3D"); +#endif + + for (int idim = 0; idim < 3; ++idim){ +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + if (idim == 1) { continue; } +#endif + for (amrex::MFIter mfi(*edge_lengths[0]); mfi.isValid(); ++mfi) { + const amrex::Box& box = mfi.tilebox(edge_lengths[idim]->ixType().toIntVect(), + edge_lengths[idim]->nGrowVect() ); + auto const &edge_lengths_dim = edge_lengths[idim]->array(mfi); + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + edge_lengths_dim(i, j, k) *= cell_size[idim]; + }); + } + } +} + + +void +web::ScaleAreas ( + ablastr::fields::VectorField& face_areas, + const std::array& cell_size) +{ + BL_PROFILE("ScaleAreas"); + +#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) + WARPX_ABORT_WITH_MESSAGE("ScaleAreas only implemented in 2D and 3D"); +#endif + + for (int idim = 0; idim < 3; ++idim) { +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + if (idim == 0 || idim == 2) { continue; } +#endif + for (amrex::MFIter mfi(*face_areas[0]); mfi.isValid(); ++mfi) { + const amrex::Box& box = mfi.tilebox(face_areas[idim]->ixType().toIntVect(), + face_areas[idim]->nGrowVect() ); + amrex::Real const full_area = cell_size[(idim+1)%3]*cell_size[(idim+2)%3]; + auto const &face_areas_dim = face_areas[idim]->array(mfi); + + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + face_areas_dim(i, j, k) *= full_area; + }); + + } + } +} + +#endif diff --git a/Source/EmbeddedBoundary/Make.package b/Source/EmbeddedBoundary/Make.package index 76a20896f85..e1c6422d99c 100644 --- a/Source/EmbeddedBoundary/Make.package +++ b/Source/EmbeddedBoundary/Make.package @@ -1,9 +1,11 @@ +CEXE_headers += EmbeddedBoundary.H CEXE_headers += Enabled.H CEXE_headers += ParticleScraper.H CEXE_headers += ParticleBoundaryProcess.H CEXE_headers += DistanceToEB.H CEXE_headers += WarpXFaceInfoBox.H +CEXE_sources += EmbeddedBoundary.cpp 
CEXE_sources += Enabled.cpp CEXE_sources += WarpXInitEB.cpp CEXE_sources += WarpXFaceExtensions.cpp diff --git a/Source/EmbeddedBoundary/WarpXInitEB.cpp b/Source/EmbeddedBoundary/WarpXInitEB.cpp index 271f12231b0..3f33259a313 100644 --- a/Source/EmbeddedBoundary/WarpXInitEB.cpp +++ b/Source/EmbeddedBoundary/WarpXInitEB.cpp @@ -124,172 +124,6 @@ WarpX::InitEB () } #ifdef AMREX_USE_EB -void -WarpX::ComputeEdgeLengths (ablastr::fields::VectorField& edge_lengths, - const amrex::EBFArrayBoxFactory& eb_fact) { - BL_PROFILE("ComputeEdgeLengths"); - -#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) - WARPX_ABORT_WITH_MESSAGE("ComputeEdgeLengths only implemented in 2D and 3D"); -#endif - - auto const &flags = eb_fact.getMultiEBCellFlagFab(); - auto const &edge_centroid = eb_fact.getEdgeCent(); - for (int idim = 0; idim < 3; ++idim){ -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - if (idim == 1) { - edge_lengths[1]->setVal(0.); - continue; - } -#endif - for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi){ - amrex::Box const box = mfi.tilebox(edge_lengths[idim]->ixType().toIntVect(), - edge_lengths[idim]->nGrowVect()); - amrex::FabType const fab_type = flags[mfi].getType(box); - auto const &edge_lengths_dim = edge_lengths[idim]->array(mfi); - - if (fab_type == amrex::FabType::regular) { - // every cell in box is all regular - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - edge_lengths_dim(i, j, k) = 1.; - }); - } else if (fab_type == amrex::FabType::covered) { - // every cell in box is all covered - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - edge_lengths_dim(i, j, k) = 0.; - }); - } else { -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - int idim_amrex = idim; - if (idim == 2) { idim_amrex = 1; } - auto const &edge_cent = edge_centroid[idim_amrex]->const_array(mfi); -#elif defined(WARPX_DIM_3D) - auto const &edge_cent = edge_centroid[idim]->const_array(mfi); -#endif - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - if (edge_cent(i, j, k) == amrex::Real(-1.0)) { - // This edge is all covered - edge_lengths_dim(i, j, k) = 0.; - } else if (edge_cent(i, j, k) == amrex::Real(1.0)) { - // This edge is all open - edge_lengths_dim(i, j, k) = 1.; - } else { - // This edge is cut. - edge_lengths_dim(i, j, k) = 1 - amrex::Math::abs(amrex::Real(2.0) - * edge_cent(i, j, k)); - } - - }); - } - } - } -} - - -void -WarpX::ComputeFaceAreas (VectorField& face_areas, - const amrex::EBFArrayBoxFactory& eb_fact) { - BL_PROFILE("ComputeFaceAreas"); - -#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) - WARPX_ABORT_WITH_MESSAGE("ComputeFaceAreas only implemented in 2D and 3D"); -#endif - - auto const &flags = eb_fact.getMultiEBCellFlagFab(); -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - //In 2D the volume frac is actually the area frac. 
- auto const &area_frac = eb_fact.getVolFrac(); -#elif defined(WARPX_DIM_3D) - auto const &area_frac = eb_fact.getAreaFrac(); -#endif - - for (int idim = 0; idim < 3; ++idim) { -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - if (idim == 0 || idim == 2) { - face_areas[idim]->setVal(0.); - continue; - } -#endif - for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi) { - amrex::Box const box = mfi.tilebox(face_areas[idim]->ixType().toIntVect(), - face_areas[idim]->nGrowVect()); - amrex::FabType const fab_type = flags[mfi].getType(box); - auto const &face_areas_dim = face_areas[idim]->array(mfi); - if (fab_type == amrex::FabType::regular) { - // every cell in box is all regular - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - face_areas_dim(i, j, k) = amrex::Real(1.); - }); - } else if (fab_type == amrex::FabType::covered) { - // every cell in box is all covered - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - face_areas_dim(i, j, k) = amrex::Real(0.); - }); - } else { -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - auto const &face = area_frac.const_array(mfi); -#elif defined(WARPX_DIM_3D) - auto const &face = area_frac[idim]->const_array(mfi); -#endif - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - face_areas_dim(i, j, k) = face(i, j, k); - }); - } - } - } -} - - -void -WarpX::ScaleEdges (ablastr::fields::VectorField& edge_lengths, - const std::array& cell_size) { - BL_PROFILE("ScaleEdges"); - -#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) - WARPX_ABORT_WITH_MESSAGE("ScaleEdges only implemented in 2D and 3D"); -#endif - - for (int idim = 0; idim < 3; ++idim){ -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - if (idim == 1) { continue; } -#endif - for (amrex::MFIter mfi(*edge_lengths[0]); mfi.isValid(); ++mfi) { - const amrex::Box& box = mfi.tilebox(edge_lengths[idim]->ixType().toIntVect(), - edge_lengths[idim]->nGrowVect() ); - auto const &edge_lengths_dim = edge_lengths[idim]->array(mfi); - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - edge_lengths_dim(i, j, k) *= cell_size[idim]; - }); - } - } -} - -void -WarpX::ScaleAreas (ablastr::fields::VectorField& face_areas, - const std::array& cell_size) { - BL_PROFILE("ScaleAreas"); - -#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) - WARPX_ABORT_WITH_MESSAGE("ScaleAreas only implemented in 2D and 3D"); -#endif - - for (int idim = 0; idim < 3; ++idim) { -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - if (idim == 0 || idim == 2) { continue; } -#endif - for (amrex::MFIter mfi(*face_areas[0]); mfi.isValid(); ++mfi) { - const amrex::Box& box = mfi.tilebox(face_areas[idim]->ixType().toIntVect(), - face_areas[idim]->nGrowVect() ); - amrex::Real const full_area = cell_size[(idim+1)%3]*cell_size[(idim+2)%3]; - auto const &face_areas_dim = face_areas[idim]->array(mfi); - - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - face_areas_dim(i, j, k) *= full_area; - }); - - } - } -} void WarpX::MarkReducedShapeCells ( diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 3d78615fbc3..cf452df56a2 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -17,6 +17,9 @@ #include "Diagnostics/MultiDiagnostics.H" #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include "EmbeddedBoundary/Enabled.H" +#ifdef AMREX_USE_EB +# include "EmbeddedBoundary/EmbeddedBoundary.H" 
+#endif #include "Fields.H" #include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H" #include "FieldSolver/FiniteDifferenceSolver/MacroscopicProperties/MacroscopicProperties.H" @@ -1236,12 +1239,12 @@ void WarpX::InitializeEBGridData (int lev) if (WarpX::electromagnetic_solver_id == ElectromagneticSolverAlgo::ECT) { auto edge_lengths_lev = m_fields.get_alldirs(FieldType::edge_lengths, lev); - ComputeEdgeLengths(edge_lengths_lev, eb_fact); - ScaleEdges(edge_lengths_lev, CellSize(lev)); + warpx::embedded_boundary::ComputeEdgeLengths(edge_lengths_lev, eb_fact); + warpx::embedded_boundary::ScaleEdges(edge_lengths_lev, CellSize(lev)); auto face_areas_lev = m_fields.get_alldirs(FieldType::face_areas, lev); - ComputeFaceAreas(face_areas_lev, eb_fact); - ScaleAreas(face_areas_lev, CellSize(lev)); + warpx::embedded_boundary::ComputeFaceAreas(face_areas_lev, eb_fact); + warpx::embedded_boundary::ScaleAreas(face_areas_lev, CellSize(lev)); // Compute additional quantities required for the ECT solver MarkExtensionCells(); diff --git a/Source/WarpX.H b/Source/WarpX.H index ee49be787a9..077e8f5d954 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -1047,30 +1047,7 @@ public: ablastr::fields::VectorField const& face_areas, ablastr::fields::VectorField const& edge_lengths ); - /** - * \brief Compute the length of the mesh edges. Here the length is a value in [0, 1]. - * An edge of length 0 is fully covered. - */ - static void ComputeEdgeLengths (ablastr::fields::VectorField& edge_lengths, - const amrex::EBFArrayBoxFactory& eb_fact); - /** - * \brief Compute the area of the mesh faces. Here the area is a value in [0, 1]. - * An edge of area 0 is fully covered. - */ - static void ComputeFaceAreas (ablastr::fields::VectorField& face_areas, - const amrex::EBFArrayBoxFactory& eb_fact); - - /** - * \brief Scale the edges lengths by the mesh width to obtain the real lengths. - */ - static void ScaleEdges (ablastr::fields::VectorField& edge_lengths, - const std::array& cell_size); - /** - * \brief Scale the edges areas by the mesh width to obtain the real areas. - */ - static void ScaleAreas (ablastr::fields::VectorField& face_areas, - const std::array& cell_size); - /** + /** * \brief Initialize information for cell extensions. * The flags convention for m_flag_info_face is as follows * - 0 for unstable cells From 3092d26bbce4613eb4a25abb7c9548490c24a5b2 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 31 Jan 2025 13:00:11 -0800 Subject: [PATCH 206/278] MultiFabRegister: `throw` in get (#5356) Close #5319 Follow-up to #5230 - [x] Throw a runtime exception instead of returning a `nullptr` if a field is requested via the getter. 
- [x] update logic to ensure this passes all tests --------- Co-authored-by: Edoardo Zoni Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> --- Source/BoundaryConditions/PML.cpp | 8 +- Source/Evolve/WarpXEvolve.cpp | 73 ++++++++++++------- .../EffectivePotentialES.cpp | 4 +- .../LabFrameExplicitES.cpp | 4 +- Source/FieldSolver/WarpXPushFieldsEM.cpp | 10 ++- .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 4 +- Source/Parallelization/WarpXComm.cpp | 23 ++++-- Source/Utils/WarpXMovingWindow.cpp | 6 +- Source/ablastr/fields/MultiFabRegister.H | 38 +++++++--- Source/ablastr/fields/MultiFabRegister.cpp | 56 ++++++++++---- 10 files changed, 154 insertions(+), 72 deletions(-) diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index 90518dc432f..1b66195d163 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -1301,16 +1301,16 @@ PML::PushPSATD (ablastr::fields::MultiFabRegister& fields, const int lev) { ablastr::fields::VectorField pml_E_fp = fields.get_alldirs(FieldType::pml_E_fp, lev); ablastr::fields::VectorField pml_B_fp = fields.get_alldirs(FieldType::pml_B_fp, lev); - ablastr::fields::ScalarField pml_F_fp = fields.get(FieldType::pml_F_fp, lev); - ablastr::fields::ScalarField pml_G_fp = fields.get(FieldType::pml_G_fp, lev); + ablastr::fields::ScalarField pml_F_fp = (fields.has(FieldType::pml_F_fp, lev)) ? fields.get(FieldType::pml_F_fp, lev) : nullptr; + ablastr::fields::ScalarField pml_G_fp = (fields.has(FieldType::pml_G_fp, lev)) ? fields.get(FieldType::pml_G_fp, lev) : nullptr; // Update the fields on the fine and coarse patch PushPMLPSATDSinglePatch(lev, *spectral_solver_fp, pml_E_fp, pml_B_fp, pml_F_fp, pml_G_fp, m_fill_guards_fields); if (spectral_solver_cp) { ablastr::fields::VectorField pml_E_cp = fields.get_alldirs(FieldType::pml_E_cp, lev); ablastr::fields::VectorField pml_B_cp = fields.get_alldirs(FieldType::pml_B_cp, lev); - ablastr::fields::ScalarField pml_F_cp = fields.get(FieldType::pml_F_cp, lev); - ablastr::fields::ScalarField pml_G_cp = fields.get(FieldType::pml_G_cp, lev); + ablastr::fields::ScalarField pml_F_cp = (fields.has(FieldType::pml_F_cp, lev)) ? fields.get(FieldType::pml_F_cp, lev) : nullptr; + ablastr::fields::ScalarField pml_G_cp = (fields.has(FieldType::pml_G_cp, lev)) ? 
fields.get(FieldType::pml_G_cp, lev) : nullptr; PushPMLPSATDSinglePatch(lev, *spectral_solver_cp, pml_E_cp, pml_B_cp, pml_F_cp, pml_G_cp, m_fill_guards_fields); } } diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 1b2ff7e34f1..b40503ac1c7 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -671,6 +671,8 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) using warpx::fields::FieldType; + bool const skip_lev0_coarse_patch = true; + const int rho_mid = spectral_solver_fp[0]->m_spectral_index.rho_mid; const int rho_new = spectral_solver_fp[0]->m_spectral_index.rho_new; @@ -804,8 +806,8 @@ WarpX::OneStep_multiJ (const amrex::Real cur_time) PSATDBackwardTransformEBavg( m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_fp, finest_level), m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_fp, finest_level), - m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level), - m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level) + m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level, skip_lev0_coarse_patch), + m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level, skip_lev0_coarse_patch) ); } @@ -876,11 +878,13 @@ WarpX::OneStep_sub1 (Real cur_time) using warpx::fields::FieldType; + bool const skip_lev0_coarse_patch = true; + // i) Push particles and fields on the fine patch (first fine step) PushParticlesandDeposit(fine_lev, cur_time, DtType::FirstHalf); RestrictCurrentFromFineToCoarsePatch( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), - m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level), fine_lev); + m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level, skip_lev0_coarse_patch), fine_lev); RestrictRhoFromFineToCoarsePatch(fine_lev); if (use_filter) { ApplyFilterMF( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), fine_lev); @@ -889,10 +893,13 @@ WarpX::OneStep_sub1 (Real cur_time) m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), fine_lev, Geom(fine_lev).periodicity()); - ApplyFilterandSumBoundaryRho( - m_fields.get_mr_levels(FieldType::rho_fp, finest_level), - m_fields.get_mr_levels(FieldType::rho_cp, finest_level), - fine_lev, PatchType::fine, 0, 2*ncomps); + if (m_fields.has(FieldType::rho_fp, finest_level) && + m_fields.has(FieldType::rho_cp, finest_level)) { + ApplyFilterandSumBoundaryRho( + m_fields.get_mr_levels(FieldType::rho_fp, finest_level), + m_fields.get_mr_levels(FieldType::rho_cp, finest_level, skip_lev0_coarse_patch), + fine_lev, PatchType::fine, 0, 2*ncomps); + } EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf, cur_time); EvolveF(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); @@ -922,13 +929,18 @@ WarpX::OneStep_sub1 (Real cur_time) StoreCurrent(coarse_lev); AddCurrentFromFineLevelandSumBoundary( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), - m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level), - m_fields.get_mr_levels_alldirs(FieldType::current_buf, finest_level), coarse_lev); - AddRhoFromFineLevelandSumBoundary( - m_fields.get_mr_levels(FieldType::rho_fp, finest_level), - m_fields.get_mr_levels(FieldType::rho_cp, finest_level), - m_fields.get_mr_levels(FieldType::rho_buf, finest_level), - coarse_lev, 0, ncomps); + m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level, skip_lev0_coarse_patch), + m_fields.get_mr_levels_alldirs(FieldType::current_buf, finest_level, 
skip_lev0_coarse_patch), coarse_lev); + + if (m_fields.has(FieldType::rho_fp, finest_level) && + m_fields.has(FieldType::rho_cp, finest_level) && + m_fields.has(FieldType::rho_buf, finest_level)) { + AddRhoFromFineLevelandSumBoundary( + m_fields.get_mr_levels(FieldType::rho_fp, finest_level), + m_fields.get_mr_levels(FieldType::rho_cp, finest_level, skip_lev0_coarse_patch), + m_fields.get_mr_levels(FieldType::rho_buf, finest_level, skip_lev0_coarse_patch), + coarse_lev, 0, ncomps); + } EvolveB(fine_lev, PatchType::coarse, dt[fine_lev], DtType::FirstHalf, cur_time); EvolveF(fine_lev, PatchType::coarse, dt[fine_lev], DtType::FirstHalf); @@ -958,16 +970,20 @@ WarpX::OneStep_sub1 (Real cur_time) PushParticlesandDeposit(fine_lev, cur_time + dt[fine_lev], DtType::SecondHalf); RestrictCurrentFromFineToCoarsePatch( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), - m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level), fine_lev); + m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level, skip_lev0_coarse_patch), fine_lev); RestrictRhoFromFineToCoarsePatch(fine_lev); if (use_filter) { ApplyFilterMF( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), fine_lev); } SumBoundaryJ( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), fine_lev, Geom(fine_lev).periodicity()); - ApplyFilterandSumBoundaryRho( - m_fields.get_mr_levels(FieldType::rho_fp, finest_level), - m_fields.get_mr_levels(FieldType::rho_cp, finest_level), - fine_lev, PatchType::fine, 0, ncomps); + + if (m_fields.has(FieldType::rho_fp, finest_level) && + m_fields.has(FieldType::rho_cp, finest_level)) { + ApplyFilterandSumBoundaryRho( + m_fields.get_mr_levels(FieldType::rho_fp, finest_level), + m_fields.get_mr_levels(FieldType::rho_cp, finest_level, skip_lev0_coarse_patch), + fine_lev, PatchType::fine, 0, ncomps); + } EvolveB(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf, cur_time + dt[fine_lev]); EvolveF(fine_lev, PatchType::fine, 0.5_rt*dt[fine_lev], DtType::FirstHalf); @@ -996,14 +1012,19 @@ WarpX::OneStep_sub1 (Real cur_time) RestoreCurrent(coarse_lev); AddCurrentFromFineLevelandSumBoundary( m_fields.get_mr_levels_alldirs(FieldType::current_fp, finest_level), - m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level), - m_fields.get_mr_levels_alldirs(FieldType::current_buf, finest_level), + m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level, skip_lev0_coarse_patch), + m_fields.get_mr_levels_alldirs(FieldType::current_buf, finest_level, skip_lev0_coarse_patch), coarse_lev); - AddRhoFromFineLevelandSumBoundary( - m_fields.get_mr_levels(FieldType::rho_fp, finest_level), - m_fields.get_mr_levels(FieldType::rho_cp, finest_level), - m_fields.get_mr_levels(FieldType::rho_buf, finest_level), - coarse_lev, ncomps, ncomps); + + if (m_fields.has(FieldType::rho_fp, finest_level) && + m_fields.has(FieldType::rho_cp, finest_level) && + m_fields.has(FieldType::rho_buf, finest_level)) { + AddRhoFromFineLevelandSumBoundary( + m_fields.get_mr_levels(FieldType::rho_fp, finest_level), + m_fields.get_mr_levels(FieldType::rho_cp, finest_level, skip_lev0_coarse_patch), + m_fields.get_mr_levels(FieldType::rho_buf, finest_level, skip_lev0_coarse_patch), + coarse_lev, ncomps, ncomps); + } EvolveE(fine_lev, PatchType::coarse, dt[fine_lev], cur_time + 0.5_rt * dt[fine_lev]); FillBoundaryE(fine_lev, PatchType::coarse, guard_cells.ng_FieldSolver, diff --git a/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.cpp 
b/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.cpp index 0a5330b049d..b2f93f7e2b3 100644 --- a/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.cpp +++ b/Source/FieldSolver/ElectrostaticSolvers/EffectivePotentialES.cpp @@ -34,9 +34,11 @@ void EffectivePotentialES::ComputeSpaceChargeField ( using ablastr::fields::MultiLevelVectorField; using warpx::fields::FieldType; + bool const skip_lev0_coarse_patch = true; + // grab the simulation fields const MultiLevelScalarField rho_fp = fields.get_mr_levels(FieldType::rho_fp, max_level); - const MultiLevelScalarField rho_cp = fields.get_mr_levels(FieldType::rho_cp, max_level); + const MultiLevelScalarField rho_cp = fields.get_mr_levels(FieldType::rho_cp, max_level, skip_lev0_coarse_patch); const MultiLevelScalarField phi_fp = fields.get_mr_levels(FieldType::phi_fp, max_level); const MultiLevelVectorField Efield_fp = fields.get_mr_levels_alldirs(FieldType::Efield_fp, max_level); diff --git a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp index 643efefb2f3..88a0899a7cb 100755 --- a/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp +++ b/Source/FieldSolver/ElectrostaticSolvers/LabFrameExplicitES.cpp @@ -31,8 +31,10 @@ void LabFrameExplicitES::ComputeSpaceChargeField ( using ablastr::fields::MultiLevelVectorField; using warpx::fields::FieldType; + bool const skip_lev0_coarse_patch = true; + const MultiLevelScalarField rho_fp = fields.get_mr_levels(FieldType::rho_fp, max_level); - const MultiLevelScalarField rho_cp = fields.get_mr_levels(FieldType::rho_cp, max_level); + const MultiLevelScalarField rho_cp = fields.get_mr_levels(FieldType::rho_cp, max_level, skip_lev0_coarse_patch); const MultiLevelScalarField phi_fp = fields.get_mr_levels(FieldType::phi_fp, max_level); const MultiLevelVectorField Efield_fp = fields.get_mr_levels_alldirs(FieldType::Efield_fp, max_level); diff --git a/Source/FieldSolver/WarpXPushFieldsEM.cpp b/Source/FieldSolver/WarpXPushFieldsEM.cpp index 7e04f1c2b15..0163d158dd0 100644 --- a/Source/FieldSolver/WarpXPushFieldsEM.cpp +++ b/Source/FieldSolver/WarpXPushFieldsEM.cpp @@ -722,6 +722,8 @@ WarpX::PushPSATD (amrex::Real start_time) "PushFieldsEM: PSATD solver selected but not built"); #else + bool const skip_lev0_coarse_patch = true; + const int rho_old = spectral_solver_fp[0]->m_spectral_index.rho_old; const int rho_new = spectral_solver_fp[0]->m_spectral_index.rho_new; @@ -853,8 +855,8 @@ WarpX::PushPSATD (amrex::Real start_time) if (WarpX::fft_do_time_averaging) { auto Efield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_fp, finest_level); auto Bfield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_fp, finest_level); - auto Efield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level); - auto Bfield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level); + auto Efield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level, skip_lev0_coarse_patch); + auto Bfield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level, skip_lev0_coarse_patch); PSATDBackwardTransformEBavg(Efield_avg_fp, Bfield_avg_fp, Efield_avg_cp, Bfield_avg_cp); } if (WarpX::do_dive_cleaning) { PSATDBackwardTransformF(); } @@ -1105,6 +1107,8 @@ WarpX::EvolveG (int lev, PatchType patch_type, amrex::Real a_dt, DtType /*a_dt_t WARPX_PROFILE("WarpX::EvolveG()"); + bool const skip_lev0_coarse_patch = true; + // Evolve G 
field in regular cells if (patch_type == PatchType::fine) { @@ -1115,7 +1119,7 @@ WarpX::EvolveG (int lev, PatchType patch_type, amrex::Real a_dt, DtType /*a_dt_t } else // coarse patch { - ablastr::fields::MultiLevelVectorField const& Bfield_cp_new = m_fields.get_mr_levels_alldirs(FieldType::Bfield_cp, finest_level); + ablastr::fields::MultiLevelVectorField const& Bfield_cp_new = m_fields.get_mr_levels_alldirs(FieldType::Bfield_cp, finest_level, skip_lev0_coarse_patch); m_fdtd_solver_cp[lev]->EvolveG( m_fields.get(FieldType::G_cp, lev), Bfield_cp_new[lev], a_dt); diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 46950030322..18efba3f445 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -206,11 +206,13 @@ void WarpX::HybridPICDepositInitialRhoAndJ () { using warpx::fields::FieldType; + bool const skip_lev0_coarse_patch = true; + ablastr::fields::MultiLevelScalarField rho_fp_temp = m_fields.get_mr_levels(FieldType::hybrid_rho_fp_temp, finest_level); ablastr::fields::MultiLevelVectorField current_fp_temp = m_fields.get_mr_levels_alldirs(FieldType::hybrid_current_fp_temp, finest_level); mypc->DepositCharge(rho_fp_temp, 0._rt); mypc->DepositCurrent(current_fp_temp, dt[0], 0._rt); - SyncRho(rho_fp_temp, m_fields.get_mr_levels(FieldType::rho_cp, finest_level), m_fields.get_mr_levels(FieldType::rho_buf, finest_level)); + SyncRho(rho_fp_temp, m_fields.get_mr_levels(FieldType::rho_cp, finest_level, skip_lev0_coarse_patch), m_fields.get_mr_levels(FieldType::rho_buf, finest_level, skip_lev0_coarse_patch)); SyncCurrent("hybrid_current_fp_temp"); for (int lev=0; lev <= finest_level; ++lev) { // SyncCurrent does not include a call to FillBoundary, but it is needed diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp index b82e4d687a4..d5c36084467 100644 --- a/Source/Parallelization/WarpXComm.cpp +++ b/Source/Parallelization/WarpXComm.cpp @@ -836,6 +836,8 @@ WarpX::FillBoundaryE_avg(int lev, IntVect ng) void WarpX::FillBoundaryE_avg (int lev, PatchType patch_type, IntVect ng) { + bool const skip_lev0_coarse_patch = true; + if (patch_type == PatchType::fine) { if (do_pml && pml[lev]->ok()) @@ -865,7 +867,7 @@ WarpX::FillBoundaryE_avg (int lev, PatchType patch_type, IntVect ng) WARPX_ABORT_WITH_MESSAGE("Averaged Galilean PSATD with PML is not yet implemented"); } - ablastr::fields::MultiLevelVectorField Efield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level); + ablastr::fields::MultiLevelVectorField Efield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level, skip_lev0_coarse_patch); const amrex::Periodicity& cperiod = Geom(lev-1).periodicity(); if ( m_safe_guard_cells ) { @@ -896,6 +898,8 @@ WarpX::FillBoundaryB_avg (int lev, PatchType patch_type, IntVect ng) { using ablastr::fields::Direction; + bool const skip_lev0_coarse_patch = true; + if (patch_type == PatchType::fine) { if (do_pml && pml[lev]->ok()) @@ -925,7 +929,7 @@ WarpX::FillBoundaryB_avg (int lev, PatchType patch_type, IntVect ng) WARPX_ABORT_WITH_MESSAGE("Averaged Galilean PSATD with PML is not yet implemented"); } - ablastr::fields::MultiLevelVectorField Bfield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level); + ablastr::fields::MultiLevelVectorField Bfield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level, skip_lev0_coarse_patch); const amrex::Periodicity& 
cperiod = Geom(lev-1).periodicity(); if ( m_safe_guard_cells ){ @@ -1077,12 +1081,14 @@ WarpX::SyncCurrent (const std::string& current_fp_string) WARPX_PROFILE("WarpX::SyncCurrent()"); + bool const skip_lev0_coarse_patch = true; + ablastr::fields::MultiLevelVectorField const& J_fp = m_fields.get_mr_levels_alldirs(current_fp_string, finest_level); // If warpx.do_current_centering = 1, center currents from nodal grid to staggered grid if (do_current_centering) { - ablastr::fields::MultiLevelVectorField const& J_fp_nodal = m_fields.get_mr_levels_alldirs(FieldType::current_fp_nodal, finest_level+1); + ablastr::fields::MultiLevelVectorField const& J_fp_nodal = m_fields.get_mr_levels_alldirs(FieldType::current_fp_nodal, finest_level); AMREX_ALWAYS_ASSERT_WITH_MESSAGE(finest_level <= 1, "warpx.do_current_centering=1 not supported with more than one fine levels"); @@ -1192,7 +1198,7 @@ WarpX::SyncCurrent (const std::string& current_fp_string) } }); // Now it's safe to apply filter and sumboundary on J_cp - ablastr::fields::MultiLevelVectorField const& J_cp = m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level); + ablastr::fields::MultiLevelVectorField const& J_cp = m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level, skip_lev0_coarse_patch); if (use_filter) { ApplyFilterMF(J_cp, lev+1, idim); @@ -1207,14 +1213,14 @@ WarpX::SyncCurrent (const std::string& current_fp_string) // filtering depends on the level. This is also done before any // same-level communication because it's easier this way to // avoid double counting. - ablastr::fields::MultiLevelVectorField const& J_cp = m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level); + ablastr::fields::MultiLevelVectorField const& J_cp = m_fields.get_mr_levels_alldirs(FieldType::current_cp, finest_level, skip_lev0_coarse_patch); J_cp[lev][Direction{idim}]->setVal(0.0); ablastr::coarsen::average::Coarsen(*J_cp[lev][Direction{idim}], *J_fp[lev][Direction{idim}], refRatio(lev-1)); if (m_fields.has(FieldType::current_buf, Direction{idim}, lev)) { - ablastr::fields::MultiLevelVectorField const& J_buffer = m_fields.get_mr_levels_alldirs(FieldType::current_buf, finest_level); + ablastr::fields::MultiLevelVectorField const& J_buffer = m_fields.get_mr_levels_alldirs(FieldType::current_buf, finest_level, skip_lev0_coarse_patch); IntVect const& ng = J_cp[lev][Direction{idim}]->nGrowVect(); AMREX_ASSERT(ng.allLE(J_buffer[lev][Direction{idim}]->nGrowVect())); @@ -1241,14 +1247,15 @@ WarpX::SyncCurrent (const std::string& current_fp_string) void WarpX::SyncRho () { + bool const skip_lev0_coarse_patch = true; const ablastr::fields::MultiLevelScalarField rho_fp = m_fields.has(FieldType::rho_fp, 0) ? m_fields.get_mr_levels(FieldType::rho_fp, finest_level) : ablastr::fields::MultiLevelScalarField{static_cast(finest_level+1)}; const ablastr::fields::MultiLevelScalarField rho_cp = m_fields.has(FieldType::rho_cp, 1) ? - m_fields.get_mr_levels(FieldType::rho_cp, finest_level) : + m_fields.get_mr_levels(FieldType::rho_cp, finest_level, skip_lev0_coarse_patch) : ablastr::fields::MultiLevelScalarField{static_cast(finest_level+1)}; const ablastr::fields::MultiLevelScalarField rho_buf = m_fields.has(FieldType::rho_buf, 1) ? 
- m_fields.get_mr_levels(FieldType::rho_buf, finest_level) : + m_fields.get_mr_levels(FieldType::rho_buf, finest_level, skip_lev0_coarse_patch) : ablastr::fields::MultiLevelScalarField{static_cast(finest_level+1)}; SyncRho(rho_fp, rho_cp, rho_buf); diff --git a/Source/Utils/WarpXMovingWindow.cpp b/Source/Utils/WarpXMovingWindow.cpp index cc8886fc67f..b37aa41e28a 100644 --- a/Source/Utils/WarpXMovingWindow.cpp +++ b/Source/Utils/WarpXMovingWindow.cpp @@ -143,6 +143,8 @@ WarpX::MoveWindow (const int step, bool move_j) using ablastr::fields::Direction; using warpx::fields::FieldType; + bool const skip_lev0_coarse_patch = true; + if (step == start_moving_window_step) { amrex::Print() << Utils::TextMsg::Info("Starting moving window"); } @@ -276,8 +278,8 @@ WarpX::MoveWindow (const int step, bool move_j) shiftMF(*m_fields.get(FieldType::Bfield_aux, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells); shiftMF(*m_fields.get(FieldType::Efield_aux, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells); if (fft_do_time_averaging) { - ablastr::fields::MultiLevelVectorField Efield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level); - ablastr::fields::MultiLevelVectorField Bfield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level); + ablastr::fields::MultiLevelVectorField Efield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level, skip_lev0_coarse_patch); + ablastr::fields::MultiLevelVectorField Bfield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level, skip_lev0_coarse_patch); shiftMF(*Bfield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); shiftMF(*Efield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells, diff --git a/Source/ablastr/fields/MultiFabRegister.H b/Source/ablastr/fields/MultiFabRegister.H index 21df20c1678..11cf932c12c 100644 --- a/Source/ablastr/fields/MultiFabRegister.H +++ b/Source/ablastr/fields/MultiFabRegister.H @@ -472,6 +472,7 @@ namespace ablastr::fields * * @param name the name of the field * @param finest_level the highest MR level to return + * @param skip_level_0 return a nullptr for level 0 * @return non-owning pointers to the MultiFab (field) on all levels */ //@{ @@ -479,24 +480,28 @@ namespace ablastr::fields [[nodiscard]] MultiLevelScalarField get_mr_levels ( T name, - int finest_level + int finest_level, + bool skip_level_0=false ) { return internal_get_mr_levels( getExtractedName(name), - finest_level + finest_level, + skip_level_0 ); } template [[nodiscard]] ConstMultiLevelScalarField get_mr_levels ( T name, - int finest_level + int finest_level, + bool skip_level_0=false ) const { return internal_get_mr_levels( getExtractedName(name), - finest_level + finest_level, + skip_level_0 ); } //@} @@ -543,6 +548,7 @@ namespace ablastr::fields * * @param name the name of the field * @param finest_level the highest MR level to return + * @param skip_level_0 return a nullptr for level 0 * @return non-owning pointers to all components of a vector field on all MR levels */ //@{ @@ -550,24 +556,28 @@ namespace ablastr::fields [[nodiscard]] MultiLevelVectorField get_mr_levels_alldirs ( T name, - int finest_level + int finest_level, + bool skip_level_0=false ) { return internal_get_mr_levels_alldirs( getExtractedName(name), - finest_level + 
finest_level, + skip_level_0 ); } template [[nodiscard]] ConstMultiLevelVectorField get_mr_levels_alldirs ( T name, - int finest_level + int finest_level, + bool skip_level_0=false ) const { return internal_get_mr_levels_alldirs( getExtractedName(name), - finest_level + finest_level, + skip_level_0 ); } //@} @@ -762,12 +772,14 @@ namespace ablastr::fields [[nodiscard]] MultiLevelScalarField internal_get_mr_levels ( std::string const & name, - int finest_level + int finest_level, + bool skip_level_0 ); [[nodiscard]] ConstMultiLevelScalarField internal_get_mr_levels ( std::string const & name, - int finest_level + int finest_level, + bool skip_level_0 ) const; [[nodiscard]] VectorField internal_get_alldirs ( @@ -782,12 +794,14 @@ namespace ablastr::fields [[nodiscard]] MultiLevelVectorField internal_get_mr_levels_alldirs ( std::string const & name, - int finest_level + int finest_level, + bool skip_level_0 ); [[nodiscard]] ConstMultiLevelVectorField internal_get_mr_levels_alldirs ( std::string const & name, - int finest_level + int finest_level, + bool skip_level_0 ) const; void diff --git a/Source/ablastr/fields/MultiFabRegister.cpp b/Source/ablastr/fields/MultiFabRegister.cpp index 2c384a90089..a1266deeab0 100644 --- a/Source/ablastr/fields/MultiFabRegister.cpp +++ b/Source/ablastr/fields/MultiFabRegister.cpp @@ -350,9 +350,7 @@ namespace ablastr::fields ) { if (m_mf_register.count(internal_name) == 0) { - // FIXME: temporary, throw a std::runtime_error - // throw std::runtime_error("MultiFabRegister::get name does not exist in register: " + key); - return nullptr; + throw std::runtime_error("MultiFabRegister::get name does not exist in register: " + internal_name); } amrex::MultiFab & mf = m_mf_register.at(internal_name).m_mf; @@ -365,9 +363,7 @@ namespace ablastr::fields ) const { if (m_mf_register.count(internal_name) == 0) { - // FIXME: temporary, throw a std::runtime_error - // throw std::runtime_error("MultiFabRegister::get name does not exist in register: " + internal_name); - return nullptr; + throw std::runtime_error("MultiFabRegister::get name does not exist in register: " + internal_name); } amrex::MultiFab const & mf = m_mf_register.at(internal_name).m_mf; @@ -419,14 +415,22 @@ namespace ablastr::fields MultiLevelScalarField MultiFabRegister::internal_get_mr_levels ( std::string const & name, - int finest_level + int finest_level, + bool skip_level_0 ) { MultiLevelScalarField field_on_level; field_on_level.reserve(finest_level+1); for (int lvl = 0; lvl <= finest_level; lvl++) { - field_on_level.push_back(internal_get(name, lvl)); + if (lvl == 0 && skip_level_0) + { + field_on_level.push_back(nullptr); + } + else + { + field_on_level.push_back(internal_get(name, lvl)); + } } return field_on_level; } @@ -434,14 +438,22 @@ namespace ablastr::fields ConstMultiLevelScalarField MultiFabRegister::internal_get_mr_levels ( std::string const & name, - int finest_level + int finest_level, + bool skip_level_0 ) const { ConstMultiLevelScalarField field_on_level; field_on_level.reserve(finest_level+1); for (int lvl = 0; lvl <= finest_level; lvl++) { - field_on_level.push_back(internal_get(name, lvl)); + if (lvl == 0 && skip_level_0) + { + field_on_level.push_back(nullptr); + } + else + { + field_on_level.push_back(internal_get(name, lvl)); + } } return field_on_level; } @@ -483,7 +495,8 @@ namespace ablastr::fields MultiLevelVectorField MultiFabRegister::internal_get_mr_levels_alldirs ( std::string const & name, - int finest_level + int finest_level, + bool skip_level_0 ) { 
MultiLevelVectorField field_on_level; @@ -497,7 +510,14 @@ namespace ablastr::fields // insert components for (Direction const & dir : m_all_dirs) { - field_on_level[lvl][dir] = internal_get(name, dir, lvl); + if (lvl == 0 && skip_level_0) + { + field_on_level[lvl][dir] = nullptr; + } + else + { + field_on_level[lvl][dir] = internal_get(name, dir, lvl); + } } } return field_on_level; @@ -506,7 +526,8 @@ namespace ablastr::fields ConstMultiLevelVectorField MultiFabRegister::internal_get_mr_levels_alldirs ( std::string const & name, - int finest_level + int finest_level, + bool skip_level_0 ) const { ConstMultiLevelVectorField field_on_level; @@ -520,7 +541,14 @@ namespace ablastr::fields // insert components for (Direction const & dir : m_all_dirs) { - field_on_level[lvl][dir] = internal_get(name, dir, lvl); + if (lvl == 0 && skip_level_0) + { + field_on_level[lvl][dir] = nullptr; + } + else + { + field_on_level[lvl][dir] = internal_get(name, dir, lvl); + } } } return field_on_level; From ca9b8f6d48105e398adb672e46df132b6cf5798c Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 3 Feb 2025 11:20:34 -0800 Subject: [PATCH 207/278] Doc: Frontier OpenMP Load (#5631) Work-around for the ROCm module that does not add the `llvm/lib` sub-directory to the `LD_LIBRARY_PATH`. Only an issue on `install`, if runpath is stripped (default). --- Tools/machines/frontier-olcf/frontier_warpx.profile.example | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Tools/machines/frontier-olcf/frontier_warpx.profile.example b/Tools/machines/frontier-olcf/frontier_warpx.profile.example index ad78ab1acaf..b51946ce832 100644 --- a/Tools/machines/frontier-olcf/frontier_warpx.profile.example +++ b/Tools/machines/frontier-olcf/frontier_warpx.profile.example @@ -13,6 +13,9 @@ module load cray-mpich/8.1.28 module load cce/17.0.0 # must be loaded after rocm # https://docs.olcf.ornl.gov/systems/frontier_user_guide.html#compatible-compiler-rocm-toolchain-versions +# Fix for OpenMP Runtime (OLCFHELP-21543) +export LD_LIBRARY_PATH=${LD_LIBRARY_PATH}:${ROCM_PATH}/llvm/lib + # optional: faster builds module load ccache module load ninja From cb30300cbba174b321bc244a55f07a4e8583f2aa Mon Sep 17 00:00:00 2001 From: David Grote Date: Mon, 3 Feb 2025 11:27:54 -0800 Subject: [PATCH 208/278] Add FieldPoyntingFlux reduced diagnostic (#5475) This adds a reduced diagnostic that calculates the Poynting flux on the surfaces of the domain, providing the power flow into and out of the domain. This also includes the time integrated data. When using the implicit evolve scheme, to get the energy accounting correct, the flux needs to be calculated at the mid step. For this reason, the `ComputeDiagsMidStep` was added which is called directly at the appropriate times. Because of the time integration, there are two main differences of this reduced diagnostic compared to the others. The first is that it is calculated every time step in order to get the full resolution in time. The intervals parameter still controls how often the diagnostic data is written out. The second is that a facility is added to write out the values of the time integration to a file when a checkpoint is made, so on a restart the integration can continue with the previous values. The facility was written in a general way so that other reduced diagnostics can also do this. The CI test using the implicit solver is dependent on PR #5498 and PR #5489. 
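For reference, the bookkeeping follows the standard Poynting theorem (SI units;
given here as background, the notation is not quoted from the implementation):

    \mathbf{S} = \frac{1}{\mu_0}\,\mathbf{E}\times\mathbf{B},
    \qquad
    \frac{\mathrm{d}U_\mathrm{field}}{\mathrm{d}t}
        = -\oint_{\partial\Omega} \mathbf{S}\cdot\hat{\mathbf{n}}\,\mathrm{d}A
    \quad \text{(no particles crossing } \partial\Omega\text{)},

so the running time integral of each boundary term is the total energy that has
left (or, if negative, entered) the domain through that surface.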
---------

Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com>
---
 Docs/source/usage/parameters.rst              |   6 +
 Examples/Tests/pec/CMakeLists.txt             |  20 ++
 .../pec/analysis_pec_insulator_implicit.py    |  57 +++
 ...nputs_test_2d_pec_field_insulator_implicit |  73 ++++
 ...st_2d_pec_field_insulator_implicit_restart |   5 +
 .../inputs_test_3d_reduced_diags              |   4 +-
 Python/pywarpx/picmi.py                       |   1 +
 .../test_2d_pec_field_insulator_implicit.json |  14 +
 ..._pec_field_insulator_implicit_restart.json |  14 +
 .../FlushFormats/FlushFormatCheckpoint.H      |   2 +
 .../FlushFormats/FlushFormatCheckpoint.cpp    |  12 +
 .../Diagnostics/ReducedDiags/CMakeLists.txt   |   1 +
 .../ReducedDiags/FieldPoyntingFlux.H          |  63 ++++
 .../ReducedDiags/FieldPoyntingFlux.cpp        | 333 ++++++++++++++++++
 Source/Diagnostics/ReducedDiags/Make.package  |   1 +
 .../ReducedDiags/MultiReducedDiags.H          |  11 +
 .../ReducedDiags/MultiReducedDiags.cpp        |  39 ++
 .../Diagnostics/ReducedDiags/ReducedDiags.H   |  21 ++
 .../Diagnostics/ReducedDiags/ReducedDiags.cpp |  21 ++
 Source/Diagnostics/WarpXIO.cpp                |   3 +
 .../ImplicitSolvers/SemiImplicitEM.cpp        |   2 +
 .../StrangImplicitSpectralEM.cpp              |   2 +
 .../ImplicitSolvers/ThetaImplicitEM.cpp       |   2 +
 23 files changed, 706 insertions(+), 1 deletion(-)
 create mode 100755 Examples/Tests/pec/analysis_pec_insulator_implicit.py
 create mode 100644 Examples/Tests/pec/inputs_test_2d_pec_field_insulator_implicit
 create mode 100644 Examples/Tests/pec/inputs_test_2d_pec_field_insulator_implicit_restart
 create mode 100644 Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator_implicit.json
 create mode 100644 Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator_implicit_restart.json
 create mode 100644 Source/Diagnostics/ReducedDiags/FieldPoyntingFlux.H
 create mode 100644 Source/Diagnostics/ReducedDiags/FieldPoyntingFlux.cpp

diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst
index 7c92b5cf9e7..aaba7130b87 100644
--- a/Docs/source/usage/parameters.rst
+++ b/Docs/source/usage/parameters.rst
@@ -3182,6 +3182,12 @@ This shifts analysis from post-processing to runtime calculation of reduction op
       Note that the fields are averaged on the cell centers before their maximum values are
       computed.

+    * ``FieldPoyntingFlux``
+        Integrates the normal Poynting flux over each domain boundary surface and also integrates the flux over time.
+        This provides the power and total energy loss into or out of the simulation domain.
+        The output columns are the flux for each dimension on the lower boundaries, then the higher boundaries,
+        then the integrated energy loss for each dimension on the lower and higher boundaries.
+
     * ``FieldProbe``
        This type computes the value of each component of the electric and magnetic fields
        and of the Poynting vector (a measure of electromagnetic flux) at points in the domain.
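A minimal way to enable the diagnostic from an inputs file (the same lines,
together with a FieldEnergy diagnostic, appear in the CI test added below):

    warpx.reduced_diags_names = poyntingflux
    poyntingflux.type = FieldPoyntingFlux
    poyntingflux.intervals = 1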
diff --git a/Examples/Tests/pec/CMakeLists.txt b/Examples/Tests/pec/CMakeLists.txt
index f331249ded0..66d9dd1c13e 100644
--- a/Examples/Tests/pec/CMakeLists.txt
+++ b/Examples/Tests/pec/CMakeLists.txt
@@ -40,3 +40,23 @@ add_warpx_test(
     "analysis_default_regression.py --path diags/diag1000010"  # checksum
     OFF  # dependency
 )
+
+add_warpx_test(
+    test_2d_pec_field_insulator_implicit  # name
+    2  # dims
+    2  # nprocs
+    inputs_test_2d_pec_field_insulator_implicit  # inputs
+    "analysis_pec_insulator_implicit.py diags/diag1000020"  # analysis
+    "analysis_default_regression.py --path diags/diag1000020"  # checksum
+    OFF  # dependency
+)
+
+add_warpx_test(
+    test_2d_pec_field_insulator_implicit_restart  # name
+    2  # dims
+    2  # nprocs
+    inputs_test_2d_pec_field_insulator_implicit_restart  # inputs
+    "analysis_pec_insulator_implicit.py diags/diag1000020"  # analysis
+    "analysis_default_regression.py --path diags/diag1000020"  # checksum
+    test_2d_pec_field_insulator_implicit  # dependency
+)
diff --git a/Examples/Tests/pec/analysis_pec_insulator_implicit.py b/Examples/Tests/pec/analysis_pec_insulator_implicit.py
new file mode 100755
index 00000000000..1fdbc2261a8
--- /dev/null
+++ b/Examples/Tests/pec/analysis_pec_insulator_implicit.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python3
+
+#
+#
+# This file is part of WarpX.
+#
+# License: BSD-3-Clause-LBNL
+#
+# This is a script that analyses the simulation results from
+# the scripts `inputs_test_2d_pec_field_insulator_implicit` and
+# `inputs_test_2d_pec_field_insulator_implicit_restart`.
+# The scripts model an insulator boundary condition on part of the
+# upper x boundary that pushes B field into the domain. The implicit
+# solver is used, converging to machine tolerance. The energy accounting
+# should be exact to machine precision, so that the energy in the system
+# should be the same as the amount of energy pushed in from the boundary.
+# This is checked using the FieldEnergy and FieldPoyntingFlux reduced
+# diagnostics.
+import sys + +import matplotlib + +matplotlib.use("Agg") +import matplotlib.pyplot as plt +import numpy as np + +# this will be the name of the plot file +fn = sys.argv[1] + +EE = np.loadtxt(f"{fn}/../reducedfiles/fieldenergy.txt", skiprows=1) +SS = np.loadtxt(f"{fn}/../reducedfiles/poyntingflux.txt", skiprows=1) +SSsum = SS[:, 2:6].sum(1) +EEloss = SS[:, 7:].sum(1) + +dt = EE[1, 1] + +fig, ax = plt.subplots() +ax.plot(EE[:, 0], EE[:, 2], label="field energy") +ax.plot(SS[:, 0], -EEloss, label="-flux*dt") +ax.legend() +ax.set_xlabel("time (s)") +ax.set_ylabel("energy (J)") +fig.savefig("energy_history.png") + +fig, ax = plt.subplots() +ax.plot(EE[:, 0], (EE[:, 2] + EEloss) / EE[:, 2].max()) +ax.set_xlabel("time (s)") +ax.set_ylabel("energy difference/max energy (1)") +fig.savefig("energy_difference.png") + +tolerance_rel = 1.0e-13 + +energy_difference_fraction = np.abs((EE[:, 2] + EEloss) / EE[:, 2].max()).max() +print(f"energy accounting error = {energy_difference_fraction}") +print(f"tolerance_rel = {tolerance_rel}") + +assert energy_difference_fraction < tolerance_rel diff --git a/Examples/Tests/pec/inputs_test_2d_pec_field_insulator_implicit b/Examples/Tests/pec/inputs_test_2d_pec_field_insulator_implicit new file mode 100644 index 00000000000..ec61e3f8605 --- /dev/null +++ b/Examples/Tests/pec/inputs_test_2d_pec_field_insulator_implicit @@ -0,0 +1,73 @@ +# Maximum number of time steps +max_step = 20 + +# number of grid points +amr.n_cell = 32 32 +amr.blocking_factor = 16 + +# Maximum level in hierarchy (for now must be 0, i.e., one level in total) +amr.max_level = 0 + +# Geometry +geometry.dims = 2 +geometry.prob_lo = 0. 2.e-2 # physical domain +geometry.prob_hi = 1.e-2 3.e-2 + +# Boundary condition +boundary.field_lo = neumann periodic +boundary.field_hi = pec_insulator periodic + +insulator.area_x_hi(y,z) = (2.25e-2 <= z and z <= 2.75e-2) +insulator.By_x_hi(y,z,t) = min(t/1.0e-12,1)*1.e1*3.3e-4 + +warpx.serialize_initial_conditions = 1 + +# Implicit setup +# Note that this is the CFL step size for the explicit simulation, over 2. +# This value allows quick convergence of the Picard solver. +warpx.const_dt = 7.37079480234276e-13/2. 
+ +algo.maxwell_solver = Yee +algo.evolve_scheme = "theta_implicit_em" +#algo.evolve_scheme = "semi_implicit_em" + +implicit_evolve.theta = 0.5 +#implicit_evolve.max_particle_iterations = 21 +#implicit_evolve.particle_tolerance = 1.0e-12 + +implicit_evolve.nonlinear_solver = "picard" +picard.verbose = true +picard.max_iterations = 25 +picard.relative_tolerance = 0.0 +picard.absolute_tolerance = 0.0 +picard.require_convergence = false + +#implicit_evolve.nonlinear_solver = "newton" +#newton.verbose = true +#newton.max_iterations = 20 +#newton.relative_tolerance = 1.0e-20 +#newton.absolute_tolerance = 0.0 +#newton.require_convergence = false + +#gmres.verbose_int = 2 +#gmres.max_iterations = 1000 +#gmres.relative_tolerance = 1.0e-20 +#gmres.absolute_tolerance = 0.0 + +# Verbosity +warpx.verbose = 1 + +# Diagnostics +diagnostics.diags_names = diag1 chk +diag1.intervals = 20 +diag1.diag_type = Full + +chk.intervals = 10 +chk.diag_type = Full +chk.format = checkpoint + +warpx.reduced_diags_names = fieldenergy poyntingflux +poyntingflux.type = FieldPoyntingFlux +poyntingflux.intervals = 1 +fieldenergy.type = FieldEnergy +fieldenergy.intervals = 1 diff --git a/Examples/Tests/pec/inputs_test_2d_pec_field_insulator_implicit_restart b/Examples/Tests/pec/inputs_test_2d_pec_field_insulator_implicit_restart new file mode 100644 index 00000000000..35b78d01acd --- /dev/null +++ b/Examples/Tests/pec/inputs_test_2d_pec_field_insulator_implicit_restart @@ -0,0 +1,5 @@ +# base input parameters +FILE = inputs_test_2d_pec_field_insulator_implicit + +# test input parameters +amr.restart = "../test_2d_pec_field_insulator_implicit/diags/chk000010" diff --git a/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags b/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags index dc0c57264ba..cc1b658c27f 100644 --- a/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags +++ b/Examples/Tests/reduced_diags/inputs_test_3d_reduced_diags @@ -68,7 +68,7 @@ photons.uz_th = 0.2 ################################# ###### REDUCED DIAGS ############ ################################# -warpx.reduced_diags_names = EP NP EF PP PF MF MR FP FP_integrate FP_line FP_plane FR_Max FR_Min FR_Integral Edotj +warpx.reduced_diags_names = EP NP EF PP PF MF PX MR FP FP_integrate FP_line FP_plane FR_Max FR_Min FR_Integral Edotj EP.type = ParticleEnergy EP.intervals = 200 EF.type = FieldEnergy @@ -79,6 +79,8 @@ PF.type = FieldMomentum PF.intervals = 200 MF.type = FieldMaximum MF.intervals = 200 +PX.type = FieldPoyntingFlux +PX.intervals = 200 FP.type = FieldProbe FP.intervals = 200 #The probe is placed at a cell center to match the value in the plotfile diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index f8261cd7847..da673671953 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -4074,6 +4074,7 @@ def __init__( "FieldEnergy", "FieldMomentum", "FieldMaximum", + "FieldPoyntingFlux", "RhoMaximum", "ParticleNumber", "LoadBalanceCosts", diff --git a/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator_implicit.json b/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator_implicit.json new file mode 100644 index 00000000000..fcb3081f6ae --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator_implicit.json @@ -0,0 +1,14 @@ +{ + "lev=0": { + "Bx": 0.0, + "By": 0.35907571934346943, + "Bz": 0.0, + "Ex": 36840284.366667606, + "Ey": 0.0, + "Ez": 107777138.0847348, + "jx": 0.0, + "jy": 0.0, + "jz": 0.0 + } +} + diff --git 
a/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator_implicit_restart.json b/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator_implicit_restart.json
new file mode 100644
index 00000000000..fcb3081f6ae
--- /dev/null
+++ b/Regression/Checksum/benchmarks_json/test_2d_pec_field_insulator_implicit_restart.json
@@ -0,0 +1,14 @@
+{
+  "lev=0": {
+    "Bx": 0.0,
+    "By": 0.35907571934346943,
+    "Bz": 0.0,
+    "Ex": 36840284.366667606,
+    "Ey": 0.0,
+    "Ez": 107777138.0847348,
+    "jx": 0.0,
+    "jy": 0.0,
+    "jz": 0.0
+  }
+}
+
diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H
index cb0a6c4b6c7..e2cd28f9e1c 100644
--- a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H
+++ b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.H
@@ -35,6 +35,8 @@ class FlushFormatCheckpoint final : public FlushFormatPlotfile
         const amrex::Vector<ParticleDiag>& particle_diags) const;
 
     void WriteDMaps (const std::string& dir, int nlev) const;
+
+    void WriteReducedDiagsData (std::string const & dir) const;
 };
 
 #endif // WARPX_FLUSHFORMATCHECKPOINT_H_
diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp
index a3a348d90ee..fc308dee936 100644
--- a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp
+++ b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp
@@ -5,6 +5,7 @@
 #   include "BoundaryConditions/PML_RZ.H"
 #endif
 #include "Diagnostics/ParticleDiag/ParticleDiag.H"
+#include "Diagnostics/ReducedDiags/MultiReducedDiags.H"
 #include "Fields.H"
 #include "Particles/WarpXParticleContainer.H"
 #include "Utils/TextMsg.H"
@@ -174,6 +175,8 @@ FlushFormatCheckpoint::WriteToFile (
 
     WriteDMaps(checkpointname, nlev);
 
+    WriteReducedDiagsData(checkpointname);
+
     VisMF::SetHeaderVersion(current_version);
 }
 
@@ -263,3 +266,12 @@ FlushFormatCheckpoint::WriteDMaps (const std::string& dir, int nlev) const
         }
     }
 }
+
+void
+FlushFormatCheckpoint::WriteReducedDiagsData (std::string const & dir) const
+{
+    if (ParallelDescriptor::IOProcessor()) {
+        auto & warpx = WarpX::GetInstance();
+        warpx.reduced_diags->WriteCheckpointData(dir);
+    }
+}
diff --git a/Source/Diagnostics/ReducedDiags/CMakeLists.txt b/Source/Diagnostics/ReducedDiags/CMakeLists.txt
index bbf1b6b65b0..4fbfc489aba 100644
--- a/Source/Diagnostics/ReducedDiags/CMakeLists.txt
+++ b/Source/Diagnostics/ReducedDiags/CMakeLists.txt
@@ -9,6 +9,7 @@ foreach(D IN LISTS WarpX_DIMS)
         FieldEnergy.cpp
         FieldMaximum.cpp
         FieldMomentum.cpp
+        FieldPoyntingFlux.cpp
         FieldProbe.cpp
         FieldProbeParticleContainer.cpp
         FieldReduction.cpp
diff --git a/Source/Diagnostics/ReducedDiags/FieldPoyntingFlux.H b/Source/Diagnostics/ReducedDiags/FieldPoyntingFlux.H
new file mode 100644
index 00000000000..3a45bd6c789
--- /dev/null
+++ b/Source/Diagnostics/ReducedDiags/FieldPoyntingFlux.H
@@ -0,0 +1,63 @@
+/* Copyright 2019-2020
+ *
+ * This file is part of WarpX.
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+
+#ifndef WARPX_DIAGNOSTICS_REDUCEDDIAGS_FIELDPOYNTINGFLUX_H_
+#define WARPX_DIAGNOSTICS_REDUCEDDIAGS_FIELDPOYNTINGFLUX_H_
+
+#include "ReducedDiags.H"
+
+#include <string>
+
+/**
+ * \brief This class mainly contains a function that computes the field Poynting flux,
+ * S = E cross B / mu0, integrated over each face of the domain.
+ */
+class FieldPoyntingFlux : public ReducedDiags
+{
+public:
+
+    /**
+     * \brief Constructor
+     *
+     * \param[in] rd_name reduced diags name
+     */
+    FieldPoyntingFlux (const std::string& rd_name);
+
+    /**
+     * \brief Call the routine to compute the Poynting flux if needed
+     *
+     * \param[in] step current time step
+     */
+    void ComputeDiags (int step) final;
+
+    /**
+     * \brief Call the routine to compute the Poynting flux at the mid step time level
+     *
+     * \param[in] step current time step
+     */
+    void ComputeDiagsMidStep (int step) final;
+
+    /**
+     * \brief This function computes the electromagnetic Poynting flux,
+     * obtained by integrating the electromagnetic Poynting flux density S = (E x B) / mu0
+     * on the surface of the domain.
+     */
+    void ComputePoyntingFlux ();
+
+    void WriteCheckpointData (std::string const & dir) final;
+
+    void ReadCheckpointData (std::string const & dir) final;
+
+private:
+
+    bool use_mid_step_value = false;
+
+};
+
+#endif
diff --git a/Source/Diagnostics/ReducedDiags/FieldPoyntingFlux.cpp b/Source/Diagnostics/ReducedDiags/FieldPoyntingFlux.cpp
new file mode 100644
index 00000000000..f760516f2b9
--- /dev/null
+++ b/Source/Diagnostics/ReducedDiags/FieldPoyntingFlux.cpp
@@ -0,0 +1,333 @@
+/* Copyright 2019-2020
+ *
+ * This file is part of WarpX.
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+
+#include "FieldPoyntingFlux.H"
+
+#include "Fields.H"
+#include "Utils/TextMsg.H"
+#include "Utils/WarpXConst.H"
+#include "WarpX.H"
+
+#include 
+#include 
+
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+#include 
+
+#include 
+#include 
+#include 
+
+using namespace amrex::literals;
+
+FieldPoyntingFlux::FieldPoyntingFlux (const std::string& rd_name)
+    : ReducedDiags{rd_name}
+{
+    // Resize data array
+    // lo and hi is 2
+    // space dims is AMREX_SPACEDIM
+    // instantaneous and integrated is 2
+    // The order will be outward flux for low faces, then high faces,
+    // energy loss for low faces, then high faces
+    m_data.resize(2*AMREX_SPACEDIM*2, 0.0_rt);
+
+    if (amrex::ParallelDescriptor::IOProcessor())
+    {
+        if (m_write_header)
+        {
+            // Open file
+            std::ofstream ofs{m_path + m_rd_name + "." + m_extension, std::ofstream::out};
+
+            int c = 0;
+
+            // Write header row
+            ofs << "#";
+            ofs << "[" << c++ << "]step()";
+            ofs << m_sep;
+            ofs << "[" << c++ << "]time(s)";
+
+            std::vector<std::string> sides = {"lo", "hi"};
+
+#if defined(WARPX_DIM_3D)
+            std::vector<std::string> space_coords = {"x", "y", "z"};
+#elif defined(WARPX_DIM_XZ)
+            std::vector<std::string> space_coords = {"x", "z"};
+#elif defined(WARPX_DIM_1D_Z)
+            std::vector<std::string> space_coords = {"z"};
+#elif defined(WARPX_DIM_RZ)
+            std::vector<std::string> space_coords = {"r", "z"};
+#endif
+
+            // Only on level 0
+            for (int iside = 0; iside < 2; iside++) {
+                for (int ic = 0; ic < AMREX_SPACEDIM; ic++) {
+                    ofs << m_sep;
+                    ofs << "[" << c++ << "]outward_power_" + sides[iside] + "_" + space_coords[ic] +"(W)";
+            }}
+            for (int iside = 0; iside < 2; iside++) {
+                for (int ic = 0; ic < AMREX_SPACEDIM; ic++) {
+                    ofs << m_sep;
+                    ofs << "[" << c++ << "]integrated_energy_loss_" + sides[iside] + "_" + space_coords[ic] +"(J)";
+            }}
+
+            ofs << "\n";
+            ofs.close();
+        }
+    }
+}
+
+void FieldPoyntingFlux::ComputeDiags (int /*step*/)
+{
+    // This will be called at the end of the time step. Only calculate the
+    // flux if it had not already been calculated mid step.
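+    // (Editor's note: the implicit-solver changes later in this patch call
+    // ComputeDiagsMidStep() while the fields are known at the half/theta
+    // time level; once that happens, use_mid_step_value stays true and the
+    // end-of-step recomputation below is skipped.)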
+    if (!use_mid_step_value) {
+        ComputePoyntingFlux();
+    }
+}
+
+void FieldPoyntingFlux::ComputeDiagsMidStep (int /*step*/)
+{
+    // If this is called, always use the value calculated here.
+    use_mid_step_value = true;
+    ComputePoyntingFlux();
+}
+
+void FieldPoyntingFlux::ComputePoyntingFlux ()
+{
+    using warpx::fields::FieldType;
+    using ablastr::fields::Direction;
+
+    // Note that this is calculated every step to get the
+    // full resolution on the integrated data
+
+    int const lev = 0;
+
+    // Get a reference to WarpX instance
+    auto & warpx = WarpX::GetInstance();
+
+    // RZ coordinate only working with one mode
+#if defined(WARPX_DIM_RZ)
+    WARPX_ALWAYS_ASSERT_WITH_MESSAGE(warpx.n_rz_azimuthal_modes == 1,
+        "FieldPoyntingFlux reduced diagnostics only implemented in RZ geometry for one mode");
+#endif
+
+    amrex::Box domain_box = warpx.Geom(lev).Domain();
+    domain_box.surroundingNodes();
+
+    // Get MultiFab data at given refinement level
+    amrex::MultiFab const & Ex = *warpx.m_fields.get(FieldType::Efield_fp, Direction{0}, lev);
+    amrex::MultiFab const & Ey = *warpx.m_fields.get(FieldType::Efield_fp, Direction{1}, lev);
+    amrex::MultiFab const & Ez = *warpx.m_fields.get(FieldType::Efield_fp, Direction{2}, lev);
+    amrex::MultiFab const & Bx = *warpx.m_fields.get(FieldType::Bfield_fp, Direction{0}, lev);
+    amrex::MultiFab const & By = *warpx.m_fields.get(FieldType::Bfield_fp, Direction{1}, lev);
+    amrex::MultiFab const & Bz = *warpx.m_fields.get(FieldType::Bfield_fp, Direction{2}, lev);
+
+    // Coarsening ratio (no coarsening)
+    amrex::GpuArray<int, 3> const cr{1,1,1};
+
+    // Reduction component (fourth component in Array4)
+    constexpr int comp = 0;
+
+    // Index type (staggering) of each MultiFab
+    // (with third component set to zero in 2D)
+    amrex::GpuArray<int, 3> Ex_stag{0,0,0};
+    amrex::GpuArray<int, 3> Ey_stag{0,0,0};
+    amrex::GpuArray<int, 3> Ez_stag{0,0,0};
+    amrex::GpuArray<int, 3> Bx_stag{0,0,0};
+    amrex::GpuArray<int, 3> By_stag{0,0,0};
+    amrex::GpuArray<int, 3> Bz_stag{0,0,0};
+    for (int i = 0; i < AMREX_SPACEDIM; ++i)
+    {
+        Ex_stag[i] = Ex.ixType()[i];
+        Ey_stag[i] = Ey.ixType()[i];
+        Ez_stag[i] = Ez.ixType()[i];
+        Bx_stag[i] = Bx.ixType()[i];
+        By_stag[i] = By.ixType()[i];
+        Bz_stag[i] = Bz.ixType()[i];
+    }
+
+    for (amrex::OrientationIter face; face; ++face) {
+
+        int const face_dir = face().coordDir();
+
+        if (face().isHigh() && WarpX::field_boundary_hi[face_dir] == FieldBoundaryType::Periodic) {
+            // For upper periodic boundaries, copy the lower value instead of regenerating it.
+            int const iu = int(face());
+            int const il = int(face().flip());
+            m_data[iu] = -m_data[il];
+            m_data[iu + 2*AMREX_SPACEDIM] = -m_data[il + 2*AMREX_SPACEDIM];
+            continue;
+        }
+
+        amrex::Box const boundary = amrex::bdryNode(domain_box, face());
+
+        // Get cell area
+        amrex::Real const *dx = warpx.Geom(lev).CellSize();
+        std::array<amrex::Real, 3> dxtemp = {AMREX_D_DECL(dx[0], dx[1], dx[2])};
+        dxtemp[face_dir] = 1._rt;
+        amrex::Real const dA = AMREX_D_TERM(dxtemp[0], *dxtemp[1], *dxtemp[2]);
+
+        // Node-centered in the face direction, Cell-centered in other directions
+        amrex::GpuArray<int, 3> cc{0,0,0};
+        cc[face_dir] = 1;
+
+        // Only calculate the ExB term that is normal to the surface.
+        // normal_dir is the normal direction relative to the WarpX coordinates
+#if (defined WARPX_DIM_XZ) || (defined WARPX_DIM_RZ)
+        // For 2D : it is either 0, or 2
+        int const normal_dir = 2*face_dir;
+#elif (defined WARPX_DIM_1D_Z)
+        // For 1D : it is always 2
+        int const normal_dir = 2;
+#else
+        // For 3D : it is the same as the face direction
+        int const normal_dir = face_dir;
+#endif
+
+        amrex::ReduceOps<amrex::ReduceOpSum> reduce_ops;
+        amrex::ReduceData<amrex::Real> reduce_data(reduce_ops);
+
+#ifdef AMREX_USE_OMP
+#pragma omp parallel if (amrex::Gpu::notInLaunchRegion())
+#endif
+        // Loop over boxes, interpolate E,B data to cell face centers
+        // and compute sum over cells of (E x B) components
+        for (amrex::MFIter mfi(Ex, amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi)
+        {
+            amrex::Array4<amrex::Real const> const & Ex_arr = Ex[mfi].array();
+            amrex::Array4<amrex::Real const> const & Ey_arr = Ey[mfi].array();
+            amrex::Array4<amrex::Real const> const & Ez_arr = Ez[mfi].array();
+            amrex::Array4<amrex::Real const> const & Bx_arr = Bx[mfi].array();
+            amrex::Array4<amrex::Real const> const & By_arr = By[mfi].array();
+            amrex::Array4<amrex::Real const> const & Bz_arr = Bz[mfi].array();
+
+            amrex::Box box = enclosedCells(mfi.nodaltilebox());
+            box.surroundingNodes(face_dir);
+
+            // Find the intersection with the boundary
+            // boundary needs to have the same type as box
+            amrex::Box const boundary_matched = amrex::convert(boundary, box.ixType());
+            box &= boundary_matched;
+
+#if defined(WARPX_DIM_RZ)
+            // Lower corner of box physical domain
+            amrex::XDim3 const xyzmin = WarpX::LowerCorner(box, lev, 0._rt);
+            amrex::Dim3 const lo = amrex::lbound(box);
+            amrex::Real const dr = warpx.Geom(lev).CellSize(lev);
+            amrex::Real const rmin = xyzmin.x;
+            int const irmin = lo.x;
+#endif
+
+            auto area_factor = [=] AMREX_GPU_DEVICE(int i, int j, int k) noexcept {
+                amrex::ignore_unused(i,j,k);
+#if defined WARPX_DIM_RZ
+                amrex::Real r;
+                if (normal_dir == 0) {
+                    r = rmin + (i - irmin)*dr;
+                } else {
+                    r = rmin + (i + 0.5_rt - irmin)*dr;
+                }
+                return 2._rt*MathConst::pi*r;
+#else
+                return 1._rt;
+#endif
+            };
+
+            // Compute E x B
+            reduce_ops.eval(box, reduce_data,
+                [=] AMREX_GPU_DEVICE (int i, int j, int k) -> amrex::GpuTuple<amrex::Real>
+                {
+                    amrex::Real Ex_cc = 0._rt, Ey_cc = 0._rt, Ez_cc = 0._rt;
+                    amrex::Real Bx_cc = 0._rt, By_cc = 0._rt, Bz_cc = 0._rt;
+
+                    if (normal_dir == 1 || normal_dir == 2) {
+                        Ex_cc = ablastr::coarsen::sample::Interp(Ex_arr, Ex_stag, cc, cr, i, j, k, comp);
+                        Bx_cc = ablastr::coarsen::sample::Interp(Bx_arr, Bx_stag, cc, cr, i, j, k, comp);
+                    }
+
+                    if (normal_dir == 0 || normal_dir == 2) {
+                        Ey_cc = ablastr::coarsen::sample::Interp(Ey_arr, Ey_stag, cc, cr, i, j, k, comp);
+                        By_cc = ablastr::coarsen::sample::Interp(By_arr, By_stag, cc, cr, i, j, k, comp);
+                    }
+                    if (normal_dir == 0 || normal_dir == 1) {
+                        Ez_cc = ablastr::coarsen::sample::Interp(Ez_arr, Ez_stag, cc, cr, i, j, k, comp);
+                        Bz_cc = ablastr::coarsen::sample::Interp(Bz_arr, Bz_stag, cc, cr, i, j, k, comp);
+                    }
+
+                    amrex::Real const af = area_factor(i,j,k);
+                    if (normal_dir == 0) { return af*(Ey_cc * Bz_cc - Ez_cc * By_cc); }
+                    else if (normal_dir == 1) { return af*(Ez_cc * Bx_cc - Ex_cc * Bz_cc); }
+                    else { return af*(Ex_cc * By_cc - Ey_cc * Bx_cc); }
+                });
+        }
+
+        int const sign = (face().isLow() ?
-1 : 1); + auto r = reduce_data.value(); + int const ii = int(face()); + m_data[ii] = sign*amrex::get<0>(r)/PhysConst::mu0*dA; + + } + + amrex::ParallelDescriptor::ReduceRealSum(m_data.data(), 2*AMREX_SPACEDIM); + + amrex::Real const dt = warpx.getdt(lev); + for (int ii=0 ; ii < 2*AMREX_SPACEDIM ; ii++) { + m_data[ii + 2*AMREX_SPACEDIM] += m_data[ii]*dt; + } + +} + +void +FieldPoyntingFlux::WriteCheckpointData (std::string const & dir) +{ + // Write out the current values of the time integrated data + std::ofstream chkfile{dir + "/FieldPoyntingFlux_data.txt", std::ofstream::out}; + if (!chkfile.good()) { + WARPX_ABORT_WITH_MESSAGE("FieldPoyntingFlux::WriteCheckpointData: could not open file for writing checkpoint data"); + } + + chkfile.precision(17); + + for (int i=0; i < 2*AMREX_SPACEDIM; i++) { + chkfile << m_data[2*AMREX_SPACEDIM + i] << "\n"; + } +} + +void +FieldPoyntingFlux::ReadCheckpointData (std::string const & dir) +{ + // Read in the current values of the time integrated data + std::ifstream chkfile{dir + "/FieldPoyntingFlux_data.txt", std::ifstream::in}; + if (!chkfile.good()) { + WARPX_ABORT_WITH_MESSAGE("FieldPoyntingFlux::ReadCheckpointData: could not open file for reading checkpoint data"); + } + + for (int i=0; i < 2*AMREX_SPACEDIM; i++) { + amrex::Real data; + if (chkfile >> data) { + m_data[2*AMREX_SPACEDIM + i] = data; + } else { + WARPX_ABORT_WITH_MESSAGE("FieldPoyntingFlux::ReadCheckpointData: could not read in time integrated data"); + } + } +} diff --git a/Source/Diagnostics/ReducedDiags/Make.package b/Source/Diagnostics/ReducedDiags/Make.package index 2611831a3dd..4d2e4d7def9 100644 --- a/Source/Diagnostics/ReducedDiags/Make.package +++ b/Source/Diagnostics/ReducedDiags/Make.package @@ -7,6 +7,7 @@ CEXE_sources += DifferentialLuminosity.cpp CEXE_sources += FieldEnergy.cpp CEXE_sources += FieldMaximum.cpp CEXE_sources += FieldMomentum.cpp +CEXE_sources += FieldPoyntingFlux.cpp CEXE_sources += FieldProbe.cpp CEXE_sources += FieldProbeParticleContainer.cpp CEXE_sources += FieldReduction.cpp diff --git a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.H b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.H index 1a2f51794c6..5a782db7118 100644 --- a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.H +++ b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.H @@ -49,10 +49,21 @@ public: * @param[in] step current iteration time */ void ComputeDiags (int step); + /** Loop over all ReducedDiags and call their ComputeDiagsMidStep + * @param[in] step current iteration time */ + void ComputeDiagsMidStep (int step); + /** Loop over all ReducedDiags and call their WriteToFile * @param[in] step current iteration time */ void WriteToFile (int step); + /** \brief Loop over all ReducedDiags and call their WriteCheckpointData + * @param[in] dir checkpoint directory */ + void WriteCheckpointData (std::string const & dir); + + /** \brief Loop over all ReducedDiags and call their ReadCheckpointData + * @param[in] dir checkpoint directory */ + void ReadCheckpointData (std::string const & dir); }; #endif diff --git a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp index 5035eac58a8..0ce18174111 100644 --- a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp +++ b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp @@ -13,6 +13,7 @@ #include "FieldEnergy.H" #include "FieldMaximum.H" #include "FieldMomentum.H" +#include "FieldPoyntingFlux.H" #include "FieldProbe.H" #include "FieldReduction.H" #include "LoadBalanceCosts.H" 
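Editor's note: the checkpoint round-trip above relies on `chkfile.precision(17)`; 17 significant digits are enough to reproduce IEEE-754 doubles exactly on read-back, which the restart test depends on. A minimal Python sketch of the same idea (hypothetical file name, not WarpX code):

```python
import os, tempfile

# stand-ins for the time-integrated m_data entries
data = [1.0 / 3.0, 2.0**-52, 7.37079480234276e-13, 0.0]

path = os.path.join(tempfile.mkdtemp(), "FieldPoyntingFlux_data.txt")
with open(path, "w") as f:
    for v in data:
        f.write(f"{v:.17g}\n")  # 17 significant digits, like precision(17)

with open(path) as f:
    restored = [float(line) for line in f]

assert restored == data  # bit-exact, not merely approximate
```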
@@ -66,6 +67,7 @@ MultiReducedDiags::MultiReducedDiags ()
         {"FieldEnergy", [](CS s){return std::make_unique<FieldEnergy>(s);}},
         {"FieldMaximum", [](CS s){return std::make_unique<FieldMaximum>(s);}},
         {"FieldMomentum", [](CS s){return std::make_unique<FieldMomentum>(s);}},
+        {"FieldPoyntingFlux", [](CS s){return std::make_unique<FieldPoyntingFlux>(s);}},
         {"FieldProbe", [](CS s){return std::make_unique<FieldProbe>(s);}},
         {"FieldReduction", [](CS s){return std::make_unique<FieldReduction>(s);}},
         {"LoadBalanceCosts", [](CS s){return std::make_unique<LoadBalanceCosts>(s);}},
@@ -124,6 +126,20 @@ void MultiReducedDiags::ComputeDiags (int step)
 }
 // end void MultiReducedDiags::ComputeDiags
 
+// call functions to compute diags at the mid step time level
+void MultiReducedDiags::ComputeDiagsMidStep (int step)
+{
+    WARPX_PROFILE("MultiReducedDiags::ComputeDiagsMidStep()");
+
+    // loop over all reduced diags
+    for (int i_rd = 0; i_rd < static_cast<int>(m_rd_names.size()); ++i_rd)
+    {
+        m_multi_rd[i_rd] -> ComputeDiagsMidStep(step);
+    }
+    // end loop over all reduced diags
+}
+// end void MultiReducedDiags::ComputeDiagsMidStep
+
 // function to write data
 void MultiReducedDiags::WriteToFile (int step)
 {
@@ -142,3 +158,26 @@ void MultiReducedDiags::WriteToFile (int step)
     // end loop over all reduced diags
 }
 // end void MultiReducedDiags::WriteToFile
+
+void MultiReducedDiags::WriteCheckpointData (std::string const & dir)
+{
+    // Only the I/O rank writes
+    if ( !ParallelDescriptor::IOProcessor() ) { return; }
+
+    // loop over all reduced diags
+    for (int i_rd = 0; i_rd < static_cast<int>(m_rd_names.size()); ++i_rd)
+    {
+        m_multi_rd[i_rd]->WriteCheckpointData(dir);
+    }
+    // end loop over all reduced diags
+}
+
+void MultiReducedDiags::ReadCheckpointData (std::string const & dir)
+{
+    // loop over all reduced diags
+    for (int i_rd = 0; i_rd < static_cast<int>(m_rd_names.size()); ++i_rd)
+    {
+        m_multi_rd[i_rd]->ReadCheckpointData(dir);
+    }
+    // end loop over all reduced diags
+}
diff --git a/Source/Diagnostics/ReducedDiags/ReducedDiags.H b/Source/Diagnostics/ReducedDiags/ReducedDiags.H
index 2c942e1df6d..a32de30cc6f 100644
--- a/Source/Diagnostics/ReducedDiags/ReducedDiags.H
+++ b/Source/Diagnostics/ReducedDiags/ReducedDiags.H
@@ -83,6 +83,13 @@ public:
      */
     virtual void ComputeDiags (int step) = 0;
 
+    /**
+     * function to compute diags at the mid step time level
+     *
+     * @param[in] step current time step
+     */
+    virtual void ComputeDiagsMidStep (int step);
+
     /**
      * write to file function
      *
@@ -90,6 +97,20 @@
      */
     virtual void WriteToFile (int step) const;
 
+    /**
+     * \brief Write out checkpoint related data
+     *
+     * \param[in] dir Directory where checkpoint data is written
+     */
+    virtual void WriteCheckpointData (std::string const & dir);
+
+    /**
+     * \brief Read in checkpoint related data
+     *
+     * \param[in] dir Directory where checkpoint data was written
+     */
+    virtual void ReadCheckpointData (std::string const & dir);
+
     /**
      * This function queries deprecated input parameters and aborts
      * the run if one of them is specified.
diff --git a/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp b/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp
index a3529cd305d..b0e20584a12 100644
--- a/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp
+++ b/Source/Diagnostics/ReducedDiags/ReducedDiags.cpp
@@ -92,6 +92,27 @@ void ReducedDiags::LoadBalance ()
     // load balancing operations
 }
 
+void ReducedDiags::ComputeDiagsMidStep (int /*step*/)
+{
+    // Defines an empty function ComputeDiagsMidStep() to be overwritten if needed.
+    // Function used to calculate the diagnostic at the mid step time level
+    // (instead of at the end of the step).
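+    // (Editor's note: within this patch, FieldPoyntingFlux is the only
+    // diagnostic that overrides this hook; all other reduced diagnostics
+    // keep computing at the end of the step.)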
+}
+
+void ReducedDiags::WriteCheckpointData (std::string const & /*dir*/)
+{
+    // Defines an empty function WriteCheckpointData() to be overwritten if needed.
+    // Function used to write out any data needed by the diagnostic in
+    // the checkpoint.
+}
+
+void ReducedDiags::ReadCheckpointData (std::string const & /*dir*/)
+{
+    // Defines an empty function ReadCheckpointData() to be overwritten if needed.
+    // Function used to read in any data that was written out in the checkpoint
+    // when doing a restart.
+}
+
 void ReducedDiags::BackwardCompatibility () const
 {
     const amrex::ParmParse pp_rd_name(m_rd_name);
diff --git a/Source/Diagnostics/WarpXIO.cpp b/Source/Diagnostics/WarpXIO.cpp
index f2921f820fd..e90ae98eb17 100644
--- a/Source/Diagnostics/WarpXIO.cpp
+++ b/Source/Diagnostics/WarpXIO.cpp
@@ -19,6 +19,7 @@
 #include "Utils/WarpXProfilerWrapper.H"
 #include "WarpX.H"
 #include "Diagnostics/MultiDiagnostics.H"
+#include "Diagnostics/ReducedDiags/MultiReducedDiags.H"
 
 #include 
 #include 
@@ -400,6 +401,8 @@ WarpX::InitFromCheckpoint ()
 
     if (EB::enabled()) { InitializeEBGridData(maxLevel()); }
 
+    reduced_diags->ReadCheckpointData(restart_chkfile);
+
     // Initialize particles
     mypc->AllocData();
     mypc->Restart(restart_chkfile);
diff --git a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp
index 41fdf515581..bf8441e1992 100644
--- a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp
+++ b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp
@@ -5,6 +5,7 @@
  * License: BSD-3-Clause-LBNL
  */
 #include "SemiImplicitEM.H"
+#include "Diagnostics/ReducedDiags/MultiReducedDiags.H"
 #include "WarpX.H"
 
 using warpx::fields::FieldType;
@@ -83,6 +84,7 @@ void SemiImplicitEM::OneStep ( amrex::Real start_time,
 
     // Update WarpX owned Efield_fp to t_{n+1/2}
     m_WarpX->SetElectricFieldAndApplyBCs( m_E, half_time );
+    m_WarpX->reduced_diags->ComputeDiagsMidStep(a_step);
 
     // Advance particles from time n+1/2 to time n+1
     m_WarpX->FinishImplicitParticleUpdate();
diff --git a/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp
index cd672f18f98..b8be6b93c63 100644
--- a/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp
+++ b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp
@@ -6,6 +6,7 @@
  */
 #include "Fields.H"
 #include "StrangImplicitSpectralEM.H"
+#include "Diagnostics/ReducedDiags/MultiReducedDiags.H"
 #include "WarpX.H"
 
 using namespace warpx::fields;
@@ -84,6 +85,7 @@ void StrangImplicitSpectralEM::OneStep ( amrex::Real start_time,
 
     // Update WarpX owned Efield_fp and Bfield_fp to t_{n+1/2}
     UpdateWarpXFields( m_E, half_time );
+    m_WarpX->reduced_diags->ComputeDiagsMidStep(a_step);
 
     // Advance particles from time n+1/2 to time n+1
     m_WarpX->FinishImplicitParticleUpdate();
diff --git a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp
index aa6ee63f7df..1e6596f5eaa 100644
--- a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp
+++ b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp
@@ -6,6 +6,7 @@
  */
 #include "Fields.H"
 #include "ThetaImplicitEM.H"
+#include "Diagnostics/ReducedDiags/MultiReducedDiags.H"
 #include "WarpX.H"
 
 using warpx::fields::FieldType;
@@ -109,6 +110,7 @@ void ThetaImplicitEM::OneStep ( const amrex::Real start_time,
 
     // Update WarpX owned Efield_fp and Bfield_fp to t_{n+theta}
     UpdateWarpXFields( m_E, start_time );
+    m_WarpX->reduced_diags->ComputeDiagsMidStep(a_step);
 
     //
Advance particles from time n+1/2 to time n+1 m_WarpX->FinishImplicitParticleUpdate(); From 409d346b60dd5bc16ab4aa332a1c1e7d2a551119 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 3 Feb 2025 15:37:38 -0800 Subject: [PATCH 209/278] Doc Lassen: Pip Cache Disabled (#5632) Script aborted on `python3 -m pip cache purge`. No extra `--no-cache-dir` suffixes needed to compensate since, as the error says, system disabled pip caches. --- Tools/machines/lassen-llnl/install_v100_dependencies_toss3.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Tools/machines/lassen-llnl/install_v100_dependencies_toss3.sh b/Tools/machines/lassen-llnl/install_v100_dependencies_toss3.sh index 1b14159cd22..86f330060f6 100644 --- a/Tools/machines/lassen-llnl/install_v100_dependencies_toss3.sh +++ b/Tools/machines/lassen-llnl/install_v100_dependencies_toss3.sh @@ -114,7 +114,7 @@ rm -rf ${SW_DIR}/venvs/warpx-lassen-toss3 python3 -m venv ${SW_DIR}/venvs/warpx-lassen-toss3 source ${SW_DIR}/venvs/warpx-lassen-toss3/bin/activate python3 -m pip install --upgrade pip -python3 -m pip cache purge +# python3 -m pip cache purge # error: pip cache commands can not function since cache is disabled python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel From 8bc62d8cd77336e248bccc30ddb39164e2988fb1 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 3 Feb 2025 15:40:22 -0800 Subject: [PATCH 210/278] [pre-commit.ci] pre-commit autoupdate (#5633) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.3 → v0.9.4](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.3...v0.9.4) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index bb03acf77ca..577f0ffc1f0 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.3 + rev: v0.9.4 hooks: # Run the linter - id: ruff From 57931b81ea0efdbfec4bc1f84f789b5188b036ed Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Mon, 3 Feb 2025 16:02:49 -0800 Subject: [PATCH 211/278] Add execution of `afterEpush` callback in hybrid solver (#5629) Signed-off-by: roelof-groenewald --- .../FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp | 3 +++ 1 file changed, 3 insertions(+) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index ba6bb0e042c..64ee83b10e0 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -10,6 +10,7 @@ #include "HybridPICModel.H" #include "EmbeddedBoundary/Enabled.H" +#include "Python/callbacks.H" #include "Fields.H" #include "WarpX.H" @@ -304,6 +305,8 @@ void HybridPICModel::HybridPICSolveE ( eb_update_E[lev], lev, solve_for_Faraday ); } + // Allow execution of Python callback after E-field push + ExecutePythonCallback("afterEpush"); } void HybridPICModel::HybridPICSolveE ( From 
57f6317adbec30eb0314c94592b975d42fc217c5 Mon Sep 17 00:00:00 2001
From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com>
Date: Mon, 3 Feb 2025 16:45:30 -0800
Subject: [PATCH 212/278] Fix bug with DSMC collisions in RZ (#5622)

I realized there is a bug in the DSMC module for RZ geometry where the
velocity vectors for the colliding pair were not rotated so that a proper
center-of-momentum calculation could be done. This PR fixes the bug.

To check that the fix in this PR works, I compared the azimuthal velocity
distribution for energetic ions created from NBI with finite impact
parameter (such that a net rotation should be imparted on the ions), when
simulated in 3d versus RZ:

3d result:
![image](https://github.com/user-attachments/assets/458f7e9c-b7b2-46ec-b456-8733ce959f94)

RZ result:
![image](https://github.com/user-attachments/assets/ce37a51c-f127-44be-b9b9-4f9a1d7d2cbb)
---
 .../Collision/BinaryCollision/DSMC/DSMCFunc.H |  1 +
 .../DSMC/SplitAndScatterFunc.H                | 26 +++++++++++++++++++
 2 files changed, 27 insertions(+)

diff --git a/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.H b/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.H
index 5a3c925e9bd..6051aab1b59 100644
--- a/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.H
+++ b/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.H
@@ -176,6 +176,7 @@ public:
                     m_process_count, m_scattering_processes_data, engine);
 
 #if (defined WARPX_DIM_RZ)
+                /* Undo the earlier velocity rotation. */
                 amrex::ParticleReal const u1xbuf_new = u1x[I1[i1]];
                 u1x[I1[i1]] = u1xbuf_new*std::cos(-theta) - u1y[I1[i1]]*std::sin(-theta);
                 u1y[I1[i1]] = u1xbuf_new*std::sin(-theta) + u1y[I1[i1]]*std::cos(-theta);
diff --git a/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H b/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H
index 473199a6b21..239a76c50c7 100644
--- a/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H
+++ b/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H
@@ -154,6 +154,25 @@ public:
             auto& uy2 = soa_products_data[1].m_rdata[PIdx::uy][product2_index];
             auto& uz2 = soa_products_data[1].m_rdata[PIdx::uz][product2_index];
 
+#if (defined WARPX_DIM_RZ)
+            /* In RZ geometry, macroparticles can collide with other macroparticles
+             * in the same *cylindrical* cell. For this reason, collisions between macroparticles
+             * are actually not local in space. In this case, the underlying assumption is that
+             * particles within the same cylindrical cell represent a cylindrically-symmetric
+             * momentum distribution function. Therefore, here, we temporarily rotate the
+             * momentum of one of the macroparticles in agreement with this cylindrical symmetry.
+             * (This is technically only valid if we use only the m=0 azimuthal mode in the simulation;
+             * there is a corresponding assert statement at initialization.)
+             */
+            amrex::ParticleReal const theta = (
+                soa_products_data[1].m_rdata[PIdx::theta][product2_index]
+                - soa_products_data[0].m_rdata[PIdx::theta][product1_index]
+            );
+            amrex::ParticleReal const ux1buf = ux1;
+            ux1 = ux1buf*std::cos(theta) - uy1*std::sin(theta);
+            uy1 = ux1buf*std::sin(theta) + uy1*std::cos(theta);
+#endif
+
             // for simplicity (for now) we assume non-relativistic particles
            // and simply calculate the center-of-momentum velocity from the
             // rest masses
@@ -213,6 +232,13 @@ public:
             ux2 += uCOM_x;
             uy2 += uCOM_y;
             uz2 += uCOM_z;
+
+#if (defined WARPX_DIM_RZ)
+            /* Undo the earlier velocity rotation.
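+             *
+             * (Editor's note: the rotation applied before the collision is the
+             * standard 2D rotation u' = R(theta) u,
+             *   ux' = ux*cos(theta) - uy*sin(theta)
+             *   uy' = ux*sin(theta) + uy*cos(theta),
+             * so applying R(-theta) here inverts it exactly, since
+             * R(-theta) = R(theta)^{-1}; only the collision itself changes
+             * the momenta.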
+             */
+            amrex::ParticleReal const ux1buf_new = ux1;
+            ux1 = ux1buf_new*std::cos(-theta) - uy1*std::sin(-theta);
+            uy1 = ux1buf_new*std::sin(-theta) + uy1*std::cos(-theta);
+#endif
         }
     });
 

From 93466dd9065f3849997e85baa35b8d1ed95a2ff5 Mon Sep 17 00:00:00 2001
From: Axel Huebl
Date: Mon, 3 Feb 2025 21:48:12 -0800
Subject: [PATCH 213/278] Fix Dangling Ref in EB Init (#5635)

Follow-up to #5209: My compiler says those locations would reference
temporary objects that were destroyed at the end of the line. That seems
to be the case indeed. Copy instead to make the temporary a named and
thus persistent variable.

![Screenshot from 2025-02-03 16-59-22](https://github.com/user-attachments/assets/8259f6d7-099b-4d09-8382-f24baefb5793)
---
 Source/EmbeddedBoundary/WarpXInitEB.cpp | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Source/EmbeddedBoundary/WarpXInitEB.cpp b/Source/EmbeddedBoundary/WarpXInitEB.cpp
index 3f33259a313..371bd6a0570 100644
--- a/Source/EmbeddedBoundary/WarpXInitEB.cpp
+++ b/Source/EmbeddedBoundary/WarpXInitEB.cpp
@@ -147,7 +147,7 @@ WarpX::MarkReducedShapeCells (
         amrex::Array4<int> const & eb_reduce_particle_shape_arr = eb_reduce_particle_shape->array(mfi);
 
         // Check if the box (including one layer of guard cells) contains a mix of covered and regular cells
-        const amrex::Box& eb_info_box = mfi.tilebox(amrex::IntVect::TheCellVector()).grow(1);
+        const amrex::Box eb_info_box = mfi.tilebox(amrex::IntVect::TheCellVector()).grow(1);
 
         amrex::FabType const fab_type = eb_flag[mfi].getType( eb_info_box );
 
         if (fab_type == amrex::FabType::regular) {
             // All cells in the box are regular
@@ -240,7 +240,7 @@ WarpX::MarkUpdateCellsStairCase (
             amrex::Array4<int> const & eb_update_arr = eb_update[idim]->array(mfi);
 
             // Check if the box (including one layer of guard cells) contains a mix of covered and regular cells
-            const amrex::Box& eb_info_box = mfi.tilebox(amrex::IntVect::TheCellVector()).grow(1);
+            const amrex::Box eb_info_box = mfi.tilebox(amrex::IntVect::TheCellVector()).grow(1);
 
             amrex::FabType const fab_type = eb_flag[mfi].getType( eb_info_box );
 
             if (fab_type == amrex::FabType::regular) {
                 // All cells in the box are regular

From 12269a0ee7622f73326be2577f8458f0e935b465 Mon Sep 17 00:00:00 2001
From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com>
Date: Tue, 4 Feb 2025 15:28:36 -0800
Subject: [PATCH 214/278] Fixing bug in hyper-resistivity calculation which
 had missing terms i… (#5638)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

…n vector laplacian evaluation. Additionally fixing a staggering bug
for density calculation in RZ.

---------

Signed-off-by: S.
Eric Clark <25495882+clarkse@users.noreply.github.com> Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- .../Tests/ohm_solver_em_modes/analysis_rz.py | 2 +- .../test_rz_ohm_solver_em_modes_picmi.json | 14 ++++---- .../HybridPICSolveE.cpp | 36 ++++++++++++++----- 3 files changed, 36 insertions(+), 16 deletions(-) diff --git a/Examples/Tests/ohm_solver_em_modes/analysis_rz.py b/Examples/Tests/ohm_solver_em_modes/analysis_rz.py index 841e1177630..7cd5086c408 100755 --- a/Examples/Tests/ohm_solver_em_modes/analysis_rz.py +++ b/Examples/Tests/ohm_solver_em_modes/analysis_rz.py @@ -179,5 +179,5 @@ def process(it): amps = np.abs(F_kw[2, 1, len(kz) // 2 - 2 : len(kz) // 2 + 2]) print("Amplitude sample: ", amps) assert np.allclose( - amps, np.array([61.02377286, 19.80026021, 100.47687017, 10.83331295]) + amps, np.array([59.23850009, 19.26746169, 92.65794174, 10.83627164]) ) diff --git a/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_em_modes_picmi.json b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_em_modes_picmi.json index ec1b6272092..feca88922e2 100644 --- a/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_em_modes_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_em_modes_picmi.json @@ -1,12 +1,12 @@ { "lev=0": {}, "ions": { - "particle_momentum_x": 5.0438993756415296e-17, - "particle_momentum_y": 5.0444406612873916e-17, - "particle_momentum_z": 5.0519292431385393e-17, - "particle_position_x": 143164.41713467025, - "particle_position_y": 143166.51845281923, - "particle_theta": 2573261.8729711357, - "particle_weight": 8.128680645366887e+18 + "particle_momentum_x": 5.043784704795177e-17, + "particle_momentum_y": 5.0444695502620235e-17, + "particle_momentum_z": 5.05193106847111e-17, + "particle_position_x": 143164.53685279266, + "particle_position_y": 143166.5185853012, + "particle_theta": 2573262.446840369, + "particle_weight": 8.128680645366886e+18 } } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 47e45bbe753..2047e87b696 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -611,9 +611,10 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (include_hyper_resistivity_term) { // r on cell-centered point (Jr is cell-centered in r) - Real const r = rmin + (i + 0.5_rt)*dr; - - auto nabla2Jr = T_Algo::Dr_rDr_over_r(Jr, r, dr, coefs_r, n_coefs_r, i, j, 0, 0); + const Real r = rmin + (i + 0.5_rt)*dr; + const Real jr_val = Interp(Jr, Jr_stag, Er_stag, coarsen, i, j, 0, 0); + auto nabla2Jr = T_Algo::Dr_rDr_over_r(Jr, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) + + T_Algo::Dzz(Jr, coefs_z, n_coefs_z, i, j, 0, 0) - jr_val/(r*r); Er(i, j, 0) -= eta_h * nabla2Jr; } }, @@ -633,7 +634,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Er_stag, coarsen, i, j, 0, 0); + Real rho_val = Interp(rho, nodal, Et_stag, coarsen, i, j, 0, 0); // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -659,7 +660,13 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Et(i, j, 0) += eta(rho_val, jtot_val) * Jt(i, j, 0); } - // Note: Hyper-resisitivity should be revisited here when modal decomposition is 
implemented + if (include_hyper_resistivity_term) { + const Real jt_val = Interp(Jt, Jt_stag, Et_stag, coarsen, i, j, 0, 0); + auto nabla2Jt = T_Algo::Dr_rDr_over_r(Jt, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) + + T_Algo::Dzz(Jt, coefs_z, n_coefs_z, i, j, 0, 0) - jt_val/(r*r); + + Et(i, j, 0) -= eta_h * nabla2Jt; + } }, // Ez calculation @@ -697,7 +704,14 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (solve_for_Faraday) { Ez(i, j, 0) += eta(rho_val, jtot_val) * Jz(i, j, 0); } if (include_hyper_resistivity_term) { + // r on nodal point (Jz is nodal in r) + Real const r = rmin + i*dr; + auto nabla2Jz = T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, 0, 0); + if (r > 0.5_rt*dr) { + nabla2Jz += T_Algo::Dr_rDr_over_r(Jz, r, dr, coefs_r, n_coefs_r, i, j, 0, 0); + } + Ez(i, j, 0) -= eta_h * nabla2Jz; } } @@ -918,7 +932,9 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( if (solve_for_Faraday) { Ex(i, j, k) += eta(rho_val, jtot_val) * Jx(i, j, k); } if (include_hyper_resistivity_term) { - auto nabla2Jx = T_Algo::Dxx(Jx, coefs_x, n_coefs_x, i, j, k); + auto nabla2Jx = T_Algo::Dxx(Jx, coefs_x, n_coefs_x, i, j, k) + + T_Algo::Dyy(Jx, coefs_y, n_coefs_y, i, j, k) + + T_Algo::Dzz(Jx, coefs_z, n_coefs_z, i, j, k); Ex(i, j, k) -= eta_h * nabla2Jx; } }, @@ -958,7 +974,9 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( if (solve_for_Faraday) { Ey(i, j, k) += eta(rho_val, jtot_val) * Jy(i, j, k); } if (include_hyper_resistivity_term) { - auto nabla2Jy = T_Algo::Dyy(Jy, coefs_y, n_coefs_y, i, j, k); + auto nabla2Jy = T_Algo::Dxx(Jy, coefs_x, n_coefs_x, i, j, k) + + T_Algo::Dyy(Jy, coefs_y, n_coefs_y, i, j, k) + + T_Algo::Dzz(Jy, coefs_z, n_coefs_z, i, j, k); Ey(i, j, k) -= eta_h * nabla2Jy; } }, @@ -998,7 +1016,9 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( if (solve_for_Faraday) { Ez(i, j, k) += eta(rho_val, jtot_val) * Jz(i, j, k); } if (include_hyper_resistivity_term) { - auto nabla2Jz = T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, k); + auto nabla2Jz = T_Algo::Dxx(Jz, coefs_x, n_coefs_x, i, j, k) + + T_Algo::Dyy(Jz, coefs_y, n_coefs_y, i, j, k) + + T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, k); Ez(i, j, k) -= eta_h * nabla2Jz; } } From cdb9e279ef2c385f447c175509df549ec1456e42 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 4 Feb 2025 15:29:17 -0800 Subject: [PATCH 215/278] Release 25.02 (#5639) Prepare the February release of WarpX: ```bash # update dependencies ./Tools/Release/updateAMReX.py ./Tools/Release/updatePICSAR.py ./Tools/Release/updatepyAMReX.py # bump version number ./Tools/Release/newVersion.sh ``` Following this workflow: https://warpx.readthedocs.io/en/latest/maintenance/release.html --------- Signed-off-by: Axel Huebl --- .github/workflows/cuda.yml | 2 +- CMakeLists.txt | 2 +- Docs/source/conf.py | 4 ++-- Python/setup.py | 2 +- Tools/Release/releasePR.py | 2 +- Tools/Release/weeklyUpdate.py | 2 +- cmake/dependencies/AMReX.cmake | 4 ++-- cmake/dependencies/PICSAR.cmake | 4 ++-- cmake/dependencies/pyAMReX.cmake | 4 ++-- setup.py | 2 +- 10 files changed, 14 insertions(+), 14 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 12a68d327f7..21f762f4819 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -127,7 +127,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 69f1ac884c6aba4d9881260819ade3bb25ed8aad && cd - + cd ../amrex && git checkout --detach 25.02 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/CMakeLists.txt b/CMakeLists.txt index 24e9338982e..bb3ee66f786 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ # Preamble #################################################################### # cmake_minimum_required(VERSION 3.24.0) -project(WarpX VERSION 25.01) +project(WarpX VERSION 25.02) include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake) diff --git a/Docs/source/conf.py b/Docs/source/conf.py index 247e11faa4f..666aaf858fa 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -107,9 +107,9 @@ def __init__(self, *args, **kwargs): # built documents. # # The short X.Y version. -version = "25.01" +version = "25.02" # The full version, including alpha/beta/rc tags. -release = "25.01" +release = "25.02" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/Python/setup.py b/Python/setup.py index a50b467c070..e0ec6c98a7d 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -65,7 +65,7 @@ setup( name="pywarpx", - version="25.01", + version="25.02", packages=["pywarpx"], package_dir={"pywarpx": "pywarpx"}, description="""Wrapper of WarpX""", diff --git a/Tools/Release/releasePR.py b/Tools/Release/releasePR.py index 9dfa178e5b4..47a380901b1 100755 --- a/Tools/Release/releasePR.py +++ b/Tools/Release/releasePR.py @@ -93,7 +93,7 @@ def concat_answers(answers): # PICSAR New Version ########################################################## -PICSAR_version = "24.09" +PICSAR_version = "25.01" answers = concat_answers(["y", PICSAR_version, PICSAR_version, "y"]) process = subprocess.Popen( diff --git a/Tools/Release/weeklyUpdate.py b/Tools/Release/weeklyUpdate.py index 005c8c5d373..6c32993f79e 100755 --- a/Tools/Release/weeklyUpdate.py +++ b/Tools/Release/weeklyUpdate.py @@ -88,7 +88,7 @@ def concat_answers(answers): # PICSAR New Version ########################################################## -PICSAR_version = "24.09" +PICSAR_version = "25.01" answers = concat_answers(["y", PICSAR_version, PICSAR_version, "y"]) process = subprocess.Popen( diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 9c8907e835b..83feb0ff1db 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -271,7 +271,7 @@ macro(find_amrex) endif() set(COMPONENT_PRECISION ${WarpX_PRECISION} P${WarpX_PARTICLE_PRECISION}) - find_package(AMReX 25.01 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} ${COMPONENT_FFT} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) + find_package(AMReX 25.02 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} ${COMPONENT_FFT} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) # note: TINYP skipped because user-configured and optional # AMReX CMake helper scripts @@ -294,7 +294,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "69f1ac884c6aba4d9881260819ade3bb25ed8aad" +set(WarpX_amrex_branch "25.02" CACHE STRING "Repository 
branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/PICSAR.cmake b/cmake/dependencies/PICSAR.cmake index 9eb9162238a..d5249b61641 100644 --- a/cmake/dependencies/PICSAR.cmake +++ b/cmake/dependencies/PICSAR.cmake @@ -88,7 +88,7 @@ function(find_picsar) #message(STATUS "PICSAR: Using version '${PICSAR_VERSION}'") else() # not supported by PICSAR (yet) - #find_package(PICSAR 24.09 CONFIG REQUIRED QED) + #find_package(PICSAR 25.01 CONFIG REQUIRED QED) #message(STATUS "PICSAR: Found version '${PICSAR_VERSION}'") message(FATAL_ERROR "PICSAR: Cannot be used as externally installed " "library yet. " @@ -109,7 +109,7 @@ if(WarpX_QED) set(WarpX_picsar_repo "https://github.com/ECP-WarpX/picsar.git" CACHE STRING "Repository URI to pull and build PICSAR from if(WarpX_picsar_internal)") - set(WarpX_picsar_branch "24.09" + set(WarpX_picsar_branch "25.01" CACHE STRING "Repository branch for WarpX_picsar_repo if(WarpX_picsar_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index 257bc264f21..975644ebf2b 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -59,7 +59,7 @@ function(find_pyamrex) endif() elseif(NOT WarpX_pyamrex_internal) # TODO: MPI control - find_package(pyAMReX 25.01 CONFIG REQUIRED) + find_package(pyAMReX 25.02 CONFIG REQUIRED) message(STATUS "pyAMReX: Found version '${pyAMReX_VERSION}'") endif() endfunction() @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "458c9ae7ab3cd4ca4e4e9736e82c60f9a7e7606c" +set(WarpX_pyamrex_branch "25.02" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") diff --git a/setup.py b/setup.py index 9538adcb106..fae11aa0654 100644 --- a/setup.py +++ b/setup.py @@ -280,7 +280,7 @@ def build_extension(self, ext): setup( name="pywarpx", # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version="25.01", + version="25.02", packages=["pywarpx"], package_dir={"pywarpx": "Python/pywarpx"}, author="Jean-Luc Vay, David P. Grote, Maxence Thévenet, Rémi Lehe, Andrew Myers, Weiqun Zhang, Axel Huebl, et al.", From 10af74faacdc0c34c3648b780f052f6e9e32394a Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 5 Feb 2025 15:04:31 -0800 Subject: [PATCH 216/278] AMReX/pyAMReX/PICSAR: Weekly Update (#5643) Weekly update to latest AMReX. Weekly update to latest pyAMReX. Weekly update to latest PICSAR (no changes). ```console ./Tools/Release/updateAMReX.py ./Tools/Release/updatepyAMReX.py ./Tools/Release/updatePICSAR.py ``` --------- Signed-off-by: Axel Huebl --- .github/workflows/cuda.yml | 2 +- cmake/dependencies/AMReX.cmake | 2 +- cmake/dependencies/pyAMReX.cmake | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 21f762f4819..0943de41e55 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -127,7 +127,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" 
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 25.02 && cd - + cd ../amrex && git checkout --detach 78bdf0faabc4101d5333ebb421e553efcc7ec04e && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 83feb0ff1db..5136cb8f2f4 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -294,7 +294,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "25.02" +set(WarpX_amrex_branch "78bdf0faabc4101d5333ebb421e553efcc7ec04e" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index 975644ebf2b..b716e883be9 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "25.02" +set(WarpX_pyamrex_branch "006bf94a4c68466fac8a1281750391b5a6083d82" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") From 609b163bb731b269fd1ce415431a492773ab04b7 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Thu, 6 Feb 2025 10:57:25 -0800 Subject: [PATCH 217/278] RTD: Fix GA Integration (#5645) GA was dropped from RTD in early Oct, 2024. This adds it again. --- Docs/requirements.txt | 1 + Docs/source/conf.py | 5 +++++ 2 files changed, 6 insertions(+) diff --git a/Docs/requirements.txt b/Docs/requirements.txt index 14fafe406fb..14d07e29f6e 100644 --- a/Docs/requirements.txt +++ b/Docs/requirements.txt @@ -27,5 +27,6 @@ sphinx-copybutton sphinx-design sphinx_rtd_theme>=1.1.1 sphinxcontrib-bibtex +sphinxcontrib-googleanalytics sphinxcontrib-napoleon yt # for checksumAPI diff --git a/Docs/source/conf.py b/Docs/source/conf.py index 666aaf858fa..a5fed3a4614 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -56,8 +56,13 @@ "sphinx_design", "breathe", "sphinxcontrib.bibtex", + "sphinxcontrib.googleanalytics", ] +# Google Analytics +googleanalytics_id = "G-QZGY5060MZ" +googleanalytics_enabled = True + # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] From 4f0f16302dbb3db346d371bbcf1a636685dab76f Mon Sep 17 00:00:00 2001 From: Brian Jensen <127121969+budjensen@users.noreply.github.com> Date: Thu, 6 Feb 2025 20:29:45 -0500 Subject: [PATCH 218/278] Add MCC forward scattering (#5621) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Added a version of forward scattering suggested in [J F J Janssen et al (2016)](https://doi.org/10.1088/0963-0252/25/5/055026). This process decreases total particle energy by the process' energy threshold. If no energy threshold is given in the input file, this process is equivalent to no collision being carried out (no scattering and no energy change). Adjusted documentation appropriately and fixed a pre-existing typo. 
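Editor's note: a minimal, non-relativistic sketch of the fixed-energy-cost variant of forward scattering described above. This is illustrative only, not the WarpX implementation; the helper name, the electron mass, and the 11.5 eV threshold are made-up stand-ins:

```python
import numpy as np

m_e = 9.1093837015e-31  # kg
q_e = 1.602176634e-19   # C

def forward_scatter(v, energy_cost_eV):
    """Subtract the energy cost by rescaling the speed; the direction
    (a scattering angle of 0) is left unchanged."""
    E_kin = 0.5 * m_e * np.dot(v, v)             # J, non-relativistic
    E_new = max(E_kin - energy_cost_eV * q_e, 0.0)
    return v * np.sqrt(E_new / E_kin) if E_kin > 0.0 else v

v_before = np.array([2.0e6, 0.0, 1.0e6])         # m/s
v_after = forward_scatter(v_before, energy_cost_eV=11.5)

# direction is preserved; only the speed drops
assert np.allclose(np.cross(v_before, v_after), 0.0)
assert np.linalg.norm(v_after) < np.linalg.norm(v_before)
```

With no energy cost supplied, `forward_scatter(v, 0.0)` returns the velocity unchanged, matching the "no collision carried out" behavior described above.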
Feature was tested on my own machine by confirming with python callbacks
that the pre-collision and post-collision velocities were equal in the
case of no energy cost threshold being supplied, and that the velocities
were scaled down by the appropriate amount when a threshold was supplied
(it was also checked that particle direction was the same before and
after collision). No formal test was added since no current MCC tests
exist and adding a framework to access cross sections while executing
tests was prohibitive.
---
 Docs/source/refs.bib                              | 11 +++++++++++
 Docs/source/theory/multiphysics/collisions.rst    | 12 +++++++++++-
 Docs/source/usage/parameters.rst                  |  4 ++--
 .../BackgroundMCC/BackgroundMCCCollision.cpp      |  8 ++++++++
 .../Collision/BinaryCollision/DSMC/DSMCFunc.cpp   |  8 ++++++++
 .../BinaryCollision/DSMC/SplitAndScatterFunc.H    |  2 ++
 Source/Particles/Collision/ScatteringProcess.H    |  1 +
 Source/Particles/Collision/ScatteringProcess.cpp  |  2 ++
 8 files changed, 45 insertions(+), 3 deletions(-)

diff --git a/Docs/source/refs.bib b/Docs/source/refs.bib
index d6c81c34404..6623bacd452 100644
--- a/Docs/source/refs.bib
+++ b/Docs/source/refs.bib
@@ -35,6 +35,17 @@ @ARTICLE{Birdsall1991
   year = {1991}
 }
 
+@misc{Janssen2016,
+author = {Janssen, J. F. J. and Pitchford, L. C. and Hagelaar, G. J. M. and van Dijk, J.},
+doi = {10.1088/0963-0252/25/5/055026},
+journal = {Plasma Sources Science and Technology},
+number = {5},
+pages = {055026},
+title = {{Evaluation of angular scattering models for electron-neutral collisions in Monte Carlo simulations}},
+volume = {25},
+year = {2016}
+}
+
 @misc{Lim2007,
   author = {Lim, Chul-Hyun},
   issn = {0419-4217},
diff --git a/Docs/source/theory/multiphysics/collisions.rst b/Docs/source/theory/multiphysics/collisions.rst
index a2b11bf42a2..1c7593a0e4e 100644
--- a/Docs/source/theory/multiphysics/collisions.rst
+++ b/Docs/source/theory/multiphysics/collisions.rst
@@ -121,13 +121,23 @@ The particle velocity in the COM frame is then isotropically scattered using the
 Back scattering
 ^^^^^^^^^^^^^^^
 
-The process is the same as for elastic scattering above expect the scattering angle is fixed at :math:`\pi`, meaning the particle velocity in the COM frame is updated to :math:`-\vec{u}_c`.
+The process is the same as for elastic scattering above except the scattering angle is fixed at :math:`\pi`, meaning the particle velocity in the COM frame is updated to :math:`-\vec{u}_c`.
 
 Excitation
 ^^^^^^^^^^
 
 The process is also the same as for elastic scattering except the excitation energy cost is subtracted from the particle energy. This is done by reducing the velocity before a scattering angle is chosen.
 
+Forward scattering
+^^^^^^^^^^^^^^^^^^
+
+This process operates in two ways:
+
+1. If an excitation energy cost is provided, the energy cost is subtracted from the particle energy and no scattering is performed.
+2. If an excitation energy cost is not provided, the particle is not scattered and the velocity is unchanged (corresponding to a scattering angle of :math:`0` in the elastic scattering process above).
+
+See :cite:t:`b-Janssen2016` for a recommended use of this process.
+
 Benchmarks
 ----------
 
diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst
index aaba7130b87..2de029127fa 100644
--- a/Docs/source/usage/parameters.rst
+++ b/Docs/source/usage/parameters.rst
@@ -2167,8 +2167,8 @@ Details about the collision models can be found in the :ref:`theory section <multiphysics-collisions>`.
 
 * ``<collision name>.scattering_processes`` (`strings` separated by spaces)
     Only for ``dsmc`` and ``background_mcc``.
The scattering processes that should be - included. Available options are ``elastic``, ``back`` & ``charge_exchange`` - for ions and ``elastic``, ``excitationX`` & ``ionization`` for electrons. + included. Available options are ``elastic``, ``excitationX``, ``forward``, ``back``, and ``charge_exchange`` + for ions and ``elastic``, ``excitationX``, ``ionization`` & ``forward`` for electrons. Multiple excitation events can be included for electrons corresponding to excitation to different levels, the ``X`` above can be changed to a unique identifier for each excitation process. For each scattering process specified diff --git a/Source/Particles/Collision/BackgroundMCC/BackgroundMCCCollision.cpp b/Source/Particles/Collision/BackgroundMCC/BackgroundMCCCollision.cpp index 80ce13744fd..8becd7d231a 100644 --- a/Source/Particles/Collision/BackgroundMCC/BackgroundMCCCollision.cpp +++ b/Source/Particles/Collision/BackgroundMCC/BackgroundMCCCollision.cpp @@ -106,6 +106,14 @@ BackgroundMCCCollision::BackgroundMCCCollision (std::string const& collision_nam utils::parser::getWithParser( pp_collision_name, kw_energy.c_str(), energy); } + // if the scattering process is forward scattering get the energy + // associated with the process if it is given (this allows forward + // scattering to be used both with and without a fixed energy loss) + else if (scattering_process.find("forward") != std::string::npos) { + const std::string kw_energy = scattering_process + "_energy"; + utils::parser::queryWithParser( + pp_collision_name, kw_energy.c_str(), energy); + } ScatteringProcess process(scattering_process, cross_section_file, energy); diff --git a/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.cpp b/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.cpp index e40a4e9822c..cf5f8de8d3c 100644 --- a/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.cpp +++ b/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.cpp @@ -46,6 +46,14 @@ DSMCFunc::DSMCFunc ( utils::parser::getWithParser( pp_collision_name, kw_energy.c_str(), energy); } + // if the scattering process is forward scattering get the energy + // associated with the process if it is given (this allows forward + // scattering to be used both with and without a fixed energy loss) + else if (scattering_process.find("forward") != std::string::npos) { + const std::string kw_energy = scattering_process + "_energy"; + utils::parser::queryWithParser( + pp_collision_name, kw_energy.c_str(), energy); + } ScatteringProcess process(scattering_process, cross_section_file, energy); diff --git a/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H b/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H index 239a76c50c7..db04dbc7f32 100644 --- a/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H +++ b/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H @@ -221,6 +221,8 @@ public: else { amrex::Abort("Uneven mass charge-exchange not implemented yet."); } + } else if (mask[i] == int(ScatteringProcessType::FORWARD)) { + amrex::Abort("Forward scattering with DSMC not implemented yet."); } else { amrex::Abort("Unknown scattering process."); diff --git a/Source/Particles/Collision/ScatteringProcess.H b/Source/Particles/Collision/ScatteringProcess.H index 59ef7a02afb..0c3f2daf8c1 100644 --- a/Source/Particles/Collision/ScatteringProcess.H +++ b/Source/Particles/Collision/ScatteringProcess.H @@ -21,6 +21,7 @@ enum class ScatteringProcessType { CHARGE_EXCHANGE, EXCITATION, IONIZATION, 
+ FORWARD, }; class ScatteringProcess diff --git a/Source/Particles/Collision/ScatteringProcess.cpp b/Source/Particles/Collision/ScatteringProcess.cpp index ea1b4b40f54..ad3f179fa18 100644 --- a/Source/Particles/Collision/ScatteringProcess.cpp +++ b/Source/Particles/Collision/ScatteringProcess.cpp @@ -87,6 +87,8 @@ ScatteringProcess::parseProcessType(const std::string& scattering_process) return ScatteringProcessType::IONIZATION; } else if (scattering_process.find("excitation") != std::string::npos) { return ScatteringProcessType::EXCITATION; + } else if (scattering_process.find("forward") != std::string::npos) { + return ScatteringProcessType::FORWARD; } else { return ScatteringProcessType::INVALID; } From 86806f9cb777c55f5caffadd24c47f7efc8fb752 Mon Sep 17 00:00:00 2001 From: David Grote Date: Thu, 6 Feb 2025 22:36:59 -0800 Subject: [PATCH 219/278] Add PMC boundary conditions (#5628) --- Docs/source/theory/boundary_conditions.rst | 20 ++++ Docs/source/usage/parameters.rst | 2 + Examples/Tests/pec/CMakeLists.txt | 10 ++ Examples/Tests/pec/inputs_test_3d_pmc_field | 54 +++++++++++ .../test_3d_magnetostatic_eb.json | 30 +++--- .../test_3d_magnetostatic_eb_picmi.json | 46 ++++----- .../benchmarks_json/test_3d_pmc_field.json | 6 ++ .../WarpXFieldBoundaries.cpp | 94 ++++++++++++++----- Source/BoundaryConditions/WarpX_PEC.H | 5 +- Source/BoundaryConditions/WarpX_PEC.cpp | 82 ++++++++++------ .../ImplicitSolvers/ImplicitSolver.cpp | 3 +- .../DivCleaner/ProjectionDivCleaner.cpp | 4 +- Source/Utils/WarpXAlgorithmSelection.H | 2 +- Source/Utils/WarpXUtil.cpp | 9 ++ Source/WarpX.H | 10 +- 15 files changed, 272 insertions(+), 105 deletions(-) create mode 100644 Examples/Tests/pec/inputs_test_3d_pmc_field create mode 100644 Regression/Checksum/benchmarks_json/test_3d_pmc_field.json diff --git a/Docs/source/theory/boundary_conditions.rst b/Docs/source/theory/boundary_conditions.rst index 395b072ccbe..d8b3de40c11 100644 --- a/Docs/source/theory/boundary_conditions.rst +++ b/Docs/source/theory/boundary_conditions.rst @@ -301,3 +301,23 @@ the right boundary is reflecting. .. bibliography:: :keyprefix: bc- + +.. _theory-bc-pmc: + +Perfect Magnetic Conductor +---------------------------- + +This boundary can be used to model a symmetric surface, where charges and current are +symmetric across the boundary. +This is equivalent to the Neumann (zero-derivative) boundary condition. +For the electromagnetic solve, at PMC, the tangential magnetic field and the normal electric +field are odd across the boundary and set to 0 on the boundary. +In the guard-cell region, those fields are set equal and +opposite to the respective field component in the mirror location across the PMC boundary. +The other components, the normal magnetic field and tangential electric field, are even +and set equal to the field component in the mirror location in the domain across the PMC boundary. + +The PMC boundary condition also impacts the deposition of charge and current density. +The charge and current densities deposited into the guard cells are reflected back into +the domain, adding them to the mirror cells in the domain. +This represents the charge and current from the virtual symmetric particles in the guard cells. 
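As a minimal sketch of the guard-cell fold described above (an editorial illustration, not code from this patch; it assumes a 1D nodal grid with `ng` guard cells, and all names are invented):

    #include <vector>

    // Fold charge deposited in the guard cells below a lower PMC boundary back
    // into the mirror cells inside the domain.
    void fold_rho_at_pmc_lo (std::vector<double>& rho, int domain_lo, int ng)
    {
        constexpr double psign = 1.0; // even symmetry at PMC; a PEC boundary uses -1.0
        for (int g = 1; g <= ng; ++g) {
            int const guard  = domain_lo - g; // holds the virtual particles' deposit
            int const mirror = domain_lo + g; // matching cell inside the domain
            rho[mirror] += psign * rho[guard];
        }
    }

The same fold applies to the current density, except that there the sign also depends on whether the component is tangential or normal to the boundary.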
diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst
index 2de029127fa..253f9ca0071 100644
--- a/Docs/source/usage/parameters.rst
+++ b/Docs/source/usage/parameters.rst
@@ -533,6 +533,8 @@ Domain Boundary Conditions
     * ``pec``: This option can be used to set a Perfect Electric Conductor at the simulation boundary. Please see the :ref:`PEC theory section <theory-bc-pec>` for more details. Note that PEC boundary is invalid at `r=0` for the RZ solver. Please use ``none`` option. This boundary condition does not work with the spectral solver.
 
+    * ``pmc``: This option can be used to set a Perfect Magnetic Conductor at the simulation boundary. Please see the :ref:`PMC theory section <theory-bc-pmc>` for more details. This is equivalent to ``Neumann``. This boundary condition does not work with the spectral solver.
+
     * ``pec_insulator``: This option specifies a mixed perfect electric conductor and insulator boundary, where some part of the boundary is PEC and some is insulator. In the insulator portion, the normal fields are extrapolated and the tangential fields are either set to the specified value or extrapolated. The region that is insulator is specified using a spatially dependent expression with the insulator being in the area where the value of the expression is greater than zero.
diff --git a/Examples/Tests/pec/CMakeLists.txt b/Examples/Tests/pec/CMakeLists.txt
index 66d9dd1c13e..15aa17c2d5f 100644
--- a/Examples/Tests/pec/CMakeLists.txt
+++ b/Examples/Tests/pec/CMakeLists.txt
@@ -41,6 +41,16 @@ add_warpx_test(
     OFF # dependency
 )
 
+add_warpx_test(
+    test_3d_pmc_field # name
+    3 # dims
+    2 # nprocs
+    inputs_test_3d_pmc_field # inputs
+    "analysis_pec.py diags/diag1000134" # analysis
+    "analysis_default_regression.py --path diags/diag1000134" # checksum
+    OFF # dependency
+)
+
 add_warpx_test(
     test_2d_pec_field_insulator_implicit # name
     2 # dims
diff --git a/Examples/Tests/pec/inputs_test_3d_pmc_field b/Examples/Tests/pec/inputs_test_3d_pmc_field
new file mode 100644
index 00000000000..2fc1cb9e5ab
--- /dev/null
+++ b/Examples/Tests/pec/inputs_test_3d_pmc_field
@@ -0,0 +1,54 @@
+# Set-up to test the PMC boundary condition for the fields.
+# Constructive interference between the incident and reflected wave results in a
+# standing wave.
+
+# max step
+max_step = 134
+
+# number of grid points
+amr.n_cell = 32 32 256
+
+# Maximum allowable size of each subdomain
+amr.max_grid_size = 1024
+amr.blocking_factor = 32
+
+amr.max_level = 0
+
+# Geometry
+geometry.dims = 3
+geometry.prob_lo = -8.e-6 -8.e-6 -4.e-6
+geometry.prob_hi = 8.e-6 8.e-6 4.e-6
+
+# Boundary condition
+boundary.field_lo = periodic periodic pmc
+boundary.field_hi = periodic periodic pmc
+
+warpx.serialize_initial_conditions = 1
+
+# Verbosity
+warpx.verbose = 1
+
+# Algorithms
+algo.current_deposition = esirkepov
+# CFL
+warpx.cfl = 0.9
+
+
+my_constants.z1 = -2.e-6
+my_constants.z2 = 2.e-6
+my_constants.wavelength = 1.e-6
+warpx.E_ext_grid_init_style = parse_E_ext_grid_function
+warpx.Ez_external_grid_function(x,y,z) = "0."
+warpx.Ex_external_grid_function(x,y,z) = "0."
+warpx.Ey_external_grid_function(x,y,z) = "((1.e5*sin(2*pi*(z)/wavelength)) * (z<z2)*(z>z1))"
+
+warpx.B_ext_grid_init_style = parse_B_ext_grid_function
+warpx.Bx_external_grid_function(x,y,z)= "(((-1.e5*sin(2*pi*(z)/wavelength))/clight))*(z<z2)*(z>z1)"
+warpx.By_external_grid_function(x,y,z)= "0."
+warpx.Bz_external_grid_function(x,y,z) = "0."
+ +# Diagnostics +diagnostics.diags_names = diag1 +diag1.intervals = 134 +diag1.diag_type = Full +diag1.fields_to_plot = Ey Bx diff --git a/Regression/Checksum/benchmarks_json/test_3d_magnetostatic_eb.json b/Regression/Checksum/benchmarks_json/test_3d_magnetostatic_eb.json index a1ec0b4c831..6415fc3e930 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_magnetostatic_eb.json +++ b/Regression/Checksum/benchmarks_json/test_3d_magnetostatic_eb.json @@ -1,21 +1,21 @@ { "lev=0": { - "Az": 11.358663326449284, - "Bx": 111.55929407644248, - "By": 111.55929407644244, - "Ex": 31257180402.55472, - "Ey": 31257180402.55473, - "jz": 1034841325.9848926, - "phi": 3143521213.0157924, - "rho": 3.449203918900721 + "Az": 11.358663299932457, + "Bx": 111.55929615203162, + "By": 111.55929615203165, + "Ex": 31463410849.74626, + "Ey": 31463410849.746258, + "jz": 1034841323.6861029, + "phi": 3164328318.15416, + "rho": 3.4565836983918676 }, "beam": { - "particle_momentum_x": 1.3604657334742729e-21, - "particle_momentum_y": 1.3604657334742772e-21, - "particle_momentum_z": 7.150873450281544e-16, - "particle_position_x": 11163.99997371537, - "particle_position_y": 11163.999973715368, - "particle_position_z": 131662.50031035842, + "particle_momentum_x": 1.3829464728617761e-21, + "particle_momentum_y": 1.3829464728617792e-21, + "particle_momentum_z": 7.150871807235339e-16, + "particle_position_x": 11163.99997715059, + "particle_position_y": 11163.999977150592, + "particle_position_z": 131662.5003102683, "particle_weight": 20895107655113.465 } -} \ No newline at end of file +} diff --git a/Regression/Checksum/benchmarks_json/test_3d_magnetostatic_eb_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_magnetostatic_eb_picmi.json index abe91ac9e9d..2c99a4218c2 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_magnetostatic_eb_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_3d_magnetostatic_eb_picmi.json @@ -1,27 +1,27 @@ { + "lev=0": { + "Ax": 1.40889223759456e-05, + "Ay": 1.4088922375945606e-05, + "Az": 11.423480450267745, + "Bx": 112.23826705481486, + "By": 112.23826705481484, + "Bz": 0.00019199345672949735, + "Ex": 31557746267.686367, + "Ey": 31557746267.686363, + "Ez": 3339526660.3539834, + "jx": 1980.6549408566577, + "jy": 1980.6549408566577, + "jz": 1038931605.1197203, + "phi": 3171976204.251914, + "rho": 3.4840085919357926 + }, "beam": { - "particle_momentum_x": 1.3878812158350944e-21, - "particle_momentum_y": 1.387881215835094e-21, - "particle_momentum_z": 7.150872953138685e-16, - "particle_position_x": 11163.999973134894, - "particle_position_y": 11163.999973134896, - "particle_position_z": 131662.5003103311, + "particle_momentum_x": 1.4011190163358655e-21, + "particle_momentum_y": 1.401119016335865e-21, + "particle_momentum_z": 7.15087179293042e-16, + "particle_position_x": 11163.99997543546, + "particle_position_y": 11163.999975435456, + "particle_position_z": 131662.50031026747, "particle_weight": 20895107655113.465 - }, - "lev=0": { - "Ax": 1.408892468360627e-05, - "Ay": 1.4088924683606269e-05, - "Az": 11.423480469161868, - "Bx": 112.23826555908032, - "By": 112.2382655590803, - "Bz": 0.00019186770330025167, - "Ex": 31418238386.183773, - "Ey": 31418238386.183773, - "Ez": 3461330433.5026026, - "jx": 1961.0003914783667, - "jy": 1961.0003914783663, - "jz": 1038931606.7573991, - "phi": 3157908107.1102533, - "rho": 3.46977258905983 } -} \ No newline at end of file +} diff --git a/Regression/Checksum/benchmarks_json/test_3d_pmc_field.json 
b/Regression/Checksum/benchmarks_json/test_3d_pmc_field.json new file mode 100644 index 00000000000..486f8bb965d --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_pmc_field.json @@ -0,0 +1,6 @@ +{ + "lev=0": { + "Bx": 4.1354151621557795, + "Ey": 8373879983.480644 + } +} diff --git a/Source/BoundaryConditions/WarpXFieldBoundaries.cpp b/Source/BoundaryConditions/WarpXFieldBoundaries.cpp index 692c9938e86..6217eb04a33 100644 --- a/Source/BoundaryConditions/WarpXFieldBoundaries.cpp +++ b/Source/BoundaryConditions/WarpXFieldBoundaries.cpp @@ -56,10 +56,8 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type, amrex::Real if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { if (patch_type == PatchType::fine) { PEC::ApplyPECtoEfield( - {m_fields.get(FieldType::Efield_fp, Direction{0}, lev), - m_fields.get(FieldType::Efield_fp, Direction{1}, lev), - m_fields.get(FieldType::Efield_fp, Direction{2}, lev)}, - field_boundary_lo, field_boundary_hi, + m_fields.get_alldirs(FieldType::Efield_fp, lev), + field_boundary_lo, field_boundary_hi, FieldBoundaryType::PEC, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio); if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { @@ -67,25 +65,59 @@ void WarpX::ApplyEfieldBoundary(const int lev, PatchType patch_type, amrex::Real const bool split_pml_field = true; PEC::ApplyPECtoEfield( m_fields.get_alldirs(FieldType::pml_E_fp, lev), - field_boundary_lo, field_boundary_hi, + field_boundary_lo, field_boundary_hi, FieldBoundaryType::PEC, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio, split_pml_field); } } else { PEC::ApplyPECtoEfield( - {m_fields.get(FieldType::Efield_cp,Direction{0},lev), - m_fields.get(FieldType::Efield_cp,Direction{1},lev), - m_fields.get(FieldType::Efield_cp,Direction{2},lev)}, - field_boundary_lo, field_boundary_hi, - get_ng_fieldgather(), Geom(lev), - lev, patch_type, ref_ratio); + m_fields.get_alldirs(FieldType::Efield_cp, lev), + field_boundary_lo, field_boundary_hi, FieldBoundaryType::PEC, + get_ng_fieldgather(), Geom(lev), + lev, patch_type, ref_ratio); if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { // apply pec on split E-fields in PML region const bool split_pml_field = true; PEC::ApplyPECtoEfield( m_fields.get_alldirs(FieldType::pml_E_cp, lev), - field_boundary_lo, field_boundary_hi, + field_boundary_lo, field_boundary_hi, FieldBoundaryType::PEC, + get_ng_fieldgather(), Geom(lev), + lev, patch_type, ref_ratio, + split_pml_field); + } + } + } + + if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { + if (patch_type == PatchType::fine) { + PEC::ApplyPECtoBfield( + m_fields.get_alldirs(FieldType::Efield_fp, lev), + field_boundary_lo, field_boundary_hi, FieldBoundaryType::PMC, + get_ng_fieldgather(), Geom(lev), + lev, patch_type, ref_ratio); + if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { + // apply pec on split E-fields in PML region + const bool split_pml_field = true; + PEC::ApplyPECtoBfield( + m_fields.get_alldirs(FieldType::pml_E_fp, lev), + field_boundary_lo, field_boundary_hi, FieldBoundaryType::PMC, + get_ng_fieldgather(), Geom(lev), + lev, patch_type, ref_ratio, + split_pml_field); + } + } else { + PEC::ApplyPECtoBfield( + m_fields.get_alldirs(FieldType::Efield_cp, lev), + field_boundary_lo, field_boundary_hi, FieldBoundaryType::PMC, + get_ng_fieldgather(), Geom(lev), + lev, patch_type, ref_ratio); + if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { + // apply pec on split E-fields in PML region + const bool 
split_pml_field = true; + PEC::ApplyPECtoBfield( + m_fields.get_alldirs(FieldType::pml_E_cp, lev), + field_boundary_lo, field_boundary_hi, FieldBoundaryType::PMC, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio, split_pml_field); @@ -152,19 +184,31 @@ void WarpX::ApplyBfieldBoundary (const int lev, PatchType patch_type, DtType a_d if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { if (patch_type == PatchType::fine) { - PEC::ApplyPECtoBfield( { - m_fields.get(FieldType::Bfield_fp,Direction{0},lev), - m_fields.get(FieldType::Bfield_fp,Direction{1},lev), - m_fields.get(FieldType::Bfield_fp,Direction{2},lev) }, - field_boundary_lo, field_boundary_hi, + PEC::ApplyPECtoBfield( + m_fields.get_alldirs(FieldType::Bfield_fp, lev), + field_boundary_lo, field_boundary_hi, FieldBoundaryType::PEC, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio); } else { - PEC::ApplyPECtoBfield( { - m_fields.get(FieldType::Bfield_cp,Direction{0},lev), - m_fields.get(FieldType::Bfield_cp,Direction{1},lev), - m_fields.get(FieldType::Bfield_cp,Direction{2},lev) }, - field_boundary_lo, field_boundary_hi, + PEC::ApplyPECtoBfield( + m_fields.get_alldirs(FieldType::Bfield_cp, lev), + field_boundary_lo, field_boundary_hi, FieldBoundaryType::PEC, + get_ng_fieldgather(), Geom(lev), + lev, patch_type, ref_ratio); + } + } + + if (::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { + if (patch_type == PatchType::fine) { + PEC::ApplyPECtoEfield( + m_fields.get_alldirs(FieldType::Bfield_fp, lev), + field_boundary_lo, field_boundary_hi, FieldBoundaryType::PMC, + get_ng_fieldgather(), Geom(lev), + lev, patch_type, ref_ratio); + } else { + PEC::ApplyPECtoEfield( + m_fields.get_alldirs(FieldType::Bfield_cp, lev), + field_boundary_lo, field_boundary_hi, FieldBoundaryType::PMC, get_ng_fieldgather(), Geom(lev), lev, patch_type, ref_ratio); } @@ -224,7 +268,8 @@ void WarpX::ApplyRhofieldBoundary (const int lev, MultiFab* rho, { if (::isAnyBoundary(particle_boundary_lo, particle_boundary_hi) || ::isAnyBoundary(particle_boundary_lo, particle_boundary_hi) || - ::isAnyBoundary(field_boundary_lo, field_boundary_hi)) + ::isAnyBoundary(field_boundary_lo, field_boundary_hi) || + ::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { PEC::ApplyReflectiveBoundarytoRhofield(rho, field_boundary_lo, field_boundary_hi, @@ -239,7 +284,8 @@ void WarpX::ApplyJfieldBoundary (const int lev, amrex::MultiFab* Jx, { if (::isAnyBoundary(particle_boundary_lo, particle_boundary_hi) || ::isAnyBoundary(particle_boundary_lo, particle_boundary_hi) || - ::isAnyBoundary(field_boundary_lo, field_boundary_hi)) + ::isAnyBoundary(field_boundary_lo, field_boundary_hi) || + ::isAnyBoundary(field_boundary_lo, field_boundary_hi)) { PEC::ApplyReflectiveBoundarytoJfield(Jx, Jy, Jz, field_boundary_lo, field_boundary_hi, diff --git a/Source/BoundaryConditions/WarpX_PEC.H b/Source/BoundaryConditions/WarpX_PEC.H index c387d8c1793..e3fd804b62c 100644 --- a/Source/BoundaryConditions/WarpX_PEC.H +++ b/Source/BoundaryConditions/WarpX_PEC.H @@ -33,6 +33,7 @@ namespace PEC { std::array Efield, const amrex::Array& field_boundary_lo, const amrex::Array& field_boundary_hi, + FieldBoundaryType bc_type, const amrex::IntVect& ng_fieldgather, const amrex::Geometry& geom, int lev, PatchType patch_type, const amrex::Vector& ref_ratios, bool split_pml_field = false); @@ -54,8 +55,10 @@ namespace PEC { std::array Bfield, const amrex::Array& field_boundary_lo, const amrex::Array& field_boundary_hi, + FieldBoundaryType bc_type, const amrex::IntVect& 
ng_fieldgather, const amrex::Geometry& geom, - int lev, PatchType patch_type, const amrex::Vector& ref_ratios); + int lev, PatchType patch_type, const amrex::Vector& ref_ratios, + bool split_pml_field = false); /** * \brief Reflects charge density deposited over the PEC boundary back into diff --git a/Source/BoundaryConditions/WarpX_PEC.cpp b/Source/BoundaryConditions/WarpX_PEC.cpp index bedc5b264b7..a3b75791582 100644 --- a/Source/BoundaryConditions/WarpX_PEC.cpp +++ b/Source/BoundaryConditions/WarpX_PEC.cpp @@ -121,7 +121,8 @@ namespace amrex::Array4 const& Efield, const amrex::IntVect& is_nodal, amrex::GpuArray const& fbndry_lo, - amrex::GpuArray const& fbndry_hi ) + amrex::GpuArray const& fbndry_hi, + FieldBoundaryType bc_type) { // Tangential Efield components in guard cells set equal and opposite to cells // in the mirror locations across the PEC boundary, whereas normal E-field @@ -136,8 +137,8 @@ namespace // Loop over sides, iside = 0 (lo), iside = 1 (hi) for (int iside = 0; iside < 2; ++iside) { const bool isPECBoundary = ( (iside == 0) - ? fbndry_lo[idim] == FieldBoundaryType::PEC - : fbndry_hi[idim] == FieldBoundaryType::PEC ); + ? fbndry_lo[idim] == bc_type + : fbndry_hi[idim] == bc_type ); #if (defined WARPX_DIM_XZ) || (defined WARPX_DIM_RZ) // For 2D : for icomp==1, (Ey in XZ, Etheta in RZ), // icomp=1 is tangential to both x and z boundaries @@ -260,7 +261,8 @@ namespace amrex::Array4 const& Bfield, const amrex::IntVect & is_nodal, amrex::GpuArray const& fbndry_lo, - amrex::GpuArray const& fbndry_hi ) + amrex::GpuArray const& fbndry_hi, + FieldBoundaryType bc_type) { amrex::IntVect ijk_mirror = ijk_vec; bool OnPECBoundary = false; @@ -271,8 +273,8 @@ namespace // Loop over sides, iside = 0 (lo), iside = 1 (hi) for (int iside = 0; iside < 2; ++iside) { const bool isPECBoundary = ( (iside == 0) - ? fbndry_lo[idim] == FieldBoundaryType::PEC - : fbndry_hi[idim] == FieldBoundaryType::PEC ); + ? 
fbndry_lo[idim] == bc_type + : fbndry_hi[idim] == bc_type ); if (isPECBoundary) { #if (defined WARPX_DIM_XZ) || (defined WARPX_DIM_RZ) // For 2D : for icomp==1, (By in XZ, Btheta in RZ), @@ -357,7 +359,7 @@ namespace amrex::Array4 const& field, amrex::GpuArray, AMREX_SPACEDIM> const& mirrorfac, amrex::GpuArray, AMREX_SPACEDIM> const& psign, - amrex::GpuArray, AMREX_SPACEDIM> const& is_reflective, + amrex::GpuArray, AMREX_SPACEDIM> const& is_reflective, amrex::GpuArray const& tangent_to_bndy, amrex::Box const& fabbox) { @@ -374,11 +376,11 @@ namespace amrex::IntVect iv_mirror = ijk_vec; iv_mirror[idim] = mirrorfac[idim][iside] - ijk_vec[idim]; - // On the PEC boundary the charge/current density is set to 0 - if (ijk_vec == iv_mirror) { - field(ijk_vec, n) = 0._rt; - // otherwise update the internal cell if the mirror guard cell exists + // Update the cell if the mirror guard cell exists + if (ijk_vec == iv_mirror && is_reflective[idim][iside] == 1) { + field(ijk_vec,n) = 0._rt; } else if (fabbox.contains(iv_mirror)) { + // Note that this includes the cells on the boundary for PMC field(ijk_vec,n) += psign[idim][iside] * field(iv_mirror,n); } } @@ -459,6 +461,7 @@ PEC::ApplyPECtoEfield ( std::array Efield, const amrex::Array& field_boundary_lo, const amrex::Array& field_boundary_hi, + FieldBoundaryType bc_type, const amrex::IntVect& ng_fieldgather, const amrex::Geometry& geom, const int lev, PatchType patch_type, const amrex::Vector& ref_ratios, const bool split_pml_field) @@ -514,7 +517,7 @@ PEC::ApplyPECtoEfield ( const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); const int icomp = 0; ::SetEfieldOnPEC(icomp, domain_lo, domain_hi, iv, n, - Ex, Ex_nodal, fbndry_lo, fbndry_hi); + Ex, Ex_nodal, fbndry_lo, fbndry_hi, bc_type); }, tey, nComp_y, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { @@ -522,7 +525,7 @@ PEC::ApplyPECtoEfield ( const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); const int icomp = 1; ::SetEfieldOnPEC(icomp, domain_lo, domain_hi, iv, n, - Ey, Ey_nodal, fbndry_lo, fbndry_hi); + Ey, Ey_nodal, fbndry_lo, fbndry_hi, bc_type); }, tez, nComp_z, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { @@ -530,7 +533,7 @@ PEC::ApplyPECtoEfield ( const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); const int icomp = 2; ::SetEfieldOnPEC(icomp, domain_lo, domain_hi, iv, n, - Ez, Ez_nodal, fbndry_lo, fbndry_hi); + Ez, Ez_nodal, fbndry_lo, fbndry_hi, bc_type); } ); } @@ -542,8 +545,10 @@ PEC::ApplyPECtoBfield ( std::array Bfield, const amrex::Array& field_boundary_lo, const amrex::Array& field_boundary_hi, + FieldBoundaryType bc_type, const amrex::IntVect& ng_fieldgather, const amrex::Geometry& geom, - const int lev, PatchType patch_type, const amrex::Vector& ref_ratios) + const int lev, PatchType patch_type, const amrex::Vector& ref_ratios, + const bool split_pml_field) { amrex::Box domain_box = geom.Domain(); if (patch_type == PatchType::coarse && (lev > 0)) { @@ -579,9 +584,12 @@ PEC::ApplyPECtoBfield ( // gather fields from in the guard-cell region are included. // Note that for simulations without particles or laser, ng_field_gather is 0 // and the guard-cell values of the B-field multifab will not be modified. - amrex::Box const& tbx = mfi.tilebox(Bfield[0]->ixType().toIntVect(), ng_fieldgather); - amrex::Box const& tby = mfi.tilebox(Bfield[1]->ixType().toIntVect(), ng_fieldgather); - amrex::Box const& tbz = mfi.tilebox(Bfield[2]->ixType().toIntVect(), ng_fieldgather); + amrex::Box const& tbx = (split_pml_field) ? 
mfi.tilebox(Bfield[0]->ixType().toIntVect()) + : mfi.tilebox(Bfield[0]->ixType().toIntVect(), ng_fieldgather); + amrex::Box const& tby = (split_pml_field) ? mfi.tilebox(Bfield[1]->ixType().toIntVect()) + : mfi.tilebox(Bfield[1]->ixType().toIntVect(), ng_fieldgather); + amrex::Box const& tbz = (split_pml_field) ? mfi.tilebox(Bfield[2]->ixType().toIntVect()) + : mfi.tilebox(Bfield[2]->ixType().toIntVect(), ng_fieldgather); // loop over cells and update fields amrex::ParallelFor( @@ -591,7 +599,7 @@ PEC::ApplyPECtoBfield ( const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); const int icomp = 0; ::SetBfieldOnPEC(icomp, domain_lo, domain_hi, iv, n, - Bx, Bx_nodal, fbndry_lo, fbndry_hi); + Bx, Bx_nodal, fbndry_lo, fbndry_hi, bc_type); }, tby, nComp_y, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { @@ -599,7 +607,7 @@ PEC::ApplyPECtoBfield ( const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); const int icomp = 1; ::SetBfieldOnPEC(icomp, domain_lo, domain_hi, iv, n, - By, By_nodal, fbndry_lo, fbndry_hi); + By, By_nodal, fbndry_lo, fbndry_hi, bc_type); }, tbz, nComp_z, [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) { @@ -607,7 +615,7 @@ PEC::ApplyPECtoBfield ( const amrex::IntVect iv(AMREX_D_DECL(i,j,k)); const int icomp = 2; ::SetBfieldOnPEC(icomp, domain_lo, domain_hi, iv, n, - Bz, Bz_nodal, fbndry_lo, fbndry_hi); + Bz, Bz_nodal, fbndry_lo, fbndry_hi, bc_type); } ); } @@ -650,7 +658,7 @@ PEC::ApplyReflectiveBoundarytoRhofield ( // cells for boundaries that are NOT PEC amrex::Box grown_domain_box = domain_box; - amrex::GpuArray, AMREX_SPACEDIM> is_reflective; + amrex::GpuArray, AMREX_SPACEDIM> is_reflective; amrex::GpuArray is_tangent_to_bndy; amrex::GpuArray, AMREX_SPACEDIM> psign; amrex::GpuArray, AMREX_SPACEDIM> mirrorfac; @@ -658,9 +666,11 @@ PEC::ApplyReflectiveBoundarytoRhofield ( is_reflective[idim][0] = ( particle_boundary_lo[idim] == ParticleBoundaryType::Reflecting) || ( particle_boundary_lo[idim] == ParticleBoundaryType::Thermal) || ( field_boundary_lo[idim] == FieldBoundaryType::PEC); + if (field_boundary_lo[idim] == FieldBoundaryType::PMC) { is_reflective[idim][0] = 2; } is_reflective[idim][1] = ( particle_boundary_hi[idim] == ParticleBoundaryType::Reflecting) || ( particle_boundary_hi[idim] == ParticleBoundaryType::Thermal) || ( field_boundary_hi[idim] == FieldBoundaryType::PEC); + if (field_boundary_hi[idim] == FieldBoundaryType::PMC) { is_reflective[idim][1] = 2; } if (!is_reflective[idim][0]) { grown_domain_box.growLo(idim, ng_fieldgather[idim]); } if (!is_reflective[idim][1]) { grown_domain_box.growHi(idim, ng_fieldgather[idim]); } @@ -669,10 +679,12 @@ PEC::ApplyReflectiveBoundarytoRhofield ( is_tangent_to_bndy[idim] = true; psign[idim][0] = ((particle_boundary_lo[idim] == ParticleBoundaryType::Reflecting) - ||(particle_boundary_lo[idim] == ParticleBoundaryType::Thermal)) + ||(particle_boundary_lo[idim] == ParticleBoundaryType::Thermal) + ||(field_boundary_lo[idim] == FieldBoundaryType::PMC)) ? 1._rt : -1._rt; psign[idim][1] = ((particle_boundary_hi[idim] == ParticleBoundaryType::Reflecting) - ||(particle_boundary_hi[idim] == ParticleBoundaryType::Thermal)) + ||(particle_boundary_hi[idim] == ParticleBoundaryType::Thermal) + ||(field_boundary_hi[idim] == FieldBoundaryType::PMC)) ? 
1._rt : -1._rt; mirrorfac[idim][0] = 2*domain_lo[idim] - (1 - rho_nodal[idim]); mirrorfac[idim][1] = 2*domain_hi[idim] + (1 - rho_nodal[idim]); @@ -746,17 +758,21 @@ PEC::ApplyReflectiveBoundarytoJfield( // directions of the current density multifab const amrex::IntVect ng_fieldgather = Jx->nGrowVect(); - amrex::GpuArray, AMREX_SPACEDIM> is_reflective; + amrex::GpuArray, AMREX_SPACEDIM> is_reflective; amrex::GpuArray, 3> is_tangent_to_bndy; amrex::GpuArray, AMREX_SPACEDIM>, 3> psign; amrex::GpuArray, AMREX_SPACEDIM>, 3> mirrorfac; for (int idim=0; idim < AMREX_SPACEDIM; ++idim) { is_reflective[idim][0] = ( particle_boundary_lo[idim] == ParticleBoundaryType::Reflecting) || ( particle_boundary_lo[idim] == ParticleBoundaryType::Thermal) - || ( field_boundary_lo[idim] == FieldBoundaryType::PEC); + || ( field_boundary_lo[idim] == FieldBoundaryType::PEC) + || ( field_boundary_lo[idim] == FieldBoundaryType::PMC); + if (field_boundary_lo[idim] == FieldBoundaryType::PMC) { is_reflective[idim][0] = 2; } is_reflective[idim][1] = ( particle_boundary_hi[idim] == ParticleBoundaryType::Reflecting) || ( particle_boundary_hi[idim] == ParticleBoundaryType::Thermal) - || ( field_boundary_hi[idim] == FieldBoundaryType::PEC); + || ( field_boundary_hi[idim] == FieldBoundaryType::PEC) + || ( field_boundary_hi[idim] == FieldBoundaryType::PMC); + if (field_boundary_hi[idim] == FieldBoundaryType::PMC) { is_reflective[idim][1] = 2; } if (!is_reflective[idim][0]) { grown_domain_box.growLo(idim, ng_fieldgather[idim]); } if (!is_reflective[idim][1]) { grown_domain_box.growHi(idim, ng_fieldgather[idim]); } @@ -778,18 +794,22 @@ PEC::ApplyReflectiveBoundarytoJfield( if (is_tangent_to_bndy[icomp][idim]){ psign[icomp][idim][0] = ( (particle_boundary_lo[idim] == ParticleBoundaryType::Reflecting) - ||(particle_boundary_lo[idim] == ParticleBoundaryType::Thermal)) + ||(particle_boundary_lo[idim] == ParticleBoundaryType::Thermal) + ||(field_boundary_lo[idim] == FieldBoundaryType::PMC)) ? 1._rt : -1._rt; psign[icomp][idim][1] = ( (particle_boundary_hi[idim] == ParticleBoundaryType::Reflecting) - ||(particle_boundary_hi[idim] == ParticleBoundaryType::Thermal)) + ||(particle_boundary_hi[idim] == ParticleBoundaryType::Thermal) + ||(field_boundary_hi[idim] == FieldBoundaryType::PMC)) ? 1._rt : -1._rt; } else { psign[icomp][idim][0] = ( (particle_boundary_lo[idim] == ParticleBoundaryType::Reflecting) - ||(particle_boundary_lo[idim] == ParticleBoundaryType::Thermal)) + ||(particle_boundary_lo[idim] == ParticleBoundaryType::Thermal) + ||(field_boundary_lo[idim] == FieldBoundaryType::PMC)) ? -1._rt : 1._rt; psign[icomp][idim][1] = ( (particle_boundary_hi[idim] == ParticleBoundaryType::Reflecting) - ||(particle_boundary_hi[idim] == ParticleBoundaryType::Thermal)) + ||(particle_boundary_hi[idim] == ParticleBoundaryType::Thermal) + ||(field_boundary_hi[idim] == FieldBoundaryType::PMC)) ? 
-1._rt : 1._rt; } } diff --git a/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp index da60bc62c46..d06e84859d8 100644 --- a/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp +++ b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp @@ -62,13 +62,12 @@ Array ImplicitSolver::convertFieldBCToLinOpBC (const lbc[i] = LinOpBCType::Periodic; } else if (a_fbc[i] == FieldBoundaryType::PEC) { WARPX_ABORT_WITH_MESSAGE("LinOpBCType not set for this FieldBoundaryType"); - } else if (a_fbc[i] == FieldBoundaryType::PMC) { - WARPX_ABORT_WITH_MESSAGE("LinOpBCType not set for this FieldBoundaryType"); } else if (a_fbc[i] == FieldBoundaryType::Damped) { WARPX_ABORT_WITH_MESSAGE("LinOpBCType not set for this FieldBoundaryType"); } else if (a_fbc[i] == FieldBoundaryType::Absorbing_SilverMueller) { WARPX_ABORT_WITH_MESSAGE("LinOpBCType not set for this FieldBoundaryType"); } else if (a_fbc[i] == FieldBoundaryType::Neumann) { + // Also for FieldBoundaryType::PMC lbc[i] = LinOpBCType::Neumann; } else if (a_fbc[i] == FieldBoundaryType::None) { WARPX_ABORT_WITH_MESSAGE("LinOpBCType not set for this FieldBoundaryType"); diff --git a/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp b/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp index 670f962f7c3..1209f621e31 100644 --- a/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp +++ b/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp @@ -141,7 +141,7 @@ ProjectionDivCleaner::solve () std::map bcmap{ {FieldBoundaryType::PEC, LinOpBCType::Dirichlet}, - {FieldBoundaryType::Neumann, LinOpBCType::Neumann}, + {FieldBoundaryType::Neumann, LinOpBCType::Neumann}, // Note that PMC is the same as Neumann {FieldBoundaryType::Periodic, LinOpBCType::Periodic}, {FieldBoundaryType::None, LinOpBCType::Neumann} }; @@ -151,7 +151,7 @@ ProjectionDivCleaner::solve () auto ithi = bcmap.find(WarpX::field_boundary_hi[idim]); if (itlo == bcmap.end() || ithi == bcmap.end()) { WARPX_ABORT_WITH_MESSAGE( - "Field boundary conditions have to be either periodic, PEC or neumann " + "Field boundary conditions have to be either periodic, PEC, PMC, or neumann " "when using the MLMG projection based divergence cleaner solver." ); } diff --git a/Source/Utils/WarpXAlgorithmSelection.H b/Source/Utils/WarpXAlgorithmSelection.H index 187be924666..278088e16b6 100644 --- a/Source/Utils/WarpXAlgorithmSelection.H +++ b/Source/Utils/WarpXAlgorithmSelection.H @@ -124,11 +124,11 @@ AMREX_ENUM(FieldBoundaryType, Periodic, PEC, //!< perfect electric conductor (PEC) with E_tangential=0 PMC, //!< perfect magnetic conductor (PMC) with B_tangential=0 + Neumann = PMC, // For electrostatic, the normal E is set to zero Damped, // Fields in the guard cells are damped for PSATD //in the moving window direction Absorbing_SilverMueller, // Silver-Mueller boundary condition absorbingsilvermueller = Absorbing_SilverMueller, - Neumann, // For electrostatic, the normal E is set to zero None, // The fields values at the boundary are not updated. This is // useful for RZ simulations, at r=0. Open, // Used in the Integrated Green Function Poisson solver diff --git a/Source/Utils/WarpXUtil.cpp b/Source/Utils/WarpXUtil.cpp index dcaa3118ab4..ae2adfac043 100644 --- a/Source/Utils/WarpXUtil.cpp +++ b/Source/Utils/WarpXUtil.cpp @@ -443,6 +443,15 @@ void ReadBCParams () "PEC boundary not implemented for PSATD, yet!" 
); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + (electromagnetic_solver_id != ElectromagneticSolverAlgo::PSATD) || + ( + WarpX::field_boundary_lo[idim] != FieldBoundaryType::PMC && + WarpX::field_boundary_hi[idim] != FieldBoundaryType::PMC + ), + "PMC boundary not implemented for PSATD, yet!" + ); + if(WarpX::field_boundary_lo[idim] == FieldBoundaryType::Open && WarpX::field_boundary_hi[idim] == FieldBoundaryType::Open){ WARPX_ALWAYS_ASSERT_WITH_MESSAGE( diff --git a/Source/WarpX.H b/Source/WarpX.H index 077e8f5d954..729e6f7d126 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -212,17 +212,15 @@ public: * (BackwardEuler - 0, Lax-Wendroff - 1) */ static inline auto macroscopic_solver_algo = MacroscopicSolverAlgo::Default; - /** Integers that correspond to boundary condition applied to fields at the - * lower domain boundaries - * (0 to 6 correspond to PML, Periodic, PEC, PMC, Damped, Absorbing Silver-Mueller, None) + /** Boundary conditions applied to fields at the lower domain boundaries + * (Possible values PML, Periodic, PEC, PMC, Neumann, Damped, Absorbing Silver-Mueller, None) */ static inline amrex::Array field_boundary_lo {AMREX_D_DECL(FieldBoundaryType::Default, FieldBoundaryType::Default, FieldBoundaryType::Default)}; - /** Integers that correspond to boundary condition applied to fields at the - * upper domain boundaries - * (0 to 6 correspond to PML, Periodic, PEC, PMC, Damped, Absorbing Silver-Mueller, None) + /** Boundary conditions applied to fields at the upper domain boundaries + * (Possible values PML, Periodic, PEC, PMC, Neumann, Damped, Absorbing Silver-Mueller, None) */ static inline amrex::Array field_boundary_hi {AMREX_D_DECL(FieldBoundaryType::Default, From c0eacd9225b7ed0b54ba637a3974c8c1758023db Mon Sep 17 00:00:00 2001 From: Andrew Myers Date: Sat, 8 Feb 2025 10:49:28 -0800 Subject: [PATCH 220/278] Remove NamedComponentParticleContainer (Use from AMReX) (#5481) This capability has been upstreamed to AMReX. 
Co-authored-by: Axel Huebl --- Docs/source/developers/particles.rst | 2 +- ...puts_test_2d_particle_attr_access_picmi.py | 4 +- .../inputs_test_2d_prev_positions_picmi.py | 4 +- ...inputs_test_2d_runtime_components_picmi.py | 7 +- Python/pywarpx/particle_containers.py | 16 +- Source/Diagnostics/BTDiagnostics.cpp | 11 + .../FlushFormats/FlushFormatCheckpoint.cpp | 26 +- .../FlushFormats/FlushFormatInSitu.cpp | 11 +- .../FlushFormats/FlushFormatPlotfile.cpp | 18 +- .../Diagnostics/ParticleDiag/ParticleDiag.cpp | 17 +- Source/Diagnostics/ParticleIO.cpp | 38 +-- Source/Diagnostics/WarpXOpenPMD.cpp | 61 ++--- .../ImplicitSolvers/ImplicitSolver.cpp | 12 +- .../ImplicitSolvers/WarpXImplicitOps.cpp | 28 +-- Source/Particles/AddPlasmaUtilities.H | 13 +- .../DSMC/SplitAndScatterFunc.H | 2 +- .../BinaryCollision/ParticleCreationFunc.H | 2 +- .../ElementaryProcess/QEDPairGeneration.H | 4 +- .../ElementaryProcess/QEDPhotonEmission.H | 4 +- Source/Particles/LaserParticleContainer.cpp | 6 +- Source/Particles/MultiParticleContainer.cpp | 3 +- .../NamedComponentParticleContainer.H | 222 ------------------ Source/Particles/ParticleBoundaryBuffer.H | 4 +- Source/Particles/ParticleBoundaryBuffer.cpp | 38 ++- .../ParticleCreation/DefaultInitialization.H | 35 +-- .../ParticleCreation/FilterCopyTransform.H | 6 +- .../FilterCreateTransformFromFAB.H | 4 +- Source/Particles/ParticleCreation/SmartCopy.H | 8 +- .../Particles/ParticleCreation/SmartCreate.H | 4 +- .../Particles/ParticleCreation/SmartUtils.H | 4 +- .../Particles/ParticleCreation/SmartUtils.cpp | 13 +- Source/Particles/PhotonParticleContainer.cpp | 2 +- .../Particles/PhysicalParticleContainer.cpp | 70 +++--- .../Particles/PinnedMemoryParticleContainer.H | 4 +- Source/Particles/Pusher/GetAndSetPosition.H | 1 - .../RigidInjectedParticleContainer.cpp | 2 +- Source/Particles/WarpXParticleContainer.H | 77 +++++- Source/Particles/WarpXParticleContainer.cpp | 54 +++-- Source/Python/Particles/CMakeLists.txt | 1 - .../PinnedMemoryParticleContainer.cpp | 31 --- .../Particles/WarpXParticleContainer.cpp | 14 +- Source/Python/pyWarpX.cpp | 2 - 42 files changed, 368 insertions(+), 517 deletions(-) delete mode 100644 Source/Particles/NamedComponentParticleContainer.H delete mode 100644 Source/Python/Particles/PinnedMemoryParticleContainer.cpp diff --git a/Docs/source/developers/particles.rst b/Docs/source/developers/particles.rst index 45a92107ae9..9f199bdbb91 100644 --- a/Docs/source/developers/particles.rst +++ b/Docs/source/developers/particles.rst @@ -141,7 +141,7 @@ Attribute name ``int``/``real`` Description Wher Wheeler process physics is used. ==================== ================ ================================== ===== ==== ====================== -WarpX allows extra runtime attributes to be added to particle containers (through ``NewRealComp("attrname")`` or ``NewIntComp("attrname")``). +WarpX allows extra runtime attributes to be added to particle containers (through ``AddRealComp("attrname")`` or ``AddIntComp("attrname")``). The attribute name can then be used to access the values of that attribute. For example, using a particle iterator, ``pti``, to loop over the particles the command ``pti.GetAttribs(particle_comps["attrname"]).dataPtr();`` will return the values of the ``"attrname"`` attribute. 
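A minimal sketch of the migrated usage, following the renames in this patch (illustrative only; the container `pc`, the level `lev`, and the attribute name are placeholders):

    // add a runtime real component (was: NewRealComp)
    pc.AddRealComp("attrname");

    for (WarpXParIter pti(pc, lev); pti.isValid(); ++pti)
    {
        // access by name, as done elsewhere in this patch ...
        amrex::ParticleReal* const AMREX_RESTRICT attr =
            pti.GetAttribs("attrname").dataPtr();
        // ... or resolve the SoA component index explicitly
        int const idx = pc.GetRealCompIndex("attrname");
        amrex::ignore_unused(attr, idx);
    }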
diff --git a/Examples/Tests/particle_data_python/inputs_test_2d_particle_attr_access_picmi.py b/Examples/Tests/particle_data_python/inputs_test_2d_particle_attr_access_picmi.py index dbd29a43bc7..0d8c2ac209b 100755 --- a/Examples/Tests/particle_data_python/inputs_test_2d_particle_attr_access_picmi.py +++ b/Examples/Tests/particle_data_python/inputs_test_2d_particle_attr_access_picmi.py @@ -150,8 +150,8 @@ def add_particles(): ########################## assert elec_wrapper.nps == 270 / (2 - args.unique) -assert elec_wrapper.particle_container.get_comp_index("w") == 2 -assert elec_wrapper.particle_container.get_comp_index("newPid") == 6 +assert elec_wrapper.particle_container.get_real_comp_index("w") == 2 +assert elec_wrapper.particle_container.get_real_comp_index("newPid") == 6 new_pid_vals = elec_wrapper.get_particle_real_arrays("newPid", 0) for vals in new_pid_vals: diff --git a/Examples/Tests/particle_data_python/inputs_test_2d_prev_positions_picmi.py b/Examples/Tests/particle_data_python/inputs_test_2d_prev_positions_picmi.py index 2ad86ecea95..c15409edb0c 100755 --- a/Examples/Tests/particle_data_python/inputs_test_2d_prev_positions_picmi.py +++ b/Examples/Tests/particle_data_python/inputs_test_2d_prev_positions_picmi.py @@ -111,8 +111,8 @@ elec_count = elec_wrapper.nps # check that the runtime attributes have the right indices -assert elec_wrapper.particle_container.get_comp_index("prev_x") == 6 -assert elec_wrapper.particle_container.get_comp_index("prev_z") == 7 +assert elec_wrapper.particle_container.get_real_comp_index("prev_x") == 6 +assert elec_wrapper.particle_container.get_real_comp_index("prev_z") == 7 # sanity check that the prev_z values are reasonable and # that the correct number of values are returned diff --git a/Examples/Tests/restart/inputs_test_2d_runtime_components_picmi.py b/Examples/Tests/restart/inputs_test_2d_runtime_components_picmi.py index e90bfd266a7..746dff27a42 100755 --- a/Examples/Tests/restart/inputs_test_2d_runtime_components_picmi.py +++ b/Examples/Tests/restart/inputs_test_2d_runtime_components_picmi.py @@ -107,7 +107,8 @@ np.random.seed(30025025) electron_wrapper = particle_containers.ParticleContainerWrapper("electrons") -electron_wrapper.add_real_comp("newPid") +if not sim.amr_restart: + electron_wrapper.add_real_comp("newPid") def add_particles(): @@ -140,8 +141,8 @@ def add_particles(): ########################## assert electron_wrapper.nps == 90 -assert electron_wrapper.particle_container.get_comp_index("w") == 2 -assert electron_wrapper.particle_container.get_comp_index("newPid") == 6 +assert electron_wrapper.particle_container.get_real_comp_index("w") == 2 +assert electron_wrapper.particle_container.get_real_comp_index("newPid") == 6 new_pid_vals = electron_wrapper.get_particle_real_arrays("newPid", 0) for vals in new_pid_vals: diff --git a/Python/pywarpx/particle_containers.py b/Python/pywarpx/particle_containers.py index 3d77a61cb07..a66fd131aed 100644 --- a/Python/pywarpx/particle_containers.py +++ b/Python/pywarpx/particle_containers.py @@ -170,7 +170,9 @@ def add_particles( # --- Note that the velocities are handled separately and not included in attr # --- (even though they are stored as attributes in the C++) for key, vals in kwargs.items(): - attr[:, self.particle_container.get_comp_index(key) - built_in_attrs] = vals + attr[ + :, self.particle_container.get_real_comp_index(key) - built_in_attrs + ] = vals nattr_int = 0 attr_int = np.empty([0], dtype=np.int32) @@ -264,7 +266,7 @@ def get_particle_real_arrays(self, comp_name, level, 
copy_to_host=False):
         List of arrays
             The requested particle array data
         """
-        comp_idx = self.particle_container.get_comp_index(comp_name)
+        comp_idx = self.particle_container.get_real_comp_index(comp_name)
 
         data_array = []
         for pti in libwarpx.libwarpx_so.WarpXParIter(self.particle_container, level):
@@ -309,7 +311,7 @@ def get_particle_int_arrays(self, comp_name, level, copy_to_host=False):
         List of arrays
             The requested particle array data
         """
-        comp_idx = self.particle_container.get_icomp_index(comp_name)
+        comp_idx = self.particle_container.get_int_comp_index(comp_name)
 
         data_array = []
         for pti in libwarpx.libwarpx_so.WarpXParIter(self.particle_container, level):
@@ -842,16 +844,16 @@ def get_particle_boundary_buffer(self, species_name, boundary, comp_name, level)
         )
         data_array = []
         # loop over the real attributes
-        if comp_name in part_container.real_comp_names:
-            comp_idx = part_container.real_comp_names[comp_name]
+        if comp_name in part_container.real_soa_names:
+            comp_idx = part_container.get_real_comp_index(comp_name)
             for ii, pti in enumerate(
                 libwarpx.libwarpx_so.BoundaryBufferParIter(part_container, level)
             ):
                 soa = pti.soa()
                 data_array.append(xp.array(soa.get_real_data(comp_idx), copy=False))
         # loop over the integer attributes
-        elif comp_name in part_container.int_comp_names:
-            comp_idx = part_container.int_comp_names[comp_name]
+        elif comp_name in part_container.int_soa_names:
+            comp_idx = part_container.get_int_comp_index(comp_name)
             for ii, pti in enumerate(
                 libwarpx.libwarpx_so.BoundaryBufferParIter(part_container, level)
             ):
diff --git a/Source/Diagnostics/BTDiagnostics.cpp b/Source/Diagnostics/BTDiagnostics.cpp
index 09167452c1a..cae2d2bbc03 100644
--- a/Source/Diagnostics/BTDiagnostics.cpp
+++ b/Source/Diagnostics/BTDiagnostics.cpp
@@ -1462,6 +1462,17 @@ BTDiagnostics::InitializeParticleBuffer ()
         m_totalParticles_in_buffer[i][isp] = 0;
         m_particles_buffer[i][isp] = std::make_unique<PinnedMemoryParticleContainer>(WarpX::GetInstance().GetParGDB());
         const int idx = mpc.getSpeciesID(m_output_species_names[isp]);
+
+        // SoA component names
+        {
+            auto &pc = mpc.GetParticleContainer(idx);
+            auto rn = pc.GetRealSoANames();
+            rn.resize(WarpXParticleContainer::NArrayReal); // strip runtime comps
+            auto in = pc.GetIntSoANames();
+            in.resize(WarpXParticleContainer::NArrayInt); // strip runtime comps
+            m_particles_buffer[i][isp]->SetSoACompileTimeNames(rn, in);
+        }
+
         m_output_species[i].push_back(ParticleDiag(m_diag_name,
                                                    m_output_species_names[isp],
                                                    mpc.GetParticleContainerPtr(idx),
diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp
index fc308dee936..ba371464782 100644
--- a/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp
+++ b/Source/Diagnostics/FlushFormats/FlushFormatCheckpoint.cpp
@@ -209,27 +209,25 @@ FlushFormatCheckpoint::CheckpointParticles (
             write_real_comps.push_back(1);
         }
 
-        int const compile_time_comps = static_cast<int>(real_names.size());
-
-        // get the names of the real comps
-        // note: skips the mandatory AMREX_SPACEDIM positions for pure SoA
+        // get the names of the extra real comps
         real_names.resize(pc->NumRealComps() - AMREX_SPACEDIM);
         write_real_comps.resize(pc->NumRealComps() - AMREX_SPACEDIM);
-        auto runtime_rnames = pc->getParticleRuntimeComps();
-        for (auto const& x : runtime_rnames) {
-            int const i = x.second + PIdx::nattribs - AMREX_SPACEDIM;
-            real_names[i] = x.first;
-            write_real_comps[i] = pc->h_redistribute_real_comp[i + compile_time_comps];
+
+        // note, skip the required component names here
+        auto rnames = pc->GetRealSoANames();
+        for (std::size_t index = PIdx::nattribs; index < rnames.size(); ++index) {
+            std::size_t const i = index - AMREX_SPACEDIM;
+            real_names[i] = rnames[index];
+            write_real_comps[i] = pc->h_redistribute_real_comp[index];
         }
 
         // and the int comps
         int_names.resize(pc->NumIntComps());
         write_int_comps.resize(pc->NumIntComps());
-        auto runtime_inames = pc->getParticleRuntimeiComps();
-        for (auto const& x : runtime_inames) {
-            int const i = x.second + 0;
-            int_names[i] = x.first;
-            write_int_comps[i] = pc->h_redistribute_int_comp[i+AMREX_SPACEDIM];
+        auto inames = pc->GetIntSoANames();
+        for (std::size_t index = 0; index < inames.size(); ++index) {
+            int_names[index] = inames[index];
+            write_int_comps[index] = pc->h_redistribute_int_comp[index];
         }
 
         pc->Checkpoint(dir, part_diag.getSpeciesName(),
diff --git a/Source/Diagnostics/FlushFormats/FlushFormatInSitu.cpp b/Source/Diagnostics/FlushFormats/FlushFormatInSitu.cpp
index d5313d71727..af8f53df9b9 100644
--- a/Source/Diagnostics/FlushFormats/FlushFormatInSitu.cpp
+++ b/Source/Diagnostics/FlushFormats/FlushFormatInSitu.cpp
@@ -37,7 +37,7 @@ FlushFormatInSitu::WriteParticles(const amrex::Vector<ParticleDiag>& particle_di
         WarpXParticleContainer* pc = particle_diag.getParticleContainer();
 
         // get names of real comps
-        std::map<std::string, int> real_comps_map = pc->getParticleComps();
+        std::vector<std::string> real_comps_map = pc->GetRealSoANames();
 
         // WarpXParticleContainer compile-time extra AoS attributes (Real): 0
         // WarpXParticleContainer compile-time extra AoS attributes (int): 0
@@ -46,14 +46,7 @@
         // not an efficient search, but N is small...
         for(int j = 0; j < PIdx::nattribs; ++j)
         {
-            auto rvn_it = real_comps_map.begin();
-            for (; rvn_it != real_comps_map.end(); ++rvn_it)
-                if (rvn_it->second == j)
-                    break;
-            WARPX_ALWAYS_ASSERT_WITH_MESSAGE(
-                rvn_it != real_comps_map.end(),
-                "WarpX In Situ: SoA real attribute not found");
-            std::string varname = rvn_it->first;
+            std::string varname = real_comps_map.at(j);
             particle_varnames.push_back(prefix + "_" + varname);
         }
         // WarpXParticleContainer compile-time extra SoA attributes (int): 0
diff --git a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp
index 879a5986434..13117bad105 100644
--- a/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp
+++ b/Source/Diagnostics/FlushFormats/FlushFormatPlotfile.cpp
@@ -372,13 +372,13 @@ FlushFormatPlotfile::WriteParticles(const std::string& dir,
         real_names.push_back("theta");
 #endif
 
-        // get the names of the real comps
-
-        // note: skips the mandatory AMREX_SPACEDIM positions for pure SoA
+        // get the names of the extra real comps
         real_names.resize(tmp.NumRealComps() - AMREX_SPACEDIM);
-        auto runtime_rnames = tmp.getParticleRuntimeComps();
-        for (auto const& x : runtime_rnames) {
-            real_names[x.second + PIdx::nattribs - AMREX_SPACEDIM] = x.first;
+
+        // note, skip the required component names here
+        auto rnames = tmp.GetRealSoANames();
+        for (std::size_t index = PIdx::nattribs; index < rnames.size(); ++index) {
+            real_names[index - AMREX_SPACEDIM] = rnames[index];
         }
 
         // plot any "extra" fields by default
@@ -390,8 +390,10 @@ FlushFormatPlotfile::WriteParticles(const std::string& dir,
 
         // and the names
         int_names.resize(tmp.NumIntComps());
-        auto runtime_inames = tmp.getParticleRuntimeiComps();
-        for (auto const& x : runtime_inames) { int_names[x.second+0] = x.first; }
+        auto inames = tmp.GetIntSoANames();
+        for (std::size_t index = 0; index < inames.size();
++index) { + int_names[index] = inames[index]; + } // plot by default int_flags.resize(tmp.NumIntComps(), 1); diff --git a/Source/Diagnostics/ParticleDiag/ParticleDiag.cpp b/Source/Diagnostics/ParticleDiag/ParticleDiag.cpp index 1a64ae20f0e..8e61e7464ad 100644 --- a/Source/Diagnostics/ParticleDiag/ParticleDiag.cpp +++ b/Source/Diagnostics/ParticleDiag/ParticleDiag.cpp @@ -36,26 +36,23 @@ ParticleDiag::ParticleDiag ( std::fill(m_plot_flags.begin(), m_plot_flags.end(), 0); bool contains_positions = false; if (variables[0] != "none"){ - std::map existing_variable_names = pc->getParticleComps(); + for (auto& var : variables){ #ifdef WARPX_DIM_RZ - // we reconstruct to Cartesian x,y,z for RZ particle output - existing_variable_names["y"] = PIdx::theta; + // we reconstruct to Cartesian x,y,z for RZ particle output + if (var == "y") { var = "theta"; } #endif - for (const auto& var : variables){ if (var == "phi") { // User requests phi on particle. This is *not* part of the variables that // the particle container carries, and is only added to particles during output. // Therefore, this case needs to be treated specifically. m_plot_phi = true; } else { - const auto search = existing_variable_names.find(var); - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - search != existing_variable_names.end(), + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(pc->HasRealComp(var), "variables argument '" + var +"' is not an existing attribute for this species"); - m_plot_flags[existing_variable_names.at(var)] = 1; + m_plot_flags[pc->GetRealCompIndex(var)] = 1; - if (var == "x" || var == "y" || var == "z") { + if (var == "x" || var == "y" || var == "z" || var == "theta") { contains_positions = true; } } @@ -75,7 +72,7 @@ ParticleDiag::ParticleDiag ( // Always write out theta, whether or not it's requested, // to be consistent with always writing out r and z. // TODO: openPMD does a reconstruction to Cartesian, so we can now skip force-writing this - m_plot_flags[pc->getParticleComps().at("theta")] = 1; + m_plot_flags[pc->GetRealCompIndex("theta")] = 1; #endif // build filter functors diff --git a/Source/Diagnostics/ParticleIO.cpp b/Source/Diagnostics/ParticleIO.cpp index d7a26326e52..62a5e126558 100644 --- a/Source/Diagnostics/ParticleIO.cpp +++ b/Source/Diagnostics/ParticleIO.cpp @@ -153,27 +153,30 @@ MultiParticleContainer::Restart (const std::string& dir) real_comp_names.push_back(comp_name); } - for (auto const& comp : pc->getParticleRuntimeComps()) { - auto search = std::find(real_comp_names.begin(), real_comp_names.end(), comp.first); + int n_rc = 0; + for (auto const& comp : pc->GetRealSoANames()) { + // skip compile-time components + if (n_rc < WarpXParticleContainer::NArrayReal) { continue; } + n_rc++; + + auto search = std::find(real_comp_names.begin(), real_comp_names.end(), comp); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( search != real_comp_names.end(), "Species " + species_names[i] - + "needs runtime real component " + comp.first + + " needs runtime real component " + comp + ", but it was not found in the checkpoint file." ); } for (int j = PIdx::nattribs-AMREX_SPACEDIM; j < nr; ++j) { const auto& comp_name = real_comp_names[j]; - auto current_comp_names = pc->getParticleComps(); - auto search = current_comp_names.find(comp_name); - if (search == current_comp_names.end()) { + if (!pc->HasRealComp(comp_name)) { amrex::Print() << Utils::TextMsg::Info( "Runtime real component " + comp_name + " was found in the checkpoint file, but it has not been added yet. " + " Adding it now." 
); - pc->NewRealComp(comp_name); + pc->AddRealComp(comp_name); } } @@ -187,26 +190,29 @@ MultiParticleContainer::Restart (const std::string& dir) int_comp_names.push_back(comp_name); } - for (auto const& comp : pc->getParticleRuntimeiComps()) { - auto search = std::find(int_comp_names.begin(), int_comp_names.end(), comp.first); + int n_ic = 0; + for (auto const& comp : pc->GetIntSoANames()) { + // skip compile-time components + if (n_ic < WarpXParticleContainer::NArrayInt) { continue; } + n_ic++; + + auto search = std::find(int_comp_names.begin(), int_comp_names.end(), comp); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( search != int_comp_names.end(), - "Species " + species_names[i] + "needs runtime int component " + comp.first + "Species " + species_names[i] + " needs runtime int component " + comp + ", but it was not found in the checkpoint file." ); } for (int j = 0; j < ni; ++j) { const auto& comp_name = int_comp_names[j]; - auto current_comp_names = pc->getParticleiComps(); - auto search = current_comp_names.find(comp_name); - if (search == current_comp_names.end()) { + if (!pc->HasIntComp(comp_name)) { amrex::Print()<< Utils::TextMsg::Info( "Runtime int component " + comp_name + " was found in the checkpoint file, but it has not been added yet. " + " Adding it now." ); - pc->NewIntComp(comp_name); + pc->AddIntComp(comp_name); } } @@ -258,8 +264,8 @@ storePhiOnParticles ( PinnedMemoryParticleContainer& tmp, is_full_diagnostic, "Output of the electrostatic potential (phi) on the particles was requested, " "but this is only available with `diag_type = Full`."); - tmp.NewRealComp("phi"); - int const phi_index = tmp.getParticleComps().at("phi"); + tmp.AddRealComp("phi"); + int const phi_index = tmp.GetRealCompIndex("phi"); auto& warpx = WarpX::GetInstance(); for (int lev=0; lev<=warpx.finestLevel(); lev++) { const amrex::Geometry& geom = warpx.Geom(lev); diff --git a/Source/Diagnostics/WarpXOpenPMD.cpp b/Source/Diagnostics/WarpXOpenPMD.cpp index 2fac8ede452..96e8bb846bb 100644 --- a/Source/Diagnostics/WarpXOpenPMD.cpp +++ b/Source/Diagnostics/WarpXOpenPMD.cpp @@ -10,7 +10,6 @@ #include "Diagnostics/ParticleDiag/ParticleDiag.H" #include "FieldIO.H" #include "Particles/Filter/FilterFunctors.H" -#include "Particles/NamedComponentParticleContainer.H" #include "Utils/TextMsg.H" #include "Utils/Parser/ParserUtils.H" #include "Utils/RelativeCellPosition.H" @@ -591,44 +590,52 @@ for (const auto & particle_diag : particle_diags) { storePhiOnParticles( tmp, WarpX::electrostatic_solver_id, !use_pinned_pc ); } - // names of amrex::Real and int particle attributes in SoA data + // names of amrex::ParticleReal and int particle attributes in SoA data + auto const rn = tmp.GetRealSoANames(); + auto const in = tmp.GetIntSoANames(); amrex::Vector real_names; - amrex::Vector int_names; - amrex::Vector int_flags; - amrex::Vector real_flags; - // see openPMD ED-PIC extension for namings - // note: an underscore separates the record name from its component - // for non-scalar records - // note: in RZ, we reconstruct x,y,z positions from r,z,theta in WarpX + amrex::Vector int_names(in.begin(), in.end()); + + // transform names to openPMD, separated by underscores + { + // see openPMD ED-PIC extension for namings + // note: an underscore separates the record name from its component + // for non-scalar records + // note: in RZ, we reconstruct x,y,z positions from r,z,theta in WarpX #if !defined (WARPX_DIM_1D_Z) - real_names.push_back("position_x"); + real_names.push_back("position_x"); #endif #if defined (WARPX_DIM_3D) || 
defined(WARPX_DIM_RZ) - real_names.push_back("position_y"); + real_names.push_back("position_y"); #endif - real_names.push_back("position_z"); - real_names.push_back("weighting"); - real_names.push_back("momentum_x"); - real_names.push_back("momentum_y"); - real_names.push_back("momentum_z"); - // get the names of the real comps - real_names.resize(tmp.NumRealComps()); - auto runtime_rnames = tmp.getParticleRuntimeComps(); - for (auto const& x : runtime_rnames) + real_names.push_back("position_z"); + real_names.push_back("weighting"); + real_names.push_back("momentum_x"); + real_names.push_back("momentum_y"); + real_names.push_back("momentum_z"); + } + for (size_t i = real_names.size(); i < rn.size(); ++i) { - real_names[x.second+PIdx::nattribs] = detail::snakeToCamel(x.first); + real_names.push_back(rn[i]); } + + for (size_t i = PIdx::nattribs; i < rn.size(); ++i) + { + real_names[i] = detail::snakeToCamel(rn[i]); + } + // plot any "extra" fields by default - real_flags = particle_diag.m_plot_flags; + amrex::Vector real_flags = particle_diag.m_plot_flags; real_flags.resize(tmp.NumRealComps(), 1); - // and the names - int_names.resize(tmp.NumIntComps()); - auto runtime_inames = tmp.getParticleRuntimeiComps(); - for (auto const& x : runtime_inames) + + // and the int names + for (size_t i = 0; i < in.size(); ++i) { - int_names[x.second+0] = detail::snakeToCamel(x.first); + int_names[i] = detail::snakeToCamel(in[i]); } + // plot by default + amrex::Vector int_flags; int_flags.resize(tmp.NumIntComps(), 1); // real_names contains a list of all real particle attributes. diff --git a/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp index d06e84859d8..ab064772922 100644 --- a/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp +++ b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp @@ -13,15 +13,15 @@ void ImplicitSolver::CreateParticleAttributes () const // Add space to save the positions and velocities at the start of the time steps for (auto const& pc : m_WarpX->GetPartContainer()) { #if (AMREX_SPACEDIM >= 2) - pc->NewRealComp("x_n", comm); + pc->AddRealComp("x_n", comm); #endif #if defined(WARPX_DIM_3D) || defined(WARPX_DIM_RZ) - pc->NewRealComp("y_n", comm); + pc->AddRealComp("y_n", comm); #endif - pc->NewRealComp("z_n", comm); - pc->NewRealComp("ux_n", comm); - pc->NewRealComp("uy_n", comm); - pc->NewRealComp("uz_n", comm); + pc->AddRealComp("z_n", comm); + pc->AddRealComp("ux_n", comm); + pc->AddRealComp("uy_n", comm); + pc->AddRealComp("uz_n", comm); } } diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp index 9b62bd91b0c..06e1820854c 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp +++ b/Source/FieldSolver/ImplicitSolvers/WarpXImplicitOps.cpp @@ -169,7 +169,7 @@ WarpX::SaveParticlesAtImplicitStepStart ( ) #endif { - auto particle_comps = pc->getParticleComps(); + auto particle_comps = pc->GetRealSoANames(); for (WarpXParIter pti(*pc, lev); pti.isValid(); ++pti) { @@ -181,15 +181,15 @@ WarpX::SaveParticlesAtImplicitStepStart ( ) amrex::ParticleReal* const AMREX_RESTRICT uz = attribs[PIdx::uz].dataPtr(); #if (AMREX_SPACEDIM >= 2) - amrex::ParticleReal* x_n = pti.GetAttribs(particle_comps["x_n"]).dataPtr(); + amrex::ParticleReal* x_n = pti.GetAttribs("x_n").dataPtr(); #endif #if defined(WARPX_DIM_3D) || defined(WARPX_DIM_RZ) - amrex::ParticleReal* y_n = pti.GetAttribs(particle_comps["y_n"]).dataPtr(); + amrex::ParticleReal* y_n = 
pti.GetAttribs("y_n").dataPtr(); #endif - amrex::ParticleReal* z_n = pti.GetAttribs(particle_comps["z_n"]).dataPtr(); - amrex::ParticleReal* ux_n = pti.GetAttribs(particle_comps["ux_n"]).dataPtr(); - amrex::ParticleReal* uy_n = pti.GetAttribs(particle_comps["uy_n"]).dataPtr(); - amrex::ParticleReal* uz_n = pti.GetAttribs(particle_comps["uz_n"]).dataPtr(); + amrex::ParticleReal* z_n = pti.GetAttribs("z_n").dataPtr(); + amrex::ParticleReal* ux_n = pti.GetAttribs("ux_n").dataPtr(); + amrex::ParticleReal* uy_n = pti.GetAttribs("uy_n").dataPtr(); + amrex::ParticleReal* uz_n = pti.GetAttribs("uz_n").dataPtr(); const long np = pti.numParticles(); @@ -239,7 +239,7 @@ WarpX::FinishImplicitParticleUpdate () #endif { - auto particle_comps = pc->getParticleComps(); + auto particle_comps = pc->GetRealSoANames(); for (WarpXParIter pti(*pc, lev); pti.isValid(); ++pti) { @@ -252,15 +252,15 @@ WarpX::FinishImplicitParticleUpdate () amrex::ParticleReal* const AMREX_RESTRICT uz = attribs[PIdx::uz].dataPtr(); #if (AMREX_SPACEDIM >= 2) - amrex::ParticleReal* x_n = pti.GetAttribs(particle_comps["x_n"]).dataPtr(); + amrex::ParticleReal* x_n = pti.GetAttribs("x_n").dataPtr(); #endif #if defined(WARPX_DIM_3D) || defined(WARPX_DIM_RZ) - amrex::ParticleReal* y_n = pti.GetAttribs(particle_comps["y_n"]).dataPtr(); + amrex::ParticleReal* y_n = pti.GetAttribs("y_n").dataPtr(); #endif - amrex::ParticleReal* z_n = pti.GetAttribs(particle_comps["z_n"]).dataPtr(); - amrex::ParticleReal* ux_n = pti.GetAttribs(particle_comps["ux_n"]).dataPtr(); - amrex::ParticleReal* uy_n = pti.GetAttribs(particle_comps["uy_n"]).dataPtr(); - amrex::ParticleReal* uz_n = pti.GetAttribs(particle_comps["uz_n"]).dataPtr(); + amrex::ParticleReal* z_n = pti.GetAttribs("z_n").dataPtr(); + amrex::ParticleReal* ux_n = pti.GetAttribs("ux_n").dataPtr(); + amrex::ParticleReal* uy_n = pti.GetAttribs("uy_n").dataPtr(); + amrex::ParticleReal* uz_n = pti.GetAttribs("uz_n").dataPtr(); const long np = pti.numParticles(); diff --git a/Source/Particles/AddPlasmaUtilities.H b/Source/Particles/AddPlasmaUtilities.H index 7b8e4e58105..12d964adf64 100644 --- a/Source/Particles/AddPlasmaUtilities.H +++ b/Source/Particles/AddPlasmaUtilities.H @@ -251,8 +251,6 @@ struct PlasmaParserHelper PlasmaParserHelper (SoAType& a_soa, std::size_t old_size, const std::vector& a_user_int_attribs, const std::vector& a_user_real_attribs, - std::map& a_particle_icomps, - std::map& a_particle_comps, const PlasmaParserWrapper& wrapper) : m_wrapper_ptr(&wrapper) { m_pa_user_int_pinned.resize(a_user_int_attribs.size()); @@ -266,10 +264,10 @@ struct PlasmaParserHelper #endif for (std::size_t ia = 0; ia < a_user_int_attribs.size(); ++ia) { - m_pa_user_int_pinned[ia] = a_soa.GetIntData(a_particle_icomps[a_user_int_attribs[ia]]).data() + old_size; + m_pa_user_int_pinned[ia] = a_soa.GetIntData(a_user_int_attribs[ia]).data() + old_size; } for (std::size_t ia = 0; ia < a_user_real_attribs.size(); ++ia) { - m_pa_user_real_pinned[ia] = a_soa.GetRealData(a_particle_comps[a_user_real_attribs[ia]]).data() + old_size; + m_pa_user_real_pinned[ia] = a_soa.GetRealData(a_user_real_attribs[ia]).data() + old_size; } #ifdef AMREX_USE_GPU @@ -308,7 +306,6 @@ struct QEDHelper { template QEDHelper (SoAType& a_soa, std::size_t old_size, - std::map& a_particle_comps, bool a_has_quantum_sync, bool a_has_breit_wheeler, const std::shared_ptr& a_shr_p_qs_engine, const std::shared_ptr& a_shr_p_bw_engine) @@ -317,14 +314,12 @@ struct QEDHelper if(has_quantum_sync){ quantum_sync_get_opt = 
a_shr_p_qs_engine->build_optical_depth_functor(); - p_optical_depth_QSR = a_soa.GetRealData( - a_particle_comps["opticalDepthQSR"]).data() + old_size; + p_optical_depth_QSR = a_soa.GetRealData("opticalDepthQSR").data() + old_size; } if(has_breit_wheeler){ breit_wheeler_get_opt = a_shr_p_bw_engine->build_optical_depth_functor(); - p_optical_depth_BW = a_soa.GetRealData( - a_particle_comps["opticalDepthBW"]).data() + old_size; + p_optical_depth_BW = a_soa.GetRealData("opticalDepthBW").data() + old_size; } } diff --git a/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H b/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H index db04dbc7f32..e4b4d8a6a3a 100644 --- a/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H +++ b/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H @@ -252,7 +252,7 @@ public: ParticleCreation::DefaultInitializeRuntimeAttributes(*tile_products[i], 0, 0, pc_products[i]->getUserRealAttribs(), pc_products[i]->getUserIntAttribs(), - pc_products[i]->getParticleComps(), pc_products[i]->getParticleiComps(), + pc_products[i]->GetRealSoANames(), pc_products[i]->GetIntSoANames(), pc_products[i]->getUserRealAttribParser(), pc_products[i]->getUserIntAttribParser(), #ifdef WARPX_QED diff --git a/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.H b/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.H index e4772aab7c9..59565c92516 100644 --- a/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.H +++ b/Source/Particles/Collision/BinaryCollision/ParticleCreationFunc.H @@ -235,7 +235,7 @@ public: ParticleCreation::DefaultInitializeRuntimeAttributes(*tile_products[i], 0, 0, pc_products[i]->getUserRealAttribs(), pc_products[i]->getUserIntAttribs(), - pc_products[i]->getParticleComps(), pc_products[i]->getParticleiComps(), + pc_products[i]->GetRealSoANames(), pc_products[i]->GetIntSoANames(), pc_products[i]->getUserRealAttribParser(), pc_products[i]->getUserIntAttribParser(), #ifdef WARPX_QED diff --git a/Source/Particles/ElementaryProcess/QEDPairGeneration.H b/Source/Particles/ElementaryProcess/QEDPairGeneration.H index f1beb8363a7..99e87b5c796 100644 --- a/Source/Particles/ElementaryProcess/QEDPairGeneration.H +++ b/Source/Particles/ElementaryProcess/QEDPairGeneration.H @@ -41,7 +41,7 @@ public: /** * \brief Constructor of the PairGenerationFilterFunc functor. * - * @param[in] opt_depth_runtime_comp index of the optical depth component + * @param[in] opt_depth_runtime_comp index of the optical depth runtime component */ PairGenerationFilterFunc(int const opt_depth_runtime_comp) : m_opt_depth_runtime_comp(opt_depth_runtime_comp) @@ -67,7 +67,7 @@ public: } private: - int m_opt_depth_runtime_comp = 0; /*!< Index of the optical depth component of the species.*/ + int m_opt_depth_runtime_comp = 0; /*!< Index of the optical depth runtime component of the species. */ }; /** diff --git a/Source/Particles/ElementaryProcess/QEDPhotonEmission.H b/Source/Particles/ElementaryProcess/QEDPhotonEmission.H index 0b6836a38bc..f509f884c48 100644 --- a/Source/Particles/ElementaryProcess/QEDPhotonEmission.H +++ b/Source/Particles/ElementaryProcess/QEDPhotonEmission.H @@ -47,7 +47,7 @@ public: /** * \brief Constructor of the PhotonEmissionFilterFunc functor. 
* - * @param[in] opt_depth_runtime_comp Index of the optical depth component + * @param[in] opt_depth_runtime_comp Index of the optical depth component in the runtime real data */ PhotonEmissionFilterFunc(int const opt_depth_runtime_comp) : m_opt_depth_runtime_comp(opt_depth_runtime_comp) @@ -73,7 +73,7 @@ public: } private: - int m_opt_depth_runtime_comp; /*!< Index of the optical depth component of the source species*/ + int m_opt_depth_runtime_comp; /*!< Index of the optical depth runtime component of the source species */ }; /** diff --git a/Source/Particles/LaserParticleContainer.cpp b/Source/Particles/LaserParticleContainer.cpp index 1954b822084..c79d1f675b5 100644 --- a/Source/Particles/LaserParticleContainer.cpp +++ b/Source/Particles/LaserParticleContainer.cpp @@ -873,18 +873,18 @@ LaserParticleContainer::update_laser_particle (WarpXParIter& pti, #if (AMREX_SPACEDIM >= 2) ParticleReal* x_n = nullptr; if (push_type == PushType::Implicit) { - x_n = pti.GetAttribs(particle_comps["x_n"]).dataPtr(); + x_n = pti.GetAttribs("x_n").dataPtr(); } #endif #if defined(WARPX_DIM_3D) || defined(WARPX_DIM_RZ) ParticleReal* y_n = nullptr; if (push_type == PushType::Implicit) { - y_n = pti.GetAttribs(particle_comps["y_n"]).dataPtr(); + y_n = pti.GetAttribs("y_n").dataPtr(); } #endif ParticleReal* z_n = nullptr; if (push_type == PushType::Implicit) { - z_n = pti.GetAttribs(particle_comps["z_n"]).dataPtr(); + z_n = pti.GetAttribs("z_n").dataPtr(); } // Copy member variables to tmp copies for GPU runs. diff --git a/Source/Particles/MultiParticleContainer.cpp b/Source/Particles/MultiParticleContainer.cpp index c6724b5185a..6c08dc6aa8d 100644 --- a/Source/Particles/MultiParticleContainer.cpp +++ b/Source/Particles/MultiParticleContainer.cpp @@ -21,7 +21,6 @@ # include "Particles/ElementaryProcess/QEDPhotonEmission.H" #endif #include "Particles/LaserParticleContainer.H" -#include "Particles/NamedComponentParticleContainer.H" #include "Particles/ParticleCreation/FilterCopyTransform.H" #ifdef WARPX_QED # include "Particles/ParticleCreation/FilterCreateTransformFromFAB.H" @@ -1622,7 +1621,7 @@ void MultiParticleContainer::doQedQuantumSync (int lev, auto Transform = PhotonEmissionTransformFunc( m_shr_p_qs_engine->build_optical_depth_functor(), - pc_source->particle_runtime_comps["opticalDepthQSR"], + pc_source->GetRealCompIndex("opticalDepthQSR") - pc_source->NArrayReal, m_shr_p_qs_engine->build_phot_em_functor(), pti, lev, Ex.nGrowVect(), Ex[pti], Ey[pti], Ez[pti], diff --git a/Source/Particles/NamedComponentParticleContainer.H b/Source/Particles/NamedComponentParticleContainer.H deleted file mode 100644 index 57c65746d18..00000000000 --- a/Source/Particles/NamedComponentParticleContainer.H +++ /dev/null @@ -1,222 +0,0 @@ -/* Copyright 2022 Remi Lehe - * - * This file is part of WarpX. 
- * - * License: BSD-3-Clause-LBNL - */ -#ifndef WARPX_NamedComponentParticleContainer_H_ -#define WARPX_NamedComponentParticleContainer_H_ - -#include "Utils/TextMsg.H" - -#include -#include -#include - -#include -#include -#include - - -/** Real Particle Attributes stored in amrex::ParticleContainer's struct of array - */ -struct PIdx -{ - enum { -#if !defined (WARPX_DIM_1D_Z) - x, -#endif -#if defined (WARPX_DIM_3D) - y, -#endif - z, - w, ///< weight - ux, uy, uz, -#ifdef WARPX_DIM_RZ - theta, ///< RZ needs all three position components -#endif - nattribs ///< number of compile-time attributes - }; -}; - -/** Integer Particle Attributes stored in amrex::ParticleContainer's struct of array - */ -struct PIdxInt -{ - enum { - nattribs ///< number of compile-time attributes - }; -}; - -/** Particle Container class that allows to add/access particle components - * with a name (string) instead of doing so with an integer index. - * (The "components" are all the particle amrex::Real quantities.) - * - * This is done by storing maps that give the index of the component - * that corresponds to a given string. - * - * @tparam T_Allocator Mainly controls in which type of memory (e.g. device - * arena, pinned memory arena, etc.) the particle data will be stored - */ -template class T_Allocator=amrex::DefaultAllocator> -class NamedComponentParticleContainer : -public amrex::ParticleContainerPureSoA -{ -public: - /** Construct an empty NamedComponentParticleContainer **/ - NamedComponentParticleContainer () : amrex::ParticleContainerPureSoA() {} - - /** Construct a NamedComponentParticleContainer from an AmrParGDB object - * - * In this case, the only components are the default ones: - * weight, momentum and (in RZ geometry) theta. - * - * @param amr_pgdb A pointer to a ParGDBBase, which contains pointers to - * the Geometry, DistributionMapping, and BoxArray objects that define the - * AMR hierarchy. Usually, this is generated by an AmrCore or AmrLevel object. 
- */ - NamedComponentParticleContainer (amrex::AmrParGDB* amr_pgdb) - : amrex::ParticleContainerPureSoA(amr_pgdb) { - // build up the map of string names to particle component numbers -#if !defined (WARPX_DIM_1D_Z) - particle_comps["x"] = PIdx::x; -#endif -#if defined (WARPX_DIM_3D) - particle_comps["y"] = PIdx::y; -#endif - particle_comps["z"] = PIdx::z; - particle_comps["w"] = PIdx::w; - particle_comps["ux"] = PIdx::ux; - particle_comps["uy"] = PIdx::uy; - particle_comps["uz"] = PIdx::uz; -#ifdef WARPX_DIM_RZ - particle_comps["theta"] = PIdx::theta; -#endif - } - - /** Destructor for NamedComponentParticleContainer */ - ~NamedComponentParticleContainer() override = default; - - /** Construct a NamedComponentParticleContainer from a regular - * amrex::ParticleContainer, and additional name-to-index maps - * - * @param pc regular particle container, where components are not named (only indexed) - * @param p_comps name-to-index map for compile-time and run-time real components - * @param p_icomps name-to-index map for compile-time and run-time integer components - * @param p_rcomps name-to-index map for run-time real components - * @param p_ricomps name-to-index map for run-time integer components - */ - NamedComponentParticleContainer( - amrex::ParticleContainerPureSoA && pc, - std::map p_comps, - std::map p_icomps, - std::map p_rcomps, - std::map p_ricomps) - : amrex::ParticleContainerPureSoA(std::move(pc)), - particle_comps(std::move(p_comps)), - particle_icomps(std::move(p_icomps)), - particle_runtime_comps(std::move(p_rcomps)), - particle_runtime_icomps(std::move(p_ricomps)) {} - - /** Copy constructor for NamedComponentParticleContainer */ - NamedComponentParticleContainer ( const NamedComponentParticleContainer &) = delete; - /** Copy operator for NamedComponentParticleContainer */ - NamedComponentParticleContainer& operator= ( const NamedComponentParticleContainer & ) = delete; - - /** Move constructor for NamedComponentParticleContainer */ - NamedComponentParticleContainer ( NamedComponentParticleContainer && ) noexcept = default; - /** Move operator for NamedComponentParticleContainer */ - NamedComponentParticleContainer& operator= ( NamedComponentParticleContainer && ) noexcept = default; - - /** Create an empty particle container - * - * This creates a new NamedComponentParticleContainer with same compile-time - * and run-time attributes. But it can change its allocator. 
- * - * This function overloads the corresponding function from the parent - * class (amrex::ParticleContainer) - */ - template class NewAllocator=amrex::DefaultAllocator> - NamedComponentParticleContainer - make_alike () const { - auto tmp = NamedComponentParticleContainer( - amrex::ParticleContainerPureSoA::template make_alike(), - particle_comps, - particle_icomps, - particle_runtime_comps, - particle_runtime_icomps); - - return tmp; - } - - using amrex::ParticleContainerPureSoA::NumRealComps; - using amrex::ParticleContainerPureSoA::NumIntComps; - using amrex::ParticleContainerPureSoA::AddRealComp; - using amrex::ParticleContainerPureSoA::AddIntComp; - - /** Allocate a new run-time real component - * - * @param name Name of the new component - * @param comm Whether to communicate this component, in the particle Redistribute - */ - void NewRealComp (const std::string& name, bool comm=true) - { - auto search = particle_comps.find(name); - if (search == particle_comps.end()) { - particle_comps[name] = NumRealComps(); - particle_runtime_comps[name] = NumRealComps() - PIdx::nattribs; - AddRealComp(comm); - } else { - amrex::Print() << Utils::TextMsg::Info( - name + " already exists in particle_comps, not adding."); - } - } - - /** Allocate a new run-time integer component - * - * @param name Name of the new component - * @param comm Whether to communicate this component, in the particle Redistribute - */ - void NewIntComp (const std::string& name, bool comm=true) - { - auto search = particle_icomps.find(name); - if (search == particle_icomps.end()) { - particle_icomps[name] = NumIntComps(); - particle_runtime_icomps[name] = NumIntComps() - 0; - AddIntComp(comm); - } else { - amrex::Print() << Utils::TextMsg::Info( - name + " already exists in particle_icomps, not adding."); - } - } - - void defineAllParticleTiles () noexcept - { - for (int lev = 0; lev <= amrex::ParticleContainerPureSoA::finestLevel(); ++lev) - { - for (auto mfi = amrex::ParticleContainerPureSoA::MakeMFIter(lev); mfi.isValid(); ++mfi) - { - const int grid_id = mfi.index(); - const int tile_id = mfi.LocalTileIndex(); - amrex::ParticleContainerPureSoA::DefineAndReturnParticleTile(lev, grid_id, tile_id); - } - } - } - - /** Return the name-to-index map for the compile-time and runtime-time real components */ - [[nodiscard]] std::map getParticleComps () const noexcept { return particle_comps;} - /** Return the name-to-index map for the compile-time and runtime-time integer components */ - [[nodiscard]] std::map getParticleiComps () const noexcept { return particle_icomps;} - /** Return the name-to-index map for the runtime-time real components */ - [[nodiscard]] std::map getParticleRuntimeComps () const noexcept { return particle_runtime_comps;} - /** Return the name-to-index map for the runtime-time integer components */ - [[nodiscard]] std::map getParticleRuntimeiComps () const noexcept { return particle_runtime_icomps;} - -protected: - std::map particle_comps; - std::map particle_icomps; - std::map particle_runtime_comps; - std::map particle_runtime_icomps; -}; - -#endif //WARPX_NamedComponentParticleContainer_H_ diff --git a/Source/Particles/ParticleBoundaryBuffer.H b/Source/Particles/ParticleBoundaryBuffer.H index 24b388be00e..c9589ac0c75 100644 --- a/Source/Particles/ParticleBoundaryBuffer.H +++ b/Source/Particles/ParticleBoundaryBuffer.H @@ -32,9 +32,9 @@ public: /** Copy operator for ParticleBoundaryBuffer */ ParticleBoundaryBuffer& operator= ( const ParticleBoundaryBuffer & ) = delete; - /** Move constructor for 
NamedComponentParticleContainer */ + /** Move constructor for ParticleBoundaryBuffer */ ParticleBoundaryBuffer ( ParticleBoundaryBuffer && ) = default; - /** Move operator for NamedComponentParticleContainer */ + /** Move operator for ParticleBoundaryBuffer */ ParticleBoundaryBuffer& operator= ( ParticleBoundaryBuffer && ) = default; int numSpecies() const { return static_cast(getSpeciesNames().size()); } diff --git a/Source/Particles/ParticleBoundaryBuffer.cpp b/Source/Particles/ParticleBoundaryBuffer.cpp index dbe5dea7085..048534bff6a 100644 --- a/Source/Particles/ParticleBoundaryBuffer.cpp +++ b/Source/Particles/ParticleBoundaryBuffer.cpp @@ -384,11 +384,11 @@ void ParticleBoundaryBuffer::gatherParticlesFromDomainBoundaries (MultiParticleC if (!buffer[i].isDefined()) { buffer[i] = pc.make_alike(); - buffer[i].NewIntComp("stepScraped", false); - buffer[i].NewRealComp("deltaTimeScraped", false); - buffer[i].NewRealComp("nx", false); - buffer[i].NewRealComp("ny", false); - buffer[i].NewRealComp("nz", false); + buffer[i].AddIntComp("stepScraped", false); + buffer[i].AddRealComp("deltaTimeScraped", false); + buffer[i].AddRealComp("nx", false); + buffer[i].AddRealComp("ny", false); + buffer[i].AddRealComp("nz", false); } auto& species_buffer = buffer[i]; @@ -443,11 +443,10 @@ void ParticleBoundaryBuffer::gatherParticlesFromDomainBoundaries (MultiParticleC WARPX_PROFILE("ParticleBoundaryBuffer::gatherParticles::filterAndTransform"); auto& warpx = WarpX::GetInstance(); const auto dt = warpx.getdt(pti.GetLevel()); - auto string_to_index_intcomp = buffer[i].getParticleRuntimeiComps(); - const int step_scraped_index = string_to_index_intcomp.at("stepScraped"); - auto string_to_index_realcomp = buffer[i].getParticleRuntimeComps(); - const int delta_index = string_to_index_realcomp.at("deltaTimeScraped"); - const int normal_index = string_to_index_realcomp.at("nx"); + auto & buf = buffer[i]; + const int step_scraped_index = buf.GetIntCompIndex("stepScraped") - PinnedMemoryParticleContainer::NArrayInt; + const int delta_index = buf.GetRealCompIndex("deltaTimeScraped") - PinnedMemoryParticleContainer::NArrayReal; + const int normal_index = buf.GetRealCompIndex("nx") - PinnedMemoryParticleContainer::NArrayReal; const int step = warpx_instance.getistep(0); amrex::filterAndTransformParticles(ptile_buffer, ptile, predicate, @@ -481,11 +480,11 @@ void ParticleBoundaryBuffer::gatherParticlesFromEmbeddedBoundaries ( if (!buffer[i].isDefined()) { buffer[i] = pc.make_alike(); - buffer[i].NewIntComp("stepScraped", false); - buffer[i].NewRealComp("deltaTimeScraped", false); - buffer[i].NewRealComp("nx", false); - buffer[i].NewRealComp("ny", false); - buffer[i].NewRealComp("nz", false); + buffer[i].AddIntComp("stepScraped", false); + buffer[i].AddRealComp("deltaTimeScraped", false); + buffer[i].AddRealComp("nx", false); + buffer[i].AddRealComp("ny", false); + buffer[i].AddRealComp("nz", false); } @@ -546,11 +545,10 @@ void ParticleBoundaryBuffer::gatherParticlesFromEmbeddedBoundaries ( } auto &warpx = WarpX::GetInstance(); const auto dt = warpx.getdt(pti.GetLevel()); - auto string_to_index_intcomp = buffer[i].getParticleRuntimeiComps(); - const int step_scraped_index = string_to_index_intcomp.at("stepScraped"); - auto string_to_index_realcomp = buffer[i].getParticleRuntimeComps(); - const int delta_index = string_to_index_realcomp.at("deltaTimeScraped"); - const int normal_index = string_to_index_realcomp.at("nx"); + auto & buf = buffer[i]; + const int step_scraped_index = buf.GetIntCompIndex("stepScraped") - 
PinnedMemoryParticleContainer::NArrayInt; + const int delta_index = buf.GetRealCompIndex("deltaTimeScraped") - PinnedMemoryParticleContainer::NArrayReal; + const int normal_index = buf.GetRealCompIndex("nx") - PinnedMemoryParticleContainer::NArrayReal; const int step = warpx_instance.getistep(0); { diff --git a/Source/Particles/ParticleCreation/DefaultInitialization.H b/Source/Particles/ParticleCreation/DefaultInitialization.H index 88b23905481..1922c829379 100644 --- a/Source/Particles/ParticleCreation/DefaultInitialization.H +++ b/Source/Particles/ParticleCreation/DefaultInitialization.H @@ -102,8 +102,8 @@ namespace ParticleCreation { * These are NOT initialized by this function. * @param[in] user_real_attribs The names of the real components for this particle tile * @param[in] user_int_attribs The names of the int components for this particle tile - * @param[in] particle_comps map between particle component index and component name for real comps - * @param[in] particle_icomps map between particle component index and component name for int comps + * @param[in] particle_comps particle component names for real comps + * @param[in] particle_icomps particle component names for int comps * @param[in] user_real_attrib_parser the parser functions used to initialize the user real components * @param[in] user_int_attrib_parser the parser functions used to initialize the user int components * @param[in] do_qed_comps whether to initialize the qed components (these are usually handled by @@ -120,8 +120,8 @@ void DefaultInitializeRuntimeAttributes (PTile& ptile, const int n_external_attr_int, const std::vector& user_real_attribs, const std::vector& user_int_attribs, - const std::map& particle_comps, - const std::map& particle_icomps, + const std::vector& particle_comps, + const std::vector& particle_icomps, const std::vector& user_real_attrib_parser, const std::vector& user_int_attrib_parser, #ifdef WARPX_QED @@ -151,8 +151,9 @@ void DefaultInitializeRuntimeAttributes (PTile& ptile, auto attr_ptr = ptile.GetStructOfArrays().GetRealData(j).data(); #ifdef WARPX_QED // Current runtime comp is quantum synchrotron optical depth - if (particle_comps.find("opticalDepthQSR") != particle_comps.end() && - particle_comps.at("opticalDepthQSR") == j) + auto const it_qsr = std::find(particle_comps.begin(), particle_comps.end(), "opticalDepthQSR"); + if (it_qsr != particle_comps.end() && + std::distance(particle_comps.begin(), it_qsr) == j) { if (!do_qed_comps) { continue; } const QuantumSynchrotronGetOpticalDepth quantum_sync_get_opt = @@ -172,9 +173,10 @@ void DefaultInitializeRuntimeAttributes (PTile& ptile, } } - // Current runtime comp is Breit-Wheeler optical depth - if (particle_comps.find("opticalDepthBW") != particle_comps.end() && - particle_comps.at("opticalDepthBW") == j) + // Current runtime comp is Breit-Wheeler optical depth + auto const it_bw = std::find(particle_comps.begin(), particle_comps.end(), "opticalDepthBW"); + if (it_bw != particle_comps.end() && + std::distance(particle_comps.begin(), it_bw) == j) { if (!do_qed_comps) { continue; } const BreitWheelerGetOpticalDepth breit_wheeler_get_opt = @@ -198,8 +200,9 @@ void DefaultInitializeRuntimeAttributes (PTile& ptile, for (int ia = 0; ia < n_user_real_attribs; ++ia) { // Current runtime comp is ia-th user defined attribute - if (particle_comps.find(user_real_attribs[ia]) != particle_comps.end() && - particle_comps.at(user_real_attribs[ia]) == j) + auto const it_ura = std::find(particle_comps.begin(), particle_comps.end(), 
user_real_attribs[ia]); + if (it_ura != particle_comps.end() && + std::distance(particle_comps.begin(), it_ura) == j) { const amrex::ParserExecutor<7> user_real_attrib_parserexec = user_real_attrib_parser[ia]->compile<7>(); @@ -232,8 +235,9 @@ void DefaultInitializeRuntimeAttributes (PTile& ptile, auto attr_ptr = ptile.GetStructOfArrays().GetIntData(j).data(); // Current runtime comp is ionization level - if (particle_icomps.find("ionizationLevel") != particle_icomps.end() && - particle_icomps.at("ionizationLevel") == j) + auto const it_ioniz = std::find(particle_icomps.begin(), particle_icomps.end(), "ionizationLevel"); + if (it_ioniz != particle_icomps.end() && + std::distance(particle_icomps.begin(), it_ioniz) == j) { if constexpr (amrex::RunOnGpu>::value) { amrex::ParallelFor(stop - start, @@ -251,8 +255,9 @@ void DefaultInitializeRuntimeAttributes (PTile& ptile, for (int ia = 0; ia < n_user_int_attribs; ++ia) { // Current runtime comp is ia-th user defined attribute - if (particle_icomps.find(user_int_attribs[ia]) != particle_icomps.end() && - particle_icomps.at(user_int_attribs[ia]) == j) + auto const it_uia = std::find(particle_icomps.begin(), particle_icomps.end(), user_int_attribs[ia]); + if (it_uia != particle_icomps.end() && + std::distance(particle_icomps.begin(), it_uia) == j) { const amrex::ParserExecutor<7> user_int_attrib_parserexec = user_int_attrib_parser[ia]->compile<7>(); diff --git a/Source/Particles/ParticleCreation/FilterCopyTransform.H b/Source/Particles/ParticleCreation/FilterCopyTransform.H index c6ca69d5e89..c05038fae2f 100644 --- a/Source/Particles/ParticleCreation/FilterCopyTransform.H +++ b/Source/Particles/ParticleCreation/FilterCopyTransform.H @@ -88,7 +88,7 @@ Index filterCopyTransformParticles (DstPC& pc, DstTile& dst, SrcTile& src, ParticleCreation::DefaultInitializeRuntimeAttributes(dst, 0, 0, pc.getUserRealAttribs(), pc.getUserIntAttribs(), - pc.getParticleComps(), pc.getParticleiComps(), + pc.GetRealSoANames(), pc.GetIntSoANames(), pc.getUserRealAttribParser(), pc.getUserIntAttribParser(), #ifdef WARPX_QED @@ -258,7 +258,7 @@ Index filterCopyTransformParticles (DstPC& pc1, DstPC& pc2, DstTile& dst1, DstTi ParticleCreation::DefaultInitializeRuntimeAttributes(dst1, 0, 0, pc1.getUserRealAttribs(), pc1.getUserIntAttribs(), - pc1.getParticleComps(), pc1.getParticleiComps(), + pc1.GetRealSoANames(), pc1.GetIntSoANames(), pc1.getUserRealAttribParser(), pc1.getUserIntAttribParser(), #ifdef WARPX_QED @@ -272,7 +272,7 @@ Index filterCopyTransformParticles (DstPC& pc1, DstPC& pc2, DstTile& dst1, DstTi ParticleCreation::DefaultInitializeRuntimeAttributes(dst2, 0, 0, pc2.getUserRealAttribs(), pc2.getUserIntAttribs(), - pc2.getParticleComps(), pc2.getParticleiComps(), + pc2.GetRealSoANames(), pc2.GetIntSoANames(), pc2.getUserRealAttribParser(), pc2.getUserIntAttribParser(), #ifdef WARPX_QED diff --git a/Source/Particles/ParticleCreation/FilterCreateTransformFromFAB.H b/Source/Particles/ParticleCreation/FilterCreateTransformFromFAB.H index 424008e18a6..266faae6322 100644 --- a/Source/Particles/ParticleCreation/FilterCreateTransformFromFAB.H +++ b/Source/Particles/ParticleCreation/FilterCreateTransformFromFAB.H @@ -136,7 +136,7 @@ Index filterCreateTransformFromFAB (DstPC& pc1, DstPC& pc2, ParticleCreation::DefaultInitializeRuntimeAttributes(dst1, 0, 0, pc1.getUserRealAttribs(), pc1.getUserIntAttribs(), - pc1.getParticleComps(), pc1.getParticleiComps(), + pc1.GetRealSoANames(), pc1.GetIntSoANames(), pc1.getUserRealAttribParser(), pc1.getUserIntAttribParser(), #ifdef 
WARPX_QED @@ -150,7 +150,7 @@ Index filterCreateTransformFromFAB (DstPC& pc1, DstPC& pc2, ParticleCreation::DefaultInitializeRuntimeAttributes(dst2, 0, 0, pc2.getUserRealAttribs(), pc2.getUserIntAttribs(), - pc2.getParticleComps(), pc2.getParticleiComps(), + pc2.GetRealSoANames(), pc2.GetIntSoANames(), pc2.getUserRealAttribParser(), pc2.getUserIntAttribParser(), #ifdef WARPX_QED diff --git a/Source/Particles/ParticleCreation/SmartCopy.H b/Source/Particles/ParticleCreation/SmartCopy.H index e1d944e9c30..6be363e6337 100644 --- a/Source/Particles/ParticleCreation/SmartCopy.H +++ b/Source/Particles/ParticleCreation/SmartCopy.H @@ -140,10 +140,10 @@ class SmartCopyFactory public: template <class SrcPC, class DstPC> SmartCopyFactory (const SrcPC& src, const DstPC& dst) noexcept : - m_tag_real{getSmartCopyTag(src.getParticleComps(), dst.getParticleComps())}, - m_tag_int{getSmartCopyTag(src.getParticleiComps(), dst.getParticleiComps())}, - m_policy_real{getPolicies(dst.getParticleComps())}, - m_policy_int{getPolicies(dst.getParticleiComps())}, + m_tag_real{getSmartCopyTag(src.GetRealSoANames(), dst.GetRealSoANames())}, + m_tag_int{getSmartCopyTag(src.GetIntSoANames(), dst.GetIntSoANames())}, + m_policy_real{getPolicies(dst.GetRealSoANames())}, + m_policy_int{getPolicies(dst.GetIntSoANames())}, m_defined{true} {} diff --git a/Source/Particles/ParticleCreation/SmartCreate.H b/Source/Particles/ParticleCreation/SmartCreate.H index d93624b6433..688f1c3701f 100644 --- a/Source/Particles/ParticleCreation/SmartCreate.H +++ b/Source/Particles/ParticleCreation/SmartCreate.H @@ -97,8 +97,8 @@ class SmartCreateFactory public: template <class PartTileData> SmartCreateFactory (const PartTileData& part) noexcept: - m_policy_real{getPolicies(part.getParticleComps())}, - m_policy_int{getPolicies(part.getParticleiComps())}, + m_policy_real{getPolicies(part.GetRealSoANames())}, + m_policy_int{getPolicies(part.GetIntSoANames())}, m_defined{true} {} diff --git a/Source/Particles/ParticleCreation/SmartUtils.H b/Source/Particles/ParticleCreation/SmartUtils.H index 652a3aecd17..358c2b1a7a9 100644 --- a/Source/Particles/ParticleCreation/SmartUtils.H +++ b/Source/Particles/ParticleCreation/SmartUtils.H @@ -35,9 +35,9 @@ struct SmartCopyTag [[nodiscard]] int size () const noexcept { return static_cast<int>(common_names.size()); } }; -PolicyVec getPolicies (const NameMap& names) noexcept; +PolicyVec getPolicies (std::vector<std::string> const & names) noexcept; -SmartCopyTag getSmartCopyTag (const NameMap& src, const NameMap& dst) noexcept; +SmartCopyTag getSmartCopyTag (std::vector<std::string> const & src, std::vector<std::string> const & dst) noexcept; /** * \brief Sets the ids of newly created particles to the next values.
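A note on the migration pattern in this patch, before the SmartUtils.cpp hunk below: the former name-to-index maps (getParticleComps(), getParticleiComps() and their runtime variants) are replaced by AMReX's name-based SoA accessors, where a component's index is simply its position in GetRealSoANames()/GetIntSoANames(), and a runtime-relative index is obtained by subtracting the number of compile-time components (NArrayReal/NArrayInt). A minimal stand-alone C++ sketch of this convention, with illustrative stand-ins rather than the actual AMReX implementation:

#include <algorithm>
#include <cassert>
#include <iterator>
#include <stdexcept>
#include <string>
#include <vector>

// Illustrative stand-in: in WarpX, the first NArrayReal names are the
// compile-time components (PIdx::names); runtime components follow.
static const std::vector<std::string> real_soa_names = {
    "x", "y", "z", "w", "ux", "uy", "uz", // compile-time (3D: NArrayReal == 7)
    "opticalDepthQSR"                     // runtime, added via AddRealComp()
};
static constexpr int NArrayReal = 7;

// Stand-in for GetRealCompIndex(name): the position of name in the list.
int GetRealCompIndex (std::string const & name)
{
    auto const it = std::find(real_soa_names.begin(), real_soa_names.end(), name);
    if (it == real_soa_names.end()) {
        throw std::runtime_error("unknown real component: " + name);
    }
    return static_cast<int>(std::distance(real_soa_names.begin(), it));
}

int main ()
{
    // absolute index, as passed to GetRealData()/GetAttribs()
    assert(GetRealCompIndex("opticalDepthQSR") == 7);
    // runtime-relative index, as used by the QED filter functors in this patch
    assert(GetRealCompIndex("opticalDepthQSR") - NArrayReal == 0);
    return 0;
}

The std::map built inside getSmartCopyTag() in the next hunk is only a sorted intermediate for matching names across two containers; a single lookup is a plain linear search, as sketched above.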
diff --git a/Source/Particles/ParticleCreation/SmartUtils.cpp b/Source/Particles/ParticleCreation/SmartUtils.cpp index 7e79f58c59e..19e5bee8b97 100644 --- a/Source/Particles/ParticleCreation/SmartUtils.cpp +++ b/Source/Particles/ParticleCreation/SmartUtils.cpp @@ -13,8 +13,11 @@ #include #include -PolicyVec getPolicies (const NameMap& names) noexcept +PolicyVec getPolicies (std::vector const & names_vec) noexcept { + NameMap names; + for (auto i = 0u; i < names_vec.size(); ++i) { names.emplace(names_vec[i], i); } + std::vector h_policies; h_policies.resize(names.size()); for (const auto& kv : names) @@ -31,10 +34,16 @@ PolicyVec getPolicies (const NameMap& names) noexcept return policies; } -SmartCopyTag getSmartCopyTag (const NameMap& src, const NameMap& dst) noexcept +SmartCopyTag getSmartCopyTag (std::vector const & src_names, std::vector const & dst_names) noexcept { SmartCopyTag tag; + // We want to avoid running an NxM algorithm to find pairs, so sort the components first. + NameMap src; + NameMap dst; + for (auto i = 0u; i < src_names.size(); ++i) { src.emplace(src_names[i], i); } + for (auto i = 0u; i < dst_names.size(); ++i) { dst.emplace(dst_names[i], i); } + std::vector h_src_comps; std::vector h_dst_comps; diff --git a/Source/Particles/PhotonParticleContainer.cpp b/Source/Particles/PhotonParticleContainer.cpp index 47c426cd6ff..ad0b3364eea 100644 --- a/Source/Particles/PhotonParticleContainer.cpp +++ b/Source/Particles/PhotonParticleContainer.cpp @@ -122,7 +122,7 @@ PhotonParticleContainer::PushPX (WarpXParIter& pti, const bool local_has_breit_wheeler = has_breit_wheeler(); if (local_has_breit_wheeler) { evolve_opt = m_shr_p_bw_engine->build_evolve_functor(); - p_optical_depth_BW = pti.GetAttribs(particle_comps["opticalDepthBW"]).dataPtr() + offset; + p_optical_depth_BW = pti.GetAttribs("opticalDepthBW").dataPtr() + offset; } #endif diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 9bf24e659e0..88c9a2273fd 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -342,12 +342,12 @@ PhysicalParticleContainer::PhysicalParticleContainer (AmrCore* amr_core, int isp #ifdef WARPX_QED pp_species_name.query("do_qed_quantum_sync", m_do_qed_quantum_sync); if (m_do_qed_quantum_sync) { - NewRealComp("opticalDepthQSR"); + AddRealComp("opticalDepthQSR"); } pp_species_name.query("do_qed_breit_wheeler", m_do_qed_breit_wheeler); if (m_do_qed_breit_wheeler) { - NewRealComp("opticalDepthBW"); + AddRealComp("opticalDepthBW"); } if(m_do_qed_quantum_sync){ @@ -368,7 +368,7 @@ PhysicalParticleContainer::PhysicalParticleContainer (AmrCore* amr_core, int isp str_int_attrib_function.at(i)); m_user_int_attrib_parser.at(i) = std::make_unique( utils::parser::makeParser(str_int_attrib_function.at(i),{"x","y","z","ux","uy","uz","t"})); - NewIntComp(m_user_int_attribs.at(i)); + AddIntComp(m_user_int_attribs.at(i)); } // User-defined real attributes @@ -383,19 +383,19 @@ PhysicalParticleContainer::PhysicalParticleContainer (AmrCore* amr_core, int isp str_real_attrib_function.at(i)); m_user_real_attrib_parser.at(i) = std::make_unique( utils::parser::makeParser(str_real_attrib_function.at(i),{"x","y","z","ux","uy","uz","t"})); - NewRealComp(m_user_real_attribs.at(i)); + AddRealComp(m_user_real_attribs.at(i)); } // If old particle positions should be saved add the needed components pp_species_name.query("save_previous_position", m_save_previous_position); if (m_save_previous_position) { #if 
(AMREX_SPACEDIM >= 2) - NewRealComp("prev_x"); + AddRealComp("prev_x"); #endif #if defined(WARPX_DIM_3D) - NewRealComp("prev_y"); + AddRealComp("prev_y"); #endif - NewRealComp("prev_z"); + AddRealComp("prev_z"); #ifdef WARPX_DIM_RZ amrex::Abort("Saving previous particle positions not yet implemented in RZ"); #endif @@ -813,7 +813,7 @@ PhysicalParticleContainer::DefaultInitializeRuntimeAttributes ( ParticleCreation::DefaultInitializeRuntimeAttributes(pinned_tile, n_external_attr_real, n_external_attr_int, m_user_real_attribs, m_user_int_attribs, - particle_comps, particle_icomps, + GetRealSoANames(), GetIntSoANames(), amrex::GetVecOfPtrs(m_user_real_attrib_parser), amrex::GetVecOfPtrs(m_user_int_attrib_parser), #ifdef WARPX_QED @@ -1086,7 +1086,7 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int } uint64_t * AMREX_RESTRICT pa_idcpu = soa.GetIdCPUData().data() + old_size; - PlasmaParserHelper plasma_parser_helper (soa, old_size, m_user_int_attribs, m_user_real_attribs, particle_icomps, particle_comps, plasma_parser_wrapper); + PlasmaParserHelper plasma_parser_helper(soa, old_size, m_user_int_attribs, m_user_real_attribs, plasma_parser_wrapper); int** pa_user_int_data = plasma_parser_helper.getUserIntDataPtrs(); ParticleReal** pa_user_real_data = plasma_parser_helper.getUserRealDataPtrs(); amrex::ParserExecutor<7> const* user_int_parserexec_data = plasma_parser_helper.getUserIntParserExecData(); @@ -1094,11 +1094,11 @@ PhysicalParticleContainer::AddPlasma (PlasmaInjector const& plasma_injector, int int* pi = nullptr; if (do_field_ionization) { - pi = soa.GetIntData(particle_icomps["ionizationLevel"]).data() + old_size; + pi = soa.GetIntData("ionizationLevel").data() + old_size; } #ifdef WARPX_QED - const QEDHelper qed_helper(soa, old_size, particle_comps, + const QEDHelper qed_helper(soa, old_size, has_quantum_sync(), has_breit_wheeler(), m_shr_p_qs_engine, m_shr_p_bw_engine); #endif @@ -1522,7 +1522,7 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, } uint64_t * AMREX_RESTRICT pa_idcpu = soa.GetIdCPUData().data() + old_size; - PlasmaParserHelper plasma_parser_helper (soa, old_size, m_user_int_attribs, m_user_real_attribs, particle_icomps, particle_comps, plasma_parser_wrapper); + PlasmaParserHelper plasma_parser_helper(soa, old_size, m_user_int_attribs, m_user_real_attribs, plasma_parser_wrapper); int** pa_user_int_data = plasma_parser_helper.getUserIntDataPtrs(); ParticleReal** pa_user_real_data = plasma_parser_helper.getUserRealDataPtrs(); amrex::ParserExecutor<7> const* user_int_parserexec_data = plasma_parser_helper.getUserIntParserExecData(); @@ -1530,11 +1530,11 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, int* p_ion_level = nullptr; if (do_field_ionization) { - p_ion_level = soa.GetIntData(particle_icomps["ionizationLevel"]).data() + old_size; + p_ion_level = soa.GetIntData("ionizationLevel").data() + old_size; } #ifdef WARPX_QED - const QEDHelper qed_helper(soa, old_size, particle_comps, + const QEDHelper qed_helper(soa, old_size, has_quantum_sync(), has_breit_wheeler(), m_shr_p_qs_engine, m_shr_p_bw_engine); #endif @@ -1922,7 +1922,7 @@ PhysicalParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields, // Deposit charge before particle push, in component 0 of MultiFab rho. const int* const AMREX_RESTRICT ion_lev = (do_field_ionization)? 
- pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr():nullptr; + pti.GetiAttribs("ionizationLevel").dataPtr():nullptr; amrex::MultiFab* rho = fields.get(FieldType::rho_fp, lev); DepositCharge(pti, wp, ion_lev, rho, 0, 0, @@ -2018,7 +2018,7 @@ PhysicalParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields, const amrex::Real relative_time = (push_type == PushType::Explicit ? -0.5_rt * dt : 0.0_rt); const int* const AMREX_RESTRICT ion_lev = (do_field_ionization)? - pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr():nullptr; + pti.GetiAttribs("ionizationLevel").dataPtr():nullptr; // Deposit inside domains amrex::MultiFab * jx = fields.get(current_fp_string, Direction{0}, lev); @@ -2050,7 +2050,7 @@ PhysicalParticleContainer::Evolve (ablastr::fields::MultiFabRegister& fields, "Cannot deposit charge in rho component 1: only component 0 is allocated!"); const int* const AMREX_RESTRICT ion_lev = (do_field_ionization)? - pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr():nullptr; + pti.GetiAttribs("ionizationLevel").dataPtr():nullptr; DepositCharge(pti, wp, ion_lev, rho, 1, 0, np_current, thread_num, lev, lev); @@ -2424,7 +2424,7 @@ PhysicalParticleContainer::PushP (int lev, Real dt, int* AMREX_RESTRICT ion_lev = nullptr; if (do_field_ionization) { - ion_lev = pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr(); + ion_lev = pti.GetiAttribs("ionizationLevel").dataPtr(); } // Loop over the particles and update their momentum @@ -2620,7 +2620,7 @@ PhysicalParticleContainer::PushPX (WarpXParIter& pti, int* AMREX_RESTRICT ion_lev = nullptr; if (do_field_ionization) { - ion_lev = pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr() + offset; + ion_lev = pti.GetiAttribs("ionizationLevel").dataPtr() + offset; } const bool save_previous_position = m_save_previous_position; @@ -2629,12 +2629,12 @@ PhysicalParticleContainer::PushPX (WarpXParIter& pti, ParticleReal* z_old = nullptr; if (save_previous_position) { #if (AMREX_SPACEDIM >= 2) - x_old = pti.GetAttribs(particle_comps["prev_x"]).dataPtr() + offset; + x_old = pti.GetAttribs("prev_x").dataPtr() + offset; #endif #if defined(WARPX_DIM_3D) - y_old = pti.GetAttribs(particle_comps["prev_y"]).dataPtr() + offset; + y_old = pti.GetAttribs("prev_y").dataPtr() + offset; #endif - z_old = pti.GetAttribs(particle_comps["prev_z"]).dataPtr() + offset; + z_old = pti.GetAttribs("prev_z").dataPtr() + offset; amrex::ignore_unused(x_old, y_old); } @@ -2654,7 +2654,7 @@ PhysicalParticleContainer::PushPX (WarpXParIter& pti, const bool local_has_quantum_sync = has_quantum_sync(); if (local_has_quantum_sync) { evolve_opt = m_shr_p_qs_engine->build_evolve_functor(); - p_optical_depth_QSR = pti.GetAttribs(particle_comps["opticalDepthQSR"]).dataPtr() + offset; + p_optical_depth_QSR = pti.GetAttribs("opticalDepthQSR").dataPtr() + offset; } #endif @@ -2859,15 +2859,15 @@ PhysicalParticleContainer::ImplicitPushXP (WarpXParIter& pti, ParticleReal* const AMREX_RESTRICT uz = attribs[PIdx::uz].dataPtr() + offset; #if (AMREX_SPACEDIM >= 2) - ParticleReal* x_n = pti.GetAttribs(particle_comps["x_n"]).dataPtr(); + ParticleReal* x_n = pti.GetAttribs("x_n").dataPtr(); #endif #if defined(WARPX_DIM_3D) || defined(WARPX_DIM_RZ) - ParticleReal* y_n = pti.GetAttribs(particle_comps["y_n"]).dataPtr(); + ParticleReal* y_n = pti.GetAttribs("y_n").dataPtr(); #endif - ParticleReal* z_n = pti.GetAttribs(particle_comps["z_n"]).dataPtr(); - ParticleReal* ux_n = pti.GetAttribs(particle_comps["ux_n"]).dataPtr(); - ParticleReal* uy_n = 
pti.GetAttribs(particle_comps["uy_n"]).dataPtr(); - ParticleReal* uz_n = pti.GetAttribs(particle_comps["uz_n"]).dataPtr(); + ParticleReal* z_n = pti.GetAttribs("z_n").dataPtr(); + ParticleReal* ux_n = pti.GetAttribs("ux_n").dataPtr(); + ParticleReal* uy_n = pti.GetAttribs("uy_n").dataPtr(); + ParticleReal* uz_n = pti.GetAttribs("uz_n").dataPtr(); const int do_copy = (m_do_back_transformed_particles && (a_dt_type!=DtType::SecondHalf) ); CopyParticleAttribs copyAttribs; @@ -2877,7 +2877,7 @@ PhysicalParticleContainer::ImplicitPushXP (WarpXParIter& pti, int* AMREX_RESTRICT ion_lev = nullptr; if (do_field_ionization) { - ion_lev = pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr() + offset; + ion_lev = pti.GetiAttribs("ionizationLevel").dataPtr() + offset; } // Loop over the particles and update their momentum @@ -2896,7 +2896,7 @@ PhysicalParticleContainer::ImplicitPushXP (WarpXParIter& pti, const bool local_has_quantum_sync = has_quantum_sync(); if (local_has_quantum_sync) { evolve_opt = m_shr_p_qs_engine->build_evolve_functor(); - p_optical_depth_QSR = pti.GetAttribs(particle_comps["opticalDepthQSR"]).dataPtr() + offset; + p_optical_depth_QSR = pti.GetAttribs("opticalDepthQSR").dataPtr() + offset; } #endif @@ -3110,7 +3110,7 @@ PhysicalParticleContainer::InitIonizationModule () physical_element == "H" || !do_adk_correction, "Correction to ADK by Zhang et al., PRA 90, 043410 (2014) only works with Hydrogen"); // Add runtime integer component for ionization level - NewIntComp("ionizationLevel"); + AddIntComp("ionizationLevel"); // Get atomic number and ionization energies from file const int ion_element_id = utils::physics::ion_map_ids.at(physical_element); ion_atomic_number = utils::physics::ion_atomic_numbers[ion_element_id]; @@ -3193,7 +3193,7 @@ PhysicalParticleContainer::getIonizationFunc (const WarpXParIter& pti, adk_exp_prefactor.dataPtr(), adk_power.dataPtr(), adk_correction_factors.dataPtr(), - particle_icomps["ionizationLevel"], + GetIntCompIndex("ionizationLevel"), ion_atomic_number, do_adk_correction}; } @@ -3299,14 +3299,14 @@ PhotonEmissionFilterFunc PhysicalParticleContainer::getPhotonEmissionFilterFunc () { WARPX_PROFILE("PhysicalParticleContainer::getPhotonEmissionFunc()"); - return PhotonEmissionFilterFunc{particle_runtime_comps["opticalDepthQSR"]}; + return PhotonEmissionFilterFunc{GetRealCompIndex("opticalDepthQSR") - NArrayReal}; } PairGenerationFilterFunc PhysicalParticleContainer::getPairGenerationFilterFunc () { WARPX_PROFILE("PhysicalParticleContainer::getPairGenerationFunc()"); - return PairGenerationFilterFunc{particle_runtime_comps["opticalDepthBW"]}; + return PairGenerationFilterFunc{GetRealCompIndex("opticalDepthBW") - NArrayReal}; } #endif diff --git a/Source/Particles/PinnedMemoryParticleContainer.H b/Source/Particles/PinnedMemoryParticleContainer.H index 402c621eb9a..b9fc4bbe79e 100644 --- a/Source/Particles/PinnedMemoryParticleContainer.H +++ b/Source/Particles/PinnedMemoryParticleContainer.H @@ -1,8 +1,8 @@ #ifndef WARPX_PinnedMemoryParticleContainer_H_ #define WARPX_PinnedMemoryParticleContainer_H_ -#include "NamedComponentParticleContainer.H" +#include "WarpXParticleContainer.H" -using PinnedMemoryParticleContainer = NamedComponentParticleContainer; +using PinnedMemoryParticleContainer = amrex::ParticleContainerPureSoA; #endif //WARPX_PinnedMemoryParticleContainer_H_ diff --git a/Source/Particles/Pusher/GetAndSetPosition.H b/Source/Particles/Pusher/GetAndSetPosition.H index ab06fe3d6cd..d2a223c57d8 100644 --- 
a/Source/Particles/Pusher/GetAndSetPosition.H +++ b/Source/Particles/Pusher/GetAndSetPosition.H @@ -9,7 +9,6 @@ #define WARPX_PARTICLES_PUSHER_GETANDSETPOSITION_H_ #include "Particles/WarpXParticleContainer.H" -#include "Particles/NamedComponentParticleContainer.H" #include #include diff --git a/Source/Particles/RigidInjectedParticleContainer.cpp b/Source/Particles/RigidInjectedParticleContainer.cpp index 5d8b0111825..420d7599ecb 100644 --- a/Source/Particles/RigidInjectedParticleContainer.cpp +++ b/Source/Particles/RigidInjectedParticleContainer.cpp @@ -345,7 +345,7 @@ RigidInjectedParticleContainer::PushP (int lev, Real dt, int* AMREX_RESTRICT ion_lev = nullptr; if (do_field_ionization) { - ion_lev = pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr(); + ion_lev = pti.GetiAttribs("ionizationLevel").dataPtr(); } // Save the position and momenta, making copies diff --git a/Source/Particles/WarpXParticleContainer.H b/Source/Particles/WarpXParticleContainer.H index 9c316b110ee..a4581d4415d 100644 --- a/Source/Particles/WarpXParticleContainer.H +++ b/Source/Particles/WarpXParticleContainer.H @@ -23,7 +23,6 @@ # include "ElementaryProcess/QEDInternals/QuantumSyncEngineWrapper_fwd.H" #endif #include "MultiParticleContainer_fwd.H" -#include "NamedComponentParticleContainer.H" #include @@ -49,6 +48,55 @@ #include #include +/** Real Particle Attributes stored in amrex::ParticleContainer's struct of array + */ +struct PIdx +{ + enum { +#if !defined (WARPX_DIM_1D_Z) + x, +#endif +#if defined (WARPX_DIM_3D) + y, +#endif + z, + w, ///< weight + ux, uy, uz, +#ifdef WARPX_DIM_RZ + theta, ///< RZ needs all three position components +#endif + nattribs ///< number of compile-time attributes + }; + + //! component names + static constexpr auto names = { +#if !defined (WARPX_DIM_1D_Z) + "x", +#endif +#if defined (WARPX_DIM_3D) + "y", +#endif + "z", + "w", + "ux", + "uy", + "uz", +#ifdef WARPX_DIM_RZ + "theta" +#endif + }; + + static_assert(names.size() == nattribs); +}; + +struct IntIdx { + enum + { + nattribs ///< the number of attributes above (always last) + }; + + static constexpr std::initializer_list names = {}; +}; class WarpXParIter : public amrex::ParIterSoA @@ -80,10 +128,35 @@ public: return GetStructOfArrays().GetRealData(comp); } + [[nodiscard]] const IntVector& GetiAttribs (int comp) const + { + return GetStructOfArrays().GetIntData(comp); + } + [[nodiscard]] IntVector& GetiAttribs (int comp) { return GetStructOfArrays().GetIntData(comp); } + + [[nodiscard]] const RealVector& GetAttribs (const std::string& name) const + { + return GetStructOfArrays().GetRealData(name); + } + + [[nodiscard]] RealVector& GetAttribs (const std::string& name) + { + return GetStructOfArrays().GetRealData(name); + } + + [[nodiscard]] const IntVector& GetiAttribs (const std::string& name) const + { + return GetStructOfArrays().GetIntData(name); + } + + [[nodiscard]] IntVector& GetiAttribs (const std::string& name) + { + return GetStructOfArrays().GetIntData(name); + } }; /** @@ -109,7 +182,7 @@ public: * derived classes, e.g., Evolve) or actual functions (e.g. CurrentDeposition). 
*/ class WarpXParticleContainer - : public NamedComponentParticleContainer + : public amrex::ParticleContainerPureSoA { public: friend MultiParticleContainer; diff --git a/Source/Particles/WarpXParticleContainer.cpp b/Source/Particles/WarpXParticleContainer.cpp index 21b76485907..8e91093d95b 100644 --- a/Source/Particles/WarpXParticleContainer.cpp +++ b/Source/Particles/WarpXParticleContainer.cpp @@ -89,10 +89,14 @@ WarpXParIter::WarpXParIter (ContainerType& pc, int level, MFItInfo& info) } WarpXParticleContainer::WarpXParticleContainer (AmrCore* amr_core, int ispecies) - : NamedComponentParticleContainer(amr_core->GetParGDB()) + : amrex::ParticleContainerPureSoA(amr_core->GetParGDB()) , species_id(ispecies) { SetParticleSize(); + SetSoACompileTimeNames( + {PIdx::names.begin(), PIdx::names.end()}, + {IntIdx::names.begin(), IntIdx::names.end()} + ); ReadParameters(); // Reading the external fields needs to be here since ReadParameters @@ -627,22 +631,22 @@ WarpXParticleContainer::DepositCurrent (WarpXParIter& pti, } else if (push_type == PushType::Implicit) { #if (AMREX_SPACEDIM >= 2) - auto& xp_n = pti.GetAttribs(particle_comps["x_n"]); + auto& xp_n = pti.GetAttribs("x_n"); const ParticleReal* xp_n_data = xp_n.dataPtr() + offset; #else const ParticleReal* xp_n_data = nullptr; #endif #if defined(WARPX_DIM_3D) || defined(WARPX_DIM_RZ) - auto& yp_n = pti.GetAttribs(particle_comps["y_n"]); + auto& yp_n = pti.GetAttribs("y_n"); const ParticleReal* yp_n_data = yp_n.dataPtr() + offset; #else const ParticleReal* yp_n_data = nullptr; #endif - auto& zp_n = pti.GetAttribs(particle_comps["z_n"]); + auto& zp_n = pti.GetAttribs("z_n"); const ParticleReal* zp_n_data = zp_n.dataPtr() + offset; - auto& uxp_n = pti.GetAttribs(particle_comps["ux_n"]); - auto& uyp_n = pti.GetAttribs(particle_comps["uy_n"]); - auto& uzp_n = pti.GetAttribs(particle_comps["uz_n"]); + auto& uxp_n = pti.GetAttribs("ux_n"); + auto& uyp_n = pti.GetAttribs("uy_n"); + auto& uzp_n = pti.GetAttribs("uz_n"); if (WarpX::nox == 1){ doChargeConservingDepositionShapeNImplicit<1>( xp_n_data, yp_n_data, zp_n_data, @@ -680,22 +684,22 @@ WarpXParticleContainer::DepositCurrent (WarpXParIter& pti, } else if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Villasenor) { if (push_type == PushType::Implicit) { #if (AMREX_SPACEDIM >= 2) - auto& xp_n = pti.GetAttribs(particle_comps["x_n"]); + auto& xp_n = pti.GetAttribs("x_n"); const ParticleReal* xp_n_data = xp_n.dataPtr() + offset; #else const ParticleReal* xp_n_data = nullptr; #endif #if defined(WARPX_DIM_3D) || defined(WARPX_DIM_RZ) - auto& yp_n = pti.GetAttribs(particle_comps["y_n"]); + auto& yp_n = pti.GetAttribs("y_n"); const ParticleReal* yp_n_data = yp_n.dataPtr() + offset; #else const ParticleReal* yp_n_data = nullptr; #endif - auto& zp_n = pti.GetAttribs(particle_comps["z_n"]); + auto& zp_n = pti.GetAttribs("z_n"); const ParticleReal* zp_n_data = zp_n.dataPtr() + offset; - auto& uxp_n = pti.GetAttribs(particle_comps["ux_n"]); - auto& uyp_n = pti.GetAttribs(particle_comps["uy_n"]); - auto& uzp_n = pti.GetAttribs(particle_comps["uz_n"]); + auto& uxp_n = pti.GetAttribs("ux_n"); + auto& uyp_n = pti.GetAttribs("uy_n"); + auto& uzp_n = pti.GetAttribs("uz_n"); if (WarpX::nox == 1){ doVillasenorDepositionShapeNImplicit<1>( xp_n_data, yp_n_data, zp_n_data, @@ -790,9 +794,9 @@ WarpXParticleContainer::DepositCurrent (WarpXParIter& pti, xyzmin, lo, q, WarpX::n_rz_azimuthal_modes); } } else if (push_type == PushType::Implicit) { - auto& uxp_n = pti.GetAttribs(particle_comps["ux_n"]); - auto& 
uyp_n = pti.GetAttribs(particle_comps["uy_n"]).dataPtr(); - auto& uzp_n = pti.GetAttribs(particle_comps["uz_n"]).dataPtr(); + auto& uxp_n = pti.GetAttribs("ux_n"); + auto& uyp_n = pti.GetAttribs("uy_n"); + auto& uzp_n = pti.GetAttribs("uz_n"); if (WarpX::nox == 1){ doDepositionShapeNImplicit<1>( GetPosition, wp.dataPtr() + offset, @@ -869,7 +873,7 @@ WarpXParticleContainer::DepositCurrent ( int* AMREX_RESTRICT ion_lev = nullptr; if (do_field_ionization) { - ion_lev = pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr(); + ion_lev = pti.GetiAttribs("ionizationLevel").dataPtr(); } DepositCurrent(pti, wp, uxp, uyp, uzp, ion_lev, @@ -1262,7 +1266,7 @@ WarpXParticleContainer::DepositCharge (amrex::MultiFab* rho, int* AMREX_RESTRICT ion_lev = nullptr; if (do_field_ionization) { - ion_lev = pti.GetiAttribs(particle_icomps["ionizationLevel"]).dataPtr(); + ion_lev = pti.GetiAttribs("ionizationLevel").dataPtr(); } DepositCharge(pti, wp, ion_lev, rho, icomp, 0, np, thread_num, lev, lev); @@ -1546,8 +1550,16 @@ WarpXParticleContainer::PushX (int lev, amrex::Real dt) // without runtime component). void WarpXParticleContainer::defineAllParticleTiles () noexcept { - // Call the parent class's method - NamedComponentParticleContainer::defineAllParticleTiles(); + for (int lev = 0; lev <= finestLevel(); ++lev) + { + for (auto mfi = MakeMFIter(lev); mfi.isValid(); ++mfi) + { + const int grid_id = mfi.index(); + const int tile_id = mfi.LocalTileIndex(); + DefineAndReturnParticleTile(lev, grid_id, tile_id); + } + } + // Resize the tmp_particle_data (not present in parent class) tmp_particle_data.resize(finestLevel()+1); @@ -1570,7 +1582,7 @@ WarpXParticleContainer::particlePostLocate(ParticleType& p, { if (not do_splitting) { return; } - // Tag particle if goes to higher level. + // Tag particle if it goes to a higher level.
// It will be split later in the loop if (pld.m_lev == lev+1 and p.id() != amrex::LongParticleIds::NoSplitParticleID diff --git a/Source/Python/Particles/CMakeLists.txt b/Source/Python/Particles/CMakeLists.txt index eed1bb07c74..6b7754fdf2d 100644 --- a/Source/Python/Particles/CMakeLists.txt +++ b/Source/Python/Particles/CMakeLists.txt @@ -10,7 +10,6 @@ foreach(D IN LISTS WarpX_DIMS) # pybind11 ParticleBoundaryBuffer.cpp MultiParticleContainer.cpp - PinnedMemoryParticleContainer.cpp WarpXParticleContainer.cpp ) endif() diff --git a/Source/Python/Particles/PinnedMemoryParticleContainer.cpp b/Source/Python/Particles/PinnedMemoryParticleContainer.cpp deleted file mode 100644 index 21dd6a9d364..00000000000 --- a/Source/Python/Particles/PinnedMemoryParticleContainer.cpp +++ /dev/null @@ -1,31 +0,0 @@ -/* Copyright 2021-2023 The WarpX Community - * - * Authors: Axel Huebl, Remi Lehe, Roelof Groenewald - * License: BSD-3-Clause-LBNL - */ - -#include "Python/pyWarpX.H" - -#include - - -void init_PinnedMemoryParticleContainer (py::module& m) -{ - py::class_< - PinnedMemoryParticleContainer, - amrex::ParticleContainerPureSoA - > pmpc (m, "PinnedMemoryParticleContainer"); - pmpc - .def_property_readonly("real_comp_names", - [](PinnedMemoryParticleContainer& pc) - { - return pc.getParticleComps(); - } - ) - .def_property_readonly("int_comp_names", - [](PinnedMemoryParticleContainer& pc) - { - return pc.getParticleiComps(); - } - ); -} diff --git a/Source/Python/Particles/WarpXParticleContainer.cpp b/Source/Python/Particles/WarpXParticleContainer.cpp index 7bf02aab62b..73e0a8b0db0 100644 --- a/Source/Python/Particles/WarpXParticleContainer.cpp +++ b/Source/Python/Particles/WarpXParticleContainer.cpp @@ -30,7 +30,7 @@ void init_WarpXParticleContainer (py::module& m) > wpc (m, "WarpXParticleContainer"); wpc .def("add_real_comp", - [](WarpXParticleContainer& pc, const std::string& name, bool comm) { pc.NewRealComp(name, comm); }, + [](WarpXParticleContainer& pc, const std::string& name, bool comm) { pc.AddRealComp(name, comm); }, py::arg("name"), py::arg("comm") ) .def("add_n_particles", @@ -85,19 +85,19 @@ void init_WarpXParticleContainer (py::module& m) py::arg("nattr_int"), py::arg("attr_int"), py::arg("uniqueparticles"), py::arg("id")=-1 ) - .def("get_comp_index", + .def("get_comp_index", // deprecated: use pyAMReX get_real_comp_index [](WarpXParticleContainer& pc, std::string comp_name) { - auto particle_comps = pc.getParticleComps(); - return particle_comps.at(comp_name); + py::print("get_comp_index is deprecated. Use get_real_comp_index instead."); + return pc.GetRealCompIndex(comp_name); }, py::arg("comp_name") ) - .def("get_icomp_index", + .def("get_icomp_index", // deprecated: use pyAMReX get_int_comp_index [](WarpXParticleContainer& pc, std::string comp_name) { - auto particle_comps = pc.getParticleiComps(); - return particle_comps.at(comp_name); + py::print("get_icomp_index is deprecated. 
Use get_int_comp_index instead.");
+            return pc.GetIntCompIndex(comp_name);
         },
         py::arg("comp_name")
     )
diff --git a/Source/Python/pyWarpX.cpp b/Source/Python/pyWarpX.cpp
index e128599abd0..45c4b48614b 100644
--- a/Source/Python/pyWarpX.cpp
+++ b/Source/Python/pyWarpX.cpp
@@ -34,7 +34,6 @@ void init_BoundaryBufferParIter (py::module&);
 void init_MultiParticleContainer (py::module&);
 void init_MultiFabRegister (py::module&);
 void init_ParticleBoundaryBuffer (py::module&);
-void init_PinnedMemoryParticleContainer (py::module&);
 void init_WarpXParIter (py::module&);
 void init_WarpXParticleContainer (py::module&);
 void init_WarpX(py::module&);
@@ -61,7 +60,6 @@ PYBIND11_MODULE(PYWARPX_MODULE_NAME, m) {
     // note: order from parent to child classes
     init_MultiFabRegister(m);
-    init_PinnedMemoryParticleContainer(m);
     init_WarpXParticleContainer(m);
     init_WarpXParIter(m);
     init_BoundaryBufferParIter(m);
From a995f77c60c8e2fc61d21ecd0ad897e28f7c720d Mon Sep 17 00:00:00 2001
From: Axel Huebl
Date: Mon, 10 Feb 2025 16:12:30 -0800
Subject: [PATCH 221/278] Doc: New APL on Magnetic Reconnection (#5646)

**Magnetic Reconnection: An Alternative Explanation of Radio Emission in
Galaxy Clusters** by Subham Ghosh and Pallavi Bhat was just published.

https://doi.org/10.3847/2041-8213/ad9f2d

---------

Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com>
---
 Docs/source/highlights.rst | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst
index 2e8eeffbef2..53a176f35f2 100644
--- a/Docs/source/highlights.rst
+++ b/Docs/source/highlights.rst
@@ -154,6 +154,11 @@ High Energy Astrophysical Plasma Physics
 Scientific works in astrophysical plasma modeling.

+#. Ghosh S, Bhat P.
+   **Magnetic Reconnection: An Alternative Explanation of Radio Emission in Galaxy Clusters**.
+   The Astrophysical Journal Letters **979** 1, 2025.
+   `DOI:10.3847/2041-8213/ad9f2d `__
+
 #. Klion H, Jambunathan R, Rowan ME, Yang E, Willcox D, Vay J-L, Lehe R, Myers A, Huebl A, Zhang W.
    **Particle-in-Cell simulations of relativistic magnetic reconnection with advanced Maxwell solver algorithms**.
    The Astrophysical Journal **952** 8, 2023.
From ee15a972438c6e1ea8ec236f8e289ec6ca248415 Mon Sep 17 00:00:00 2001
From: Marco Garten
Date: Mon, 10 Feb 2025 16:12:58 -0800
Subject: [PATCH 222/278] Update highlights for Ma et al. PRAB oblique laser in RZ (#5653)

Updated highlights in WarpX docs for 2025 PRAB article describing how to
simulate oblique laser pulses in quasicylindrical geometry using WarpX.
---
 Docs/source/highlights.rst | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)

diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst
index 53a176f35f2..c7baca48f76 100644
--- a/Docs/source/highlights.rst
+++ b/Docs/source/highlights.rst
@@ -14,7 +14,12 @@ Plasma-Based Acceleration
 Scientific works in laser-plasma and beam-plasma acceleration.

-#. Shrock JE, Rockafellow E, Miao B, Le M, Hollinger RC, Wang S, Gonsalves AJ, Picksley A, Rocca JJ, and Milchberg HM
+#. Ma M, Zeng M, Wang J, Lu G, Yan W, Chen L, and Li D.
+   **Particle-in-cell simulation of laser wakefield accelerators with oblique lasers in quasicylindrical geometry**.
+   Phys. Rev. Accel. Beams **28**, 021301, 2025
+   `DOI:10.1103/PhysRevAccelBeams.28.021301 `__
+
+#. Shrock JE, Rockafellow E, Miao B, Le M, Hollinger RC, Wang S, Gonsalves AJ, Picksley A, Rocca JJ, and Milchberg HM.
    **Guided Mode Evolution and Ionization Injection in Meter-Scale Multi-GeV Laser Wakefield Accelerators**.
   Phys. Rev. Lett. **133**, 045002, 2024
   `DOI:10.1103/PhysRevLett.133.045002 `__
From e0421a1cebbd69be6593145cd1e713000ec5ae46 Mon Sep 17 00:00:00 2001
From: Revathi Jambunathan <41089244+RevathiJambunathan@users.noreply.github.com>
Date: Mon, 10 Feb 2025 16:13:49 -0800
Subject: [PATCH 223/278] Doc : MR paper highlight (#5651)

Co-authored-by: Remi Lehe
---
 Docs/source/highlights.rst | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst
index c7baca48f76..b40ed16e945 100644
--- a/Docs/source/highlights.rst
+++ b/Docs/source/highlights.rst
@@ -159,6 +159,11 @@ High Energy Astrophysical Plasma Physics
 Scientific works in astrophysical plasma modeling.

+#. Jambunathan R, Jones H, Corrales L, Klion H, Rowan ME, Myers A, Zhang W, Vay J-L.
+   **Application of mesh refinement to relativistic magnetic reconnection**.
+   Physics of Plasmas **32** 1, 2025.
+   `DOI:10.1063/5.0233583 `__
+
 #. Ghosh S, Bhat P.
    **Magnetic Reconnection: An Alternative Explanation of Radio Emission in Galaxy Clusters**.
    The Astrophysical Journal Letters **979** 1, 2025.
From 8eab0c9c227a4b7f0cd0f1fde2a2246c6b5f03c5 Mon Sep 17 00:00:00 2001
From: Luca Fedeli
Date: Tue, 11 Feb 2025 01:31:56 +0100
Subject: [PATCH 224/278] Move Fornberg coefficients calculations from WarpX to ablastr (#5619)

The calculation of Fornberg stencil coefficients is rather general, and it
can be shared with other projects of the BLAST family. Therefore, this PR
moves the responsible functions into `ablastr`.

Specifically, the PR does the following:
- two new files (`FiniteDifference.H` and `FiniteDifference.cpp`) are created
  under `ablastr/math` (`CMakeLists.txt` and `Make.package` are updated
  accordingly)
- the static method of the WarpX class `getFornbergStencilCoefficients` and
  the `ReorderFornbergCoefficients` function (originally defined in an
  anonymous namespace in `WarpX.cpp`) are moved to these new files, inside
  the namespace `ablastr::math`
- the two methods are minimally adapted (e.g., `AMREX_ALWAYS_ASSERT_WITH_MESSAGE`
  becomes `ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE`)
- `WarpX.cpp` and `SpectralKSpace.cpp` (where the aforementioned functions
  were called) are updated

Note that with this PR `SpectralKSpace.cpp` no longer needs to include the
heavy `WarpX.H` header.
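For quick orientation, here is a minimal usage sketch of the relocated
helpers (the signatures are the ones introduced in `FiniteDifference.H`
below; the include paths, the order value, and the grid type are
illustrative assumptions):

```
#include <ablastr/math/FiniteDifference.H> // assumed include path
#include <ablastr/utils/Enums.H>           // assumed include path

#include <AMReX_REAL.H>
#include <AMReX_Vector.H>

void example_fornberg_usage ()
{
    constexpr int n_order = 6; // must be even, as asserted by the implementation

    // Fornberg coefficients (c_0, c_1, c_2) for a collocated (nodal) grid,
    // computed by recurrence to avoid numerical overflow
    const amrex::Vector<amrex::Real> coeffs =
        ablastr::math::getFornbergStencilCoefficients(
            n_order, ablastr::utils::enums::GridType::Collocated);

    // re-ordered to (c_2, c_1, c_0, c_0, c_1, c_2) for finite-order centering
    amrex::Vector<amrex::Real> ordered(n_order);
    ablastr::math::ReorderFornbergCoefficients(ordered, coeffs, n_order);
}
```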
--- .../SpectralSolver/SpectralKSpace.cpp | 11 ++- Source/WarpX.H | 10 --- Source/WarpX.cpp | 79 ++----------------- Source/ablastr/math/CMakeLists.txt | 8 ++ Source/ablastr/math/FiniteDifference.H | 44 +++++++++++ Source/ablastr/math/FiniteDifference.cpp | 77 ++++++++++++++++++ Source/ablastr/math/Make.package | 6 +- 7 files changed, 147 insertions(+), 88 deletions(-) create mode 100644 Source/ablastr/math/FiniteDifference.H create mode 100644 Source/ablastr/math/FiniteDifference.cpp diff --git a/Source/FieldSolver/SpectralSolver/SpectralKSpace.cpp b/Source/FieldSolver/SpectralSolver/SpectralKSpace.cpp index 94bd384f265..adf7fff775d 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralKSpace.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralKSpace.cpp @@ -7,10 +7,12 @@ */ #include "SpectralKSpace.H" -#include "WarpX.H" #include "Utils/TextMsg.H" #include "Utils/WarpXConst.H" +#include +#include + #include #include #include @@ -211,7 +213,8 @@ SpectralKSpace::getModifiedKComponent (const DistributionMapping& dm, } else { // Compute real-space stencil coefficients - Vector h_stencil_coef = WarpX::getFornbergStencilCoefficients(n_order, grid_type); + Vector h_stencil_coef = + ablastr::math::getFornbergStencilCoefficients(n_order, grid_type); Gpu::DeviceVector d_stencil_coef(h_stencil_coef.size()); Gpu::copyAsync(Gpu::hostToDevice, h_stencil_coef.begin(), h_stencil_coef.end(), d_stencil_coef.begin()); @@ -237,7 +240,7 @@ SpectralKSpace::getModifiedKComponent (const DistributionMapping& dm, { p_modified_k[i] = 0; for (int n=0; n getFornbergStencilCoefficients (int n_order, ablastr::utils::enums::GridType a_grid_type); - // Device vectors of stencil coefficients used for finite-order centering of fields amrex::Gpu::DeviceVector device_field_centering_stencil_coeffs_x; amrex::Gpu::DeviceVector device_field_centering_stencil_coeffs_y; diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index a1eac8d6080..128e22e2fe3 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -50,6 +50,7 @@ #include "FieldSolver/ImplicitSolvers/ImplicitSolverLibrary.H" +#include #include #include @@ -199,29 +200,6 @@ namespace std::any_of(field_boundary_hi.begin(), field_boundary_hi.end(), is_pml); return is_any_pml; } - - /** - * \brief Re-orders the Fornberg coefficients so that they can be used more conveniently for - * finite-order centering operations. For example, for finite-order centering of order 6, - * the Fornberg coefficients \c (c_0,c_1,c_2) are re-ordered as \c (c_2,c_1,c_0,c_0,c_1,c_2). 
- * - * \param[in,out] ordered_coeffs host vector where the re-ordered Fornberg coefficients will be stored - * \param[in] unordered_coeffs host vector storing the original sequence of Fornberg coefficients - * \param[in] order order of the finite-order centering along a given direction - */ - void ReorderFornbergCoefficients ( - amrex::Vector& ordered_coeffs, - const amrex::Vector& unordered_coeffs, - const int order) - { - const int n = order / 2; - for (int i = 0; i < n; i++) { - ordered_coeffs[i] = unordered_coeffs[n-1-i]; - } - for (int i = n; i < order; i++) { - ordered_coeffs[i] = unordered_coeffs[i-n]; - } - } } void WarpX::MakeWarpX () @@ -3196,49 +3174,6 @@ WarpX::BuildBufferMasksInBox ( const amrex::Box tbx, amrex::IArrayBox &buffer_ma }); } -amrex::Vector WarpX::getFornbergStencilCoefficients (const int n_order, ablastr::utils::enums::GridType a_grid_type) -{ - AMREX_ALWAYS_ASSERT_WITH_MESSAGE(n_order % 2 == 0, "n_order must be even"); - - const int m = n_order / 2; - amrex::Vector coeffs; - coeffs.resize(m); - - // There are closed-form formula for these coefficients, but they result in - // an overflow when evaluated numerically. One way to avoid the overflow is - // to calculate the coefficients by recurrence. - - // Coefficients for collocated (nodal) finite-difference approximation - if (a_grid_type == GridType::Collocated) - { - // First coefficient - coeffs.at(0) = m * 2._rt / (m+1); - // Other coefficients by recurrence - for (int n = 1; n < m; n++) - { - coeffs.at(n) = - (m-n) * 1._rt / (m+n+1) * coeffs.at(n-1); - } - } - // Coefficients for staggered finite-difference approximation - else - { - Real prod = 1.; - for (int k = 1; k < m+1; k++) - { - prod *= (m + k) / (4._rt * k); - } - // First coefficient - coeffs.at(0) = 4_rt * m * prod * prod; - // Other coefficients by recurrence - for (int n = 1; n < m; n++) - { - coeffs.at(n) = - ((2_rt*n-1) * (m-n)) * 1._rt / ((2_rt*n+1) * (m+n)) * coeffs.at(n-1); - } - } - - return coeffs; -} - void WarpX::AllocateCenteringCoefficients (amrex::Gpu::DeviceVector& device_centering_stencil_coeffs_x, amrex::Gpu::DeviceVector& device_centering_stencil_coeffs_y, amrex::Gpu::DeviceVector& device_centering_stencil_coeffs_z, @@ -3257,9 +3192,9 @@ void WarpX::AllocateCenteringCoefficients (amrex::Gpu::DeviceVector amrex::Vector host_centering_stencil_coeffs_y; amrex::Vector host_centering_stencil_coeffs_z; - Fornberg_stencil_coeffs_x = getFornbergStencilCoefficients(centering_nox, a_grid_type); - Fornberg_stencil_coeffs_y = getFornbergStencilCoefficients(centering_noy, a_grid_type); - Fornberg_stencil_coeffs_z = getFornbergStencilCoefficients(centering_noz, a_grid_type); + Fornberg_stencil_coeffs_x = ablastr::math::getFornbergStencilCoefficients(centering_nox, a_grid_type); + Fornberg_stencil_coeffs_y = ablastr::math::getFornbergStencilCoefficients(centering_noy, a_grid_type); + Fornberg_stencil_coeffs_z = ablastr::math::getFornbergStencilCoefficients(centering_noz, a_grid_type); host_centering_stencil_coeffs_x.resize(centering_nox); host_centering_stencil_coeffs_y.resize(centering_noy); @@ -3267,17 +3202,17 @@ void WarpX::AllocateCenteringCoefficients (amrex::Gpu::DeviceVector // Re-order Fornberg stencil coefficients: // example for order 6: (c_0,c_1,c_2) becomes (c_2,c_1,c_0,c_0,c_1,c_2) - ::ReorderFornbergCoefficients( + ablastr::math::ReorderFornbergCoefficients( host_centering_stencil_coeffs_x, Fornberg_stencil_coeffs_x, centering_nox ); - ::ReorderFornbergCoefficients( + ablastr::math::ReorderFornbergCoefficients( 
host_centering_stencil_coeffs_y, Fornberg_stencil_coeffs_y, centering_noy ); - ::ReorderFornbergCoefficients( + ablastr::math::ReorderFornbergCoefficients( host_centering_stencil_coeffs_z, Fornberg_stencil_coeffs_z, centering_noz diff --git a/Source/ablastr/math/CMakeLists.txt b/Source/ablastr/math/CMakeLists.txt index 9093da83ae1..0ad3fe80b87 100644 --- a/Source/ablastr/math/CMakeLists.txt +++ b/Source/ablastr/math/CMakeLists.txt @@ -1 +1,9 @@ +foreach(D IN LISTS WarpX_DIMS) + warpx_set_suffix_dims(SD ${D}) + target_sources(ablastr_${SD} + PRIVATE + FiniteDifference.cpp + ) +endforeach() + add_subdirectory(fft) diff --git a/Source/ablastr/math/FiniteDifference.H b/Source/ablastr/math/FiniteDifference.H new file mode 100644 index 00000000000..8761318eb81 --- /dev/null +++ b/Source/ablastr/math/FiniteDifference.H @@ -0,0 +1,44 @@ +/* Copyright 2021-2025 Edoardo Zoni, Luca Fedeli + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ +#ifndef ABLASTR_MATH_FINITE_DIFFERENCE_H_ +#define ABLASTR_MATH_FINITE_DIFFERENCE_H_ + +#include "ablastr/utils/Enums.H" + +#include +#include + +namespace ablastr::math +{ + /** + * \brief Returns an array of coefficients (Fornberg coefficients), corresponding + * to the weight of each point in a finite-difference approximation of a derivative + * (up to order \c n_order). + * + * \param[in] n_order order of the finite-difference approximation + * \param[in] a_grid_type type of grid (collocated or not) + */ + [[nodiscard]] amrex::Vector + getFornbergStencilCoefficients ( + int n_order, ablastr::utils::enums::GridType a_grid_type); + + /** + * \brief Re-orders the Fornberg coefficients so that they can be used more conveniently for + * finite-order centering operations. For example, for finite-order centering of order 6, + * the Fornberg coefficients \c (c_0,c_1,c_2) are re-ordered as \c (c_2,c_1,c_0,c_0,c_1,c_2). + * + * \param[in,out] ordered_coeffs host vector where the re-ordered Fornberg coefficients will be stored + * \param[in] unordered_coeffs host vector storing the original sequence of Fornberg coefficients + * \param[in] order order of the finite-order centering along a given direction + */ + void + ReorderFornbergCoefficients ( + amrex::Vector& ordered_coeffs, + const amrex::Vector& unordered_coeffs, int order); +} + +#endif //ABLASTR_MATH_FINITE_DIFFERENCE_H_ diff --git a/Source/ablastr/math/FiniteDifference.cpp b/Source/ablastr/math/FiniteDifference.cpp new file mode 100644 index 00000000000..85d0b332131 --- /dev/null +++ b/Source/ablastr/math/FiniteDifference.cpp @@ -0,0 +1,77 @@ +/* Copyright 2021-2025 Edoardo Zoni, Luca Fedeli + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ + +#include "FiniteDifference.H" + +#include "ablastr/utils/TextMsg.H" + +using namespace ablastr::utils::enums; +using namespace amrex; + +namespace ablastr::math +{ + + amrex::Vector + getFornbergStencilCoefficients (const int n_order, GridType a_grid_type) + { + ABLASTR_ALWAYS_ASSERT_WITH_MESSAGE(n_order % 2 == 0, "n_order must be even"); + + const int m = n_order / 2; + amrex::Vector coeffs; + coeffs.resize(m); + + // There are closed-form formula for these coefficients, but they result in + // an overflow when evaluated numerically. One way to avoid the overflow is + // to calculate the coefficients by recurrence. 
+ + // Coefficients for collocated (nodal) finite-difference approximation + if (a_grid_type == GridType::Collocated) + { + // First coefficient + coeffs.at(0) = m * 2._rt / (m+1); + // Other coefficients by recurrence + for (int n = 1; n < m; n++) + { + coeffs.at(n) = - (m-n) * 1._rt / (m+n+1) * coeffs.at(n-1); + } + } + // Coefficients for staggered finite-difference approximation + else + { + amrex::Real prod = 1.; + for (int k = 1; k < m+1; k++) + { + prod *= (m + k) / (4._rt * k); + } + // First coefficient + coeffs.at(0) = 4_rt * m * prod * prod; + // Other coefficients by recurrence + for (int n = 1; n < m; n++) + { + coeffs.at(n) = - ((2_rt*n-1) * (m-n)) * 1._rt / ((2_rt*n+1) * (m+n)) * coeffs.at(n-1); + } + } + + return coeffs; + } + + void + ReorderFornbergCoefficients ( + amrex::Vector& ordered_coeffs, + const amrex::Vector& unordered_coeffs, + const int order) + { + const int n = order / 2; + for (int i = 0; i < n; i++) { + ordered_coeffs[i] = unordered_coeffs[n-1-i]; + } + for (int i = n; i < order; i++) { + ordered_coeffs[i] = unordered_coeffs[i-n]; + } + } + +} diff --git a/Source/ablastr/math/Make.package b/Source/ablastr/math/Make.package index a0e95b11225..5e3fd22dc81 100644 --- a/Source/ablastr/math/Make.package +++ b/Source/ablastr/math/Make.package @@ -1,3 +1,5 @@ -include $(WARPX_HOME)/Source/ablastr/math/fft/Make.package +CEXE_sources += FiniteDifference.cpp + +VPATH_LOCATIONS += $(WARPX_HOME)/Source/ablastr/math -VPATH_LOCATIONS += $(WARPX_HOME)/Source/ablastr +include $(WARPX_HOME)/Source/ablastr/math/fft/Make.package From 5f32399069f2acc3c528983e237b87537844e4e2 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Tue, 11 Feb 2025 02:21:49 +0000 Subject: [PATCH 225/278] [pre-commit.ci] pre-commit autoupdate (#5652) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.4 → v0.9.6](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.4...v0.9.6) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 577f0ffc1f0..e113fa4c8e5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.4 + rev: v0.9.6 hooks: # Run the linter - id: ruff From 879caeca10d6105d515cb45be604ed035de71ae1 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Tue, 11 Feb 2025 05:56:59 +0100 Subject: [PATCH 226/278] WarpX class : em_solver_medium no longer a static variable (#5642) This PR turns the static variable `em_solver_medium` of the WarpX class into a private non-static member variable : `m_em_solver_medium` . This is done with the aim of reducing the usage of static variables in WarpX. 
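Schematically, the change follows this pattern (hypothetical `Foo` classes
for illustration, not actual WarpX code):

```
// before: one mutable value shared by every instance, writable from anywhere
class FooBefore
{
public:
    static inline int s_mode = 0;
};

// after: private, per-instance state, initialized at construction
class FooAfter
{
public:
    explicit FooAfter (int mode) : m_mode{mode} {}
    [[nodiscard]] int mode () const { return m_mode; }
private:
    int m_mode = 0;
};
```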
--- Source/Evolve/WarpXEvolve.cpp | 4 ++-- Source/Initialization/WarpXInitData.cpp | 10 +++++----- Source/Utils/WarpXMovingWindow.cpp | 2 +- Source/WarpX.H | 5 +++-- Source/WarpX.cpp | 8 ++++---- 5 files changed, 15 insertions(+), 14 deletions(-) diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index b40503ac1c7..a5ad9d4034e 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -438,10 +438,10 @@ WarpX::OneStep_nosub (Real cur_time) EvolveB(0.5_rt * dt[0], DtType::FirstHalf, cur_time); // We now have B^{n+1/2} FillBoundaryB(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); - if (WarpX::em_solver_medium == MediumForEM::Vacuum) { + if (m_em_solver_medium == MediumForEM::Vacuum) { // vacuum medium EvolveE(dt[0], cur_time); // We now have E^{n+1} - } else if (WarpX::em_solver_medium == MediumForEM::Macroscopic) { + } else if (m_em_solver_medium == MediumForEM::Macroscopic) { // macroscopic medium MacroscopicEvolveE(dt[0], cur_time); // We now have E^{n+1} } else { diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index cf452df56a2..b2885f8ca6a 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -288,17 +288,17 @@ WarpX::PrintMainPICparameters () else{ amrex::Print() << "Operation mode: | Electromagnetic" << "\n"; } - if (em_solver_medium == MediumForEM::Vacuum ){ + if (m_em_solver_medium == MediumForEM::Vacuum ){ amrex::Print() << " | - vacuum" << "\n"; } - else if (em_solver_medium == MediumForEM::Macroscopic ){ + else if (m_em_solver_medium == MediumForEM::Macroscopic ){ amrex::Print() << " | - macroscopic" << "\n"; } - if ( (em_solver_medium == MediumForEM::Macroscopic) && + if ( (m_em_solver_medium == MediumForEM::Macroscopic) && (WarpX::macroscopic_solver_algo == MacroscopicSolverAlgo::LaxWendroff)){ amrex::Print() << " | - Lax-Wendroff algorithm\n"; } - else if ((em_solver_medium == MediumForEM::Macroscopic) && + else if ((m_em_solver_medium == MediumForEM::Macroscopic) && (WarpX::macroscopic_solver_algo == MacroscopicSolverAlgo::BackwardEuler)){ amrex::Print() << " | - Backward Euler algorithm\n"; } @@ -561,7 +561,7 @@ WarpX::InitData () BuildBufferMasks(); - if (WarpX::em_solver_medium == MediumForEM::Macroscopic) { + if (m_em_solver_medium == MediumForEM::Macroscopic) { const int lev_zero = 0; m_macroscopic_properties->InitData( Geom(lev_zero), diff --git a/Source/Utils/WarpXMovingWindow.cpp b/Source/Utils/WarpXMovingWindow.cpp index b37aa41e28a..0cea2709312 100644 --- a/Source/Utils/WarpXMovingWindow.cpp +++ b/Source/Utils/WarpXMovingWindow.cpp @@ -464,7 +464,7 @@ WarpX::MoveWindow (const int step, bool move_j) } // Recompute macroscopic properties of the medium - if (WarpX::em_solver_medium == MediumForEM::Macroscopic) { + if (m_em_solver_medium == MediumForEM::Macroscopic) { const int lev_zero = 0; m_macroscopic_properties->InitData( Geom(lev_zero), diff --git a/Source/WarpX.H b/Source/WarpX.H index 27b02021678..b12cb1ab7f0 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -206,8 +206,6 @@ public: * being used (0 or 1 corresponding to timers or heuristic). */ static inline auto load_balance_costs_update_algo = LoadBalanceCostsUpdateAlgo::Default; - //! 
Integer that corresponds to electromagnetic Maxwell solver (vacuum - 0, macroscopic - 1) - static inline auto em_solver_medium = MediumForEM::Default; /** Integer that correspond to macroscopic Maxwell solver algorithm * (BackwardEuler - 0, Lax-Wendroff - 1) */ @@ -1371,6 +1369,9 @@ private: bool do_fluid_species = false; std::unique_ptr myfl; + //! Integer that corresponds to electromagnetic Maxwell solver (vacuum - 0, macroscopic - 1) + MediumForEM m_em_solver_medium = MediumForEM::Default; + // // Fields: First array for level, second for direction // diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 128e22e2fe3..1e8e121dd5c 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -352,7 +352,7 @@ WarpX::WarpX () m_field_factory.resize(nlevs_max); - if (em_solver_medium == MediumForEM::Macroscopic) { + if (m_em_solver_medium == MediumForEM::Macroscopic) { // create object for macroscopic solver m_macroscopic_properties = std::make_unique(); } @@ -1248,8 +1248,8 @@ WarpX::ReadParameters () " combined with mesh refinement is currently not implemented"); } - pp_algo.query_enum_sloppy("em_solver_medium", em_solver_medium, "-_"); - if (em_solver_medium == MediumForEM::Macroscopic ) { + pp_algo.query_enum_sloppy("em_solver_medium", m_em_solver_medium, "-_"); + if (m_em_solver_medium == MediumForEM::Macroscopic ) { pp_algo.query_enum_sloppy("macroscopic_sigma_method", macroscopic_solver_algo, "-_"); } @@ -2274,7 +2274,7 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm } // Allocate extra multifabs for macroscopic properties of the medium - if (em_solver_medium == MediumForEM::Macroscopic) { + if (m_em_solver_medium == MediumForEM::Macroscopic) { WARPX_ALWAYS_ASSERT_WITH_MESSAGE( lev==0, "Macroscopic properties are not supported with mesh refinement."); m_macroscopic_properties->AllocateLevelMFs(ba, dm, ngEB); From daabdd69a03fa18fe3d03f4f92b78e93919daec1 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Tue, 11 Feb 2025 06:12:22 +0100 Subject: [PATCH 227/278] Clang-tidy CI test: bump version from 16 to 17 (#5600) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This PR bumps the version used for `clang-tidy` CI tests from 16 to 17. It also addresses all the issues found with the upgraded tool. To be merged **after** https://github.com/ECP-WarpX/WarpX/pull/5592 ✅ ### The issues found 🧐 and fixed 🛠️ with the upgraded tool are the following : - [bugprone-switch-missing-default-case](https://releases.llvm.org/17.0.1/tools/clang/tools/extra/docs/clang-tidy/checks/bugprone/switch-missing-default-case.html) A newly introduced check to flag `switch` statements without a `default` case (unless the argument is an `enum`) - [cppcoreguidelines-rvalue-reference-param-not-moved](https://releases.llvm.org/17.0.1/tools/clang/tools/extra/docs/clang-tidy/checks/cppcoreguidelines/rvalue-reference-param-not-moved.html) A newly introduced check to flag when an rvalue reference argument of a function is never moved inside the function body. ⚠️ **Warning**: in order to have this check compatible with [performance-move-const-arg](https://releases.llvm.org/17.0.1/tools/clang/tools/extra/docs/clang-tidy/checks/performance/move-const-arg.html) I had to set `performance-move-const-arg.CheckTriviallyCopyableMove` to `false` (specifically for the three methods in `ablastr::utils::msg_logger` accepting `std::vector::const_iterator&& rit` arguments). 
- [misc-header-include-cycle](https://releases.llvm.org/17.0.1/tools/clang/tools/extra/docs/clang-tidy/checks/misc/header-include-cycle.html) A newly introduced check to prevent cyclic header inclusions.
- [modernize-type-traits](https://releases.llvm.org/17.0.1/tools/clang/tools/extra/docs/clang-tidy/checks/modernize/type-traits.html) A newly introduced check. The idea is to replace occurrences of, e.g., `std::is_integral<T>::value`, with the less verbose alternative `std::is_integral_v<T>`
- [performance-avoid-endl](https://releases.llvm.org/17.0.1/tools/clang/tools/extra/docs/clang-tidy/checks/performance/avoid-endl.html) A newly introduced check. The idea is to replace `<< std::endl` with `"\n"`, since `endl` also forces a flush of the stream. In a few cases flushing the buffer is actually the desired behavior. Typically, this happens when we want to write to `std::cerr`, which is however automatically flushed after each write operation. In cases where actually flushing to `std::cout` is the desired behavior, one can do `<< "\n" << std::flush`, which is arguably more transparent than `<< std::endl`.
- [performance-noexcept-swap](https://releases.llvm.org/17.0.1/tools/clang/tools/extra/docs/clang-tidy/checks/performance/noexcept-swap.html) For performance reasons it is better if `swap` functions are declared as `noexcept`, in order to allow the compiler to perform more aggressive optimizations. In any case, we can use the AMReX function `amrex::Swap`, which is `noexcept`.

### 🔄 Re-enabled checks:
- [readability-misleading-indentation](https://releases.llvm.org/17.0.1/tools/clang/tools/extra/docs/clang-tidy/checks/readability/misleading-indentation.html) This check was already available in v16, but a bug led to false positives. The bug has been corrected in v17 of the tool, so we can re-enable the check.

### ⛔ The PR excludes the following checks:
- [cppcoreguidelines-missing-std-forward](https://releases.llvm.org/17.0.1/tools/clang/tools/extra/docs/clang-tidy/checks/cppcoreguidelines/missing-std-forward.html) A newly introduced check that warns when a forwarding reference parameter is not forwarded. In order to comply with this check I think that I have to pass some parameters by reference to lambda functions inside `ParallelFor` constructs. However, this leads to issues when we compile for GPUs. Therefore, I think that the best solution is to exclude this check. See an example below (for `PredFunc&& filter`):
```
amrex::ParallelForRNG(np,
[=,&filter] AMREX_GPU_DEVICE (int i, amrex::RandomEngine const& engine)
{
p_mask[i] = filter(src_data, i, engine);
});
```
- [misc-include-cleaner](https://releases.llvm.org/17.0.1/tools/clang/tools/extra/docs/clang-tidy/checks/misc/include-cleaner.html) It would be awesome to include this check. However, as it is now implemented, it has no notion of "associated headers". For instance, let's suppose that the header `MyClass.H` has `#include <string>` and that `MyClass.cpp` has `#include "MyClass.H"` and uses `std::string` somewhere. In this case, the check raises a warning stating that you should include `<string>` in `MyClass.cpp`, even if it is transitively included via the associated header `MyClass.H`. For this reason, for the moment, it is better to periodically check headers with the `IWYU` tool.
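For reference, a small before/after sketch for two of the checks discussed
above (illustrative code, not taken from the WarpX sources):

```
#include <iostream>
#include <type_traits>

template <typename T>
void print_arithmetic (const T& v)
{
    // modernize-type-traits: prefer the _v variable template
    static_assert(std::is_arithmetic_v<T>); // instead of std::is_arithmetic<T>::value

    // performance-avoid-endl: "\n" ends the line without forcing a stream flush
    std::cout << v << "\n"; // instead of std::cout << v << std::endl;
}
```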
--- .clang-tidy | 6 ++++-- .github/workflows/clang_tidy.yml | 8 ++++---- .../FlushFormats/FlushFormatCatalyst.cpp | 6 +++--- Source/Diagnostics/FullDiagnostics.cpp | 2 +- Source/Diagnostics/ReducedDiags/Timestep.cpp | 2 +- .../MagnetostaticSolver/MagnetostaticSolver.cpp | 2 +- .../FieldSolver/SpectralSolver/SpectralKSpace.H | 9 +++++---- .../SpectralSolver/SpectralKSpace.cpp | 2 +- .../SpectralSolver/SpectralKSpace_fwd.H | 2 +- .../DivCleaner/ProjectionDivCleaner.cpp | 4 ++-- Source/NonlinearSolvers/NewtonSolver.H | 2 +- Source/NonlinearSolvers/PicardSolver.H | 2 +- .../Resampling/VelocityCoincidenceThinning.H | 16 +++++----------- Source/Python/callbacks.cpp | 4 ++-- Source/Python/pyWarpX.cpp | 2 +- .../fields/EffectivePotentialPoissonSolver.H | 2 +- Source/ablastr/fields/Interpolate.H | 3 --- Source/ablastr/utils/msg_logger/MsgLogger.H | 12 ++++++------ Source/ablastr/utils/msg_logger/MsgLogger.cpp | 15 +++++++++------ Tools/Linter/runClangTidy.sh | 8 ++++---- Tools/QedTablesUtils/Source/QedTableCommons.H | 4 ++-- 21 files changed, 55 insertions(+), 58 deletions(-) diff --git a/.clang-tidy b/.clang-tidy index efb60a001d0..8111fc2fc25 100644 --- a/.clang-tidy +++ b/.clang-tidy @@ -19,6 +19,7 @@ Checks: ' -cppcoreguidelines-avoid-non-const-global-variables, -cppcoreguidelines-init-variables, -cppcoreguidelines-macro-usage, + -cppcoreguidelines-missing-std-forward, -cppcoreguidelines-narrowing-conversions, -cppcoreguidelines-non-private-member-variables-in-classes, -cppcoreguidelines-owning-memory, @@ -29,6 +30,7 @@ Checks: ' misc-*, -misc-no-recursion, -misc-non-private-member-variables-in-classes, + -misc-include-cleaner, modernize-*, -modernize-avoid-c-arrays, -modernize-return-braced-init-list, @@ -44,7 +46,6 @@ Checks: ' -readability-implicit-bool-conversion, -readability-isolate-declaration, -readability-magic-numbers, - -readability-misleading-indentation, -readability-named-parameter, -readability-uppercase-literal-suffix ' @@ -58,6 +59,7 @@ CheckOptions: value: "true" - key: misc-use-anonymous-namespace.HeaderFileExtensions value: "H," - +- key: performance-move-const-arg.CheckTriviallyCopyableMove + value: "false" HeaderFilterRegex: 'Source[a-z_A-Z0-9\/]+\.H$' diff --git a/.github/workflows/clang_tidy.yml b/.github/workflows/clang_tidy.yml index 3caa11e1885..49f2a5b6e25 100644 --- a/.github/workflows/clang_tidy.yml +++ b/.github/workflows/clang_tidy.yml @@ -26,7 +26,7 @@ jobs: - uses: actions/checkout@v4 - name: install dependencies run: | - .github/workflows/dependencies/clang.sh 16 + .github/workflows/dependencies/clang.sh 17 - name: set up cache uses: actions/cache@v4 with: @@ -43,8 +43,8 @@ jobs: export CCACHE_LOGFILE=${{ github.workspace }}/ccache.log.txt ccache -z - export CXX=$(which clang++-16) - export CC=$(which clang-16) + export CXX=$(which clang++-17) + export CC=$(which clang-17) cmake -S . 
-B build_clang_tidy \ -DCMAKE_VERBOSE_MAKEFILE=ON \ @@ -62,7 +62,7 @@ jobs: ${{github.workspace}}/.github/workflows/source/makeMakefileForClangTidy.py --input ${{github.workspace}}/ccache.log.txt make -j4 --keep-going -f clang-tidy-ccache-misses.mak \ - CLANG_TIDY=clang-tidy-16 \ + CLANG_TIDY=clang-tidy-17 \ CLANG_TIDY_ARGS="--config-file=${{github.workspace}}/.clang-tidy --warnings-as-errors=*" ccache -s diff --git a/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.cpp b/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.cpp index 3e542f9f871..5e5f3634e8f 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatCatalyst.cpp @@ -110,7 +110,7 @@ FlushFormatCatalyst::FlushFormatCatalyst() { if (err != catalyst_status_ok) { std::string message = " Error: Failed to initialize Catalyst!\n"; - std::cerr << message << err << std::endl; + std::cerr << message << err << "\n"; amrex::Print() << message; amrex::Abort(message); } @@ -180,7 +180,7 @@ FlushFormatCatalyst::WriteToFile ( if (err != catalyst_status_ok) { std::string message = " Error: Failed to execute Catalyst!\n"; - std::cerr << message << err << std::endl; + std::cerr << message << err << "\n"; amrex::Print() << message; } WARPX_PROFILE_VAR_STOP(prof_catalyst_execute); @@ -200,7 +200,7 @@ FlushFormatCatalyst::~FlushFormatCatalyst() { if (err != catalyst_status_ok) { std::string message = " Error: Failed to finalize Catalyst!\n"; - std::cerr << message << err << std::endl; + std::cerr << message << err << "\n"; amrex::Print() << message; amrex::Abort(message); } else { diff --git a/Source/Diagnostics/FullDiagnostics.cpp b/Source/Diagnostics/FullDiagnostics.cpp index 8e2ebd3886a..5e8cede12ea 100644 --- a/Source/Diagnostics/FullDiagnostics.cpp +++ b/Source/Diagnostics/FullDiagnostics.cpp @@ -873,7 +873,7 @@ FullDiagnostics::InitializeFieldFunctors (int lev) } else if ( m_varnames[comp] == "divE" ){ m_all_field_functors[lev][comp] = std::make_unique(warpx.m_fields.get_alldirs(FieldType::Efield_aux, lev), lev, m_crse_ratio); } else { - std::cout << "Error on component " << m_varnames[comp] << std::endl; + std::cout << "Error on component " << m_varnames[comp] << "\n"; WARPX_ABORT_WITH_MESSAGE(m_varnames[comp] + " is not a known field output type for this geometry"); } } diff --git a/Source/Diagnostics/ReducedDiags/Timestep.cpp b/Source/Diagnostics/ReducedDiags/Timestep.cpp index 3474121db91..e74f22c27ec 100644 --- a/Source/Diagnostics/ReducedDiags/Timestep.cpp +++ b/Source/Diagnostics/ReducedDiags/Timestep.cpp @@ -50,7 +50,7 @@ Timestep::Timestep (const std::string& rd_name) } // close file - ofs << std::endl; + ofs << "\n"; ofs.close(); } } diff --git a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp index fb93342ed08..2a744f3f902 100644 --- a/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp +++ b/Source/FieldSolver/MagnetostaticSolver/MagnetostaticSolver.cpp @@ -130,7 +130,7 @@ WarpX::AddMagnetostaticFieldLabFrame() // temporary fix!!! 
const amrex::Real absolute_tolerance = 0.0; amrex::Real required_precision; - if constexpr (std::is_same::value) { + if constexpr (std::is_same_v) { required_precision = 1e-5; } else { diff --git a/Source/FieldSolver/SpectralSolver/SpectralKSpace.H b/Source/FieldSolver/SpectralSolver/SpectralKSpace.H index 16f93d8292a..fcf1a2ccd02 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralKSpace.H +++ b/Source/FieldSolver/SpectralSolver/SpectralKSpace.H @@ -17,6 +17,7 @@ #include #include #include +#include #include #include #include @@ -35,9 +36,9 @@ using SpectralShiftFactor = amrex::LayoutData< // Indicate the type of correction "shift" factor to apply // when the FFT is performed from/to a cell-centered grid in real space. -struct ShiftType { - enum{ TransformFromCellCentered=0, TransformToCellCentered=1 }; -}; +AMREX_ENUM(ShiftType, + TransformFromCellCentered, + TransformToCellCentered); /** * \brief Class that represents the spectral space. @@ -69,7 +70,7 @@ class SpectralKSpace SpectralShiftFactor getSpectralShiftFactor( const amrex::DistributionMapping& dm, int i_dim, - int shift_type ) const; + ShiftType shift_type ) const; protected: amrex::Array k_vec; diff --git a/Source/FieldSolver/SpectralSolver/SpectralKSpace.cpp b/Source/FieldSolver/SpectralSolver/SpectralKSpace.cpp index adf7fff775d..5313409553f 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralKSpace.cpp +++ b/Source/FieldSolver/SpectralSolver/SpectralKSpace.cpp @@ -145,7 +145,7 @@ SpectralKSpace::getKComponent( const DistributionMapping& dm, SpectralShiftFactor SpectralKSpace::getSpectralShiftFactor( const DistributionMapping& dm, const int i_dim, - const int shift_type ) const + const ShiftType shift_type ) const { // Initialize an empty DeviceVector in each box SpectralShiftFactor shift_factor( spectralspace_ba, dm ); diff --git a/Source/FieldSolver/SpectralSolver/SpectralKSpace_fwd.H b/Source/FieldSolver/SpectralSolver/SpectralKSpace_fwd.H index a256767d5bc..3b93622ae0b 100644 --- a/Source/FieldSolver/SpectralSolver/SpectralKSpace_fwd.H +++ b/Source/FieldSolver/SpectralSolver/SpectralKSpace_fwd.H @@ -8,7 +8,7 @@ #ifndef WARPX_SPECTRALKSPACE_FWD_H #define WARPX_SPECTRALKSPACE_FWD_H -struct ShiftType; +enum class ShiftType; class SpectralKSpace; diff --git a/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp b/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp index 1209f621e31..d7a3bb3ac92 100644 --- a/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp +++ b/Source/Initialization/DivCleaner/ProjectionDivCleaner.cpp @@ -104,7 +104,7 @@ void ProjectionDivCleaner::ReadParameters () { // Initialize tolerance based on field precision - if constexpr (std::is_same::value) { + if constexpr (std::is_same_v) { m_rtol = 5e-5; m_atol = 0.0; } @@ -337,7 +337,7 @@ WarpX::ProjectionCleanDivB() { && WarpX::poisson_solver_id == PoissonSolverAlgo::Multigrid)) { amrex::Print() << Utils::TextMsg::Info( "Starting Projection B-Field divergence cleaner."); - if constexpr (!std::is_same::value) { + if constexpr (!std::is_same_v) { ablastr::warn_manager::WMRecordWarning("Projection Div Cleaner", "WarpX is running with a field precision of SINGLE." 
"Convergence of projection based div cleaner is not optimal and may fail.", diff --git a/Source/NonlinearSolvers/NewtonSolver.H b/Source/NonlinearSolvers/NewtonSolver.H index f5147b2e4c0..f92687d6b34 100644 --- a/Source/NonlinearSolvers/NewtonSolver.H +++ b/Source/NonlinearSolvers/NewtonSolver.H @@ -313,7 +313,7 @@ void NewtonSolver::Solve ( Vec& a_U, " and the relative tolerance is " << m_rtol << ". Absolute norm is " << norm_abs << " and the absolute tolerance is " << m_atol; - if (this->m_verbose) { amrex::Print() << convergenceMsg.str() << std::endl; } + if (this->m_verbose) { amrex::Print() << convergenceMsg.str() << "\n"; } if (m_require_convergence) { WARPX_ABORT_WITH_MESSAGE(convergenceMsg.str()); } else { diff --git a/Source/NonlinearSolvers/PicardSolver.H b/Source/NonlinearSolvers/PicardSolver.H index 6fe941cd48f..62323b64a23 100644 --- a/Source/NonlinearSolvers/PicardSolver.H +++ b/Source/NonlinearSolvers/PicardSolver.H @@ -205,7 +205,7 @@ void PicardSolver::Solve ( Vec& a_U, " and the relative tolerance is " << m_rtol << ". Absolute norm is " << norm_abs << " and the absolute tolerance is " << m_atol; - if (this->m_verbose) { amrex::Print() << convergenceMsg.str() << std::endl; } + if (this->m_verbose) { amrex::Print() << convergenceMsg.str() << "\n"; } if (m_require_convergence) { WARPX_ABORT_WITH_MESSAGE(convergenceMsg.str()); } else { diff --git a/Source/Particles/Resampling/VelocityCoincidenceThinning.H b/Source/Particles/Resampling/VelocityCoincidenceThinning.H index a815092e03e..d55aed99bcd 100644 --- a/Source/Particles/Resampling/VelocityCoincidenceThinning.H +++ b/Source/Particles/Resampling/VelocityCoincidenceThinning.H @@ -14,6 +14,8 @@ #include "Utils/Parser/ParserUtils.H" #include "Utils/ParticleUtils.H" +#include + /** * \brief This class implements a particle merging scheme wherein particles * are clustered in phase space and particles in the same cluster is merged @@ -66,14 +68,6 @@ public: */ struct HeapSort { - AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE - void swap(int &a, int &b) const - { - const auto temp = b; - b = a; - a = temp; - } - AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void operator() (int index_array[], const int bin_array[], const int start, const int n) const { @@ -84,7 +78,7 @@ public: // move child through heap if it is bigger than its parent while (j > 0 && bin_array[index_array[j+start]] > bin_array[index_array[(j - 1)/2 + start]]) { // swap child and parent until branch is properly ordered - swap(index_array[j+start], index_array[(j - 1)/2 + start]); + amrex::Swap(index_array[j+start], index_array[(j - 1)/2 + start]); j = (j - 1) / 2; } } @@ -92,7 +86,7 @@ public: for (int i = n - 1; i > 0; i--) { // swap value of first (now the largest value) to the new end point - swap(index_array[start], index_array[i+start]); + amrex::Swap(index_array[start], index_array[i+start]); // remake the max heap int j = 0, index; @@ -105,7 +99,7 @@ public: } // if parent is smaller than child, swap parent with child having higher value if (index < i && bin_array[index_array[j+start]] < bin_array[index_array[index+start]]) { - swap(index_array[j+start], index_array[index+start]); + amrex::Swap(index_array[j+start], index_array[index+start]); } j = index; } diff --git a/Source/Python/callbacks.cpp b/Source/Python/callbacks.cpp index 79f15c62835..81d379b189a 100644 --- a/Source/Python/callbacks.cpp +++ b/Source/Python/callbacks.cpp @@ -33,8 +33,8 @@ void ExecutePythonCallback ( const std::string& name ) try { warpx_callback_py_map[name](); } catch (std::exception &e) 
{ - std::cerr << "Python callback '" << name << "' failed!" << std::endl; - std::cerr << e.what() << std::endl; + std::cerr << "Python callback '" << name << "' failed!" << "\n"; + std::cerr << e.what() << "\n"; std::exit(3); // note: NOT amrex::Abort(), to avoid hangs with MPI // future note: diff --git a/Source/Python/pyWarpX.cpp b/Source/Python/pyWarpX.cpp index 45c4b48614b..8ae174b4d3e 100644 --- a/Source/Python/pyWarpX.cpp +++ b/Source/Python/pyWarpX.cpp @@ -93,7 +93,7 @@ PYBIND11_MODULE(PYWARPX_MODULE_NAME, m) { // TODO broken numpy if not at least v1.15.0: raise warning // auto numpy = py::module::import("numpy"); // auto npversion = numpy.attr("__version__"); - // std::cout << "numpy version: " << py::str(npversion) << std::endl; + // std::cout << "numpy version: " << py::str(npversion) << "\n"; m.def("amrex_init", [](const py::list args) { diff --git a/Source/ablastr/fields/EffectivePotentialPoissonSolver.H b/Source/ablastr/fields/EffectivePotentialPoissonSolver.H index c6b5d2c5bcc..80e899df027 100644 --- a/Source/ablastr/fields/EffectivePotentialPoissonSolver.H +++ b/Source/ablastr/fields/EffectivePotentialPoissonSolver.H @@ -260,7 +260,7 @@ computeEffectivePotentialPhi ( } // Run additional operations, such as calculation of the E field for embedded boundaries - if constexpr (!std::is_same::value) { + if constexpr (!std::is_same_v) { if (post_phi_calculation.has_value()) { post_phi_calculation.value()(mlmg, lev); } diff --git a/Source/ablastr/fields/Interpolate.H b/Source/ablastr/fields/Interpolate.H index e5121215393..dc4ad47df94 100644 --- a/Source/ablastr/fields/Interpolate.H +++ b/Source/ablastr/fields/Interpolate.H @@ -11,12 +11,9 @@ #include #include -#include - #include #include - namespace ablastr::fields::details { /** Local interpolation from phi_cp to phi[lev+1] diff --git a/Source/ablastr/utils/msg_logger/MsgLogger.H b/Source/ablastr/utils/msg_logger/MsgLogger.H index 2497bdcfae7..088a613bc87 100644 --- a/Source/ablastr/utils/msg_logger/MsgLogger.H +++ b/Source/ablastr/utils/msg_logger/MsgLogger.H @@ -79,10 +79,10 @@ namespace ablastr::utils::msg_logger * \brief Same as static Msg deserialize(std::vector::const_iterator& it) * but accepting an rvalue as an argument * - * @param[in] it iterator of a byte array + * @param[in] rit iterator of a byte array * @return a Msg struct */ - static Msg deserialize(std::vector::const_iterator&& it); + static Msg deserialize(std::vector::const_iterator&& rit); }; /** @@ -115,10 +115,10 @@ namespace ablastr::utils::msg_logger * \brief Same as static Msg MsgWithCounter(std::vector::const_iterator& it) * but accepting an rvalue as an argument * - * @param[in] it iterator of a byte array + * @param[in] rit iterator of a byte array * @return a MsgWithCounter struct */ - static MsgWithCounter deserialize(std::vector::const_iterator&& it); + static MsgWithCounter deserialize(std::vector::const_iterator&& rit); }; /** @@ -154,10 +154,10 @@ namespace ablastr::utils::msg_logger * \brief Same as static Msg MsgWithCounterAndRanks(std::vector::const_iterator& it) * but accepting an rvalue as an argument * - * @param[in] it iterator of a byte array + * @param[in] rit iterator of a byte array * @return a MsgWithCounterAndRanks struct */ - static MsgWithCounterAndRanks deserialize(std::vector::const_iterator&& it); + static MsgWithCounterAndRanks deserialize(std::vector::const_iterator&& rit); }; /** diff --git a/Source/ablastr/utils/msg_logger/MsgLogger.cpp b/Source/ablastr/utils/msg_logger/MsgLogger.cpp index 6537a8f61e5..6597588d085 100644 --- 
a/Source/ablastr/utils/msg_logger/MsgLogger.cpp +++ b/Source/ablastr/utils/msg_logger/MsgLogger.cpp @@ -147,9 +147,10 @@ Msg Msg::deserialize (std::vector::const_iterator& it) return msg; } -Msg Msg::deserialize (std::vector::const_iterator&& it) +Msg Msg::deserialize (std::vector::const_iterator&& rit) { - return Msg::deserialize(it); + auto lit = std::vector::const_iterator{std::move(rit)}; + return Msg::deserialize(lit); } std::vector MsgWithCounter::serialize() const @@ -174,9 +175,10 @@ MsgWithCounter MsgWithCounter::deserialize (std::vector::const_iterator& i return msg_with_counter; } -MsgWithCounter MsgWithCounter::deserialize (std::vector::const_iterator&& it) +MsgWithCounter MsgWithCounter::deserialize (std::vector::const_iterator&& rit) { - return MsgWithCounter::deserialize(it); + auto lit = std::vector::const_iterator{std::move(rit)}; + return MsgWithCounter::deserialize(lit); } std::vector MsgWithCounterAndRanks::serialize() const @@ -205,9 +207,10 @@ MsgWithCounterAndRanks::deserialize (std::vector::const_iterator& it) } MsgWithCounterAndRanks -MsgWithCounterAndRanks::deserialize (std::vector::const_iterator&& it) +MsgWithCounterAndRanks::deserialize (std::vector::const_iterator&& rit) { - return MsgWithCounterAndRanks::deserialize(it); + auto lit = std::vector::const_iterator{std::move(rit)}; + return MsgWithCounterAndRanks::deserialize(lit); } Logger::Logger() : diff --git a/Tools/Linter/runClangTidy.sh b/Tools/Linter/runClangTidy.sh index 262d713cac6..4c1948cf372 100755 --- a/Tools/Linter/runClangTidy.sh +++ b/Tools/Linter/runClangTidy.sh @@ -55,13 +55,13 @@ ${CTIDY} --version echo echo "This can be overridden by setting the environment" echo "variables CLANG, CLANGXX, and CLANGTIDY e.g.: " -echo "$ export CLANG=clang-16" -echo "$ export CLANGXX=clang++-16" -echo "$ export CTIDCLANGTIDYY=clang-tidy-16" +echo "$ export CLANG=clang-17" +echo "$ export CLANGXX=clang++-17" +echo "$ export CTIDCLANGTIDYY=clang-tidy-17" echo "$ ./Tools/Linter/runClangTidy.sh" echo echo "******************************************************" -echo "* Warning: clang v16 is currently used in CI tests. *" +echo "* Warning: clang v17 is currently used in CI tests. *" echo "* It is therefore recommended to use this version. *" echo "* Otherwise, a newer version may find issues not *" echo "* currently covered by CI tests while older versions *" diff --git a/Tools/QedTablesUtils/Source/QedTableCommons.H b/Tools/QedTablesUtils/Source/QedTableCommons.H index 2233513bc97..903ba4623a8 100644 --- a/Tools/QedTablesUtils/Source/QedTableCommons.H +++ b/Tools/QedTablesUtils/Source/QedTableCommons.H @@ -12,8 +12,8 @@ bool Contains (const ContainerType& container, const ElementType& el) void AbortWithMessage(const std::string& msg) { - std::cout << "### ABORT : " << msg << std::endl; - std::cout << "___________________________" << std::endl; + std::cerr << "### ABORT : " << msg << "\n"; + std::cerr << "___________________________\n"; exit(1); } From bc936fece76333f27db2e3e478a5a475658d3775 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Tue, 11 Feb 2025 06:15:56 +0100 Subject: [PATCH 228/278] WarpX class: moving initialization of warning manager to WarpXInit (#5579) This PR moves the initialization of the warning manager from the very large `ReadParameters` function of the WarpX class to a free function inside `WarpXInit.H/cpp` . This function is now called by the constructor of the WarpX class. The final goal is to simplify the WarpX class. 
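The net effect on the startup sequence can be sketched as follows
(schematic; see the constructor in the diff below for the actual code):

```
WarpX::WarpX ()
{
    // configure the warning manager first, so that warnings raised while
    // reading the input deck already honor warpx.abort_on_warning_threshold
    // and warpx.always_warn_immediately
    warpx::initialization::initialize_warning_manager();

    ReadParameters();        // no longer responsible for warning-manager setup
    BackwardCompatibility();
    // ...
}
```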
--- Source/Initialization/WarpXInit.H | 9 ++++++-- Source/Initialization/WarpXInit.cpp | 35 ++++++++++++++++++++++++++++- Source/WarpX.cpp | 28 ++--------------------- 3 files changed, 43 insertions(+), 29 deletions(-) diff --git a/Source/Initialization/WarpXInit.H b/Source/Initialization/WarpXInit.H index cb9de99c3bc..85e3b8d068e 100644 --- a/Source/Initialization/WarpXInit.H +++ b/Source/Initialization/WarpXInit.H @@ -17,14 +17,19 @@ namespace warpx::initialization * @param[in] argc number of arguments from main() * @param[in] argv argument strings from main() */ - void initialize_external_libraries(int argc, char* argv[]); + void initialize_external_libraries (int argc, char* argv[]); /** Initializes, in the following order: * - the FFT library through the anyfft::cleanup() function in ablastr * - the AMReX library * - the MPI library through the mpi_finalize helper function in ablastr */ - void finalize_external_libraries(); + void finalize_external_libraries (); + + /** + * Initializes the Warning manager in ablastr + */ + void initialize_warning_manager (); /** Check that warpx.dims matches the binary name */ diff --git a/Source/Initialization/WarpXInit.cpp b/Source/Initialization/WarpXInit.cpp index e9f3dc95a59..555bea52a7f 100644 --- a/Source/Initialization/WarpXInit.cpp +++ b/Source/Initialization/WarpXInit.cpp @@ -15,7 +15,9 @@ #include #include +#include +#include #include void warpx::initialization::initialize_external_libraries(int argc, char* argv[]) @@ -25,13 +27,44 @@ void warpx::initialization::initialize_external_libraries(int argc, char* argv[] ablastr::math::anyfft::setup(); } -void warpx::initialization::finalize_external_libraries() +void warpx::initialization::finalize_external_libraries () { ablastr::math::anyfft::cleanup(); amrex::Finalize(); ablastr::parallelization::mpi_finalize(); } +void warpx::initialization::initialize_warning_manager () +{ + const auto pp_warpx = amrex::ParmParse{"warpx"}; + + //"Synthetic" warning messages may be injected in the Warning Manager via + // inputfile for debug&testing purposes. + ablastr::warn_manager::GetWMInstance().debug_read_warnings_from_input(pp_warpx); + + // Set the flag to control if WarpX has to emit a warning message as soon as a warning is recorded + bool always_warn_immediately = false; + pp_warpx.query("always_warn_immediately", always_warn_immediately); + ablastr::warn_manager::GetWMInstance().SetAlwaysWarnImmediately(always_warn_immediately); + + // Set the WarnPriority threshold to decide if WarpX has to abort when a warning is recorded + if(std::string str_abort_on_warning_threshold; + pp_warpx.query("abort_on_warning_threshold", str_abort_on_warning_threshold)){ + std::optional abort_on_warning_threshold = std::nullopt; + if (str_abort_on_warning_threshold == "high") { + abort_on_warning_threshold = ablastr::warn_manager::WarnPriority::high; + } else if (str_abort_on_warning_threshold == "medium" ) { + abort_on_warning_threshold = ablastr::warn_manager::WarnPriority::medium; + } else if (str_abort_on_warning_threshold == "low") { + abort_on_warning_threshold = ablastr::warn_manager::WarnPriority::low; + } else { + WARPX_ABORT_WITH_MESSAGE(str_abort_on_warning_threshold + +"is not a valid option for warpx.abort_on_warning_threshold (use: low, medium or high)"); + } + ablastr::warn_manager::GetWMInstance().SetAbortThreshold(abort_on_warning_threshold); + } +} + void warpx::initialization::check_dims() { // Ensure that geometry.dims is set properly. 
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index 1e8e121dd5c..a17c7ff432e 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -246,6 +246,8 @@ WarpX::Finalize()

 WarpX::WarpX ()
 {
+    warpx::initialization::initialize_warning_manager();
+
     ReadParameters();

     BackwardCompatibility();
@@ -497,32 +499,6 @@ WarpX::ReadParameters ()
     {
         ParmParse const pp_warpx("warpx");

-        //"Synthetic" warning messages may be injected in the Warning Manager via
-        // inputfile for debug&testing purposes.
-        ablastr::warn_manager::GetWMInstance().debug_read_warnings_from_input(pp_warpx);
-
-        // Set the flag to control if WarpX has to emit a warning message as soon as a warning is recorded
-        bool always_warn_immediately = false;
-        pp_warpx.query("always_warn_immediately", always_warn_immediately);
-        ablastr::warn_manager::GetWMInstance().SetAlwaysWarnImmediately(always_warn_immediately);
-
-        // Set the WarnPriority threshold to decide if WarpX has to abort when a warning is recorded
-        if(std::string str_abort_on_warning_threshold;
-            pp_warpx.query("abort_on_warning_threshold", str_abort_on_warning_threshold)){
-            std::optional abort_on_warning_threshold = std::nullopt;
-            if (str_abort_on_warning_threshold == "high") {
-                abort_on_warning_threshold = ablastr::warn_manager::WarnPriority::high;
-            } else if (str_abort_on_warning_threshold == "medium" ) {
-                abort_on_warning_threshold = ablastr::warn_manager::WarnPriority::medium;
-            } else if (str_abort_on_warning_threshold == "low") {
-                abort_on_warning_threshold = ablastr::warn_manager::WarnPriority::low;
-            } else {
-                WARPX_ABORT_WITH_MESSAGE(str_abort_on_warning_threshold
-                    +"is not a valid option for warpx.abort_on_warning_threshold (use: low, medium or high)");
-            }
-            ablastr::warn_manager::GetWMInstance().SetAbortThreshold(abort_on_warning_threshold);
-        }
-
         std::vector numprocs_in;
         utils::parser::queryArrWithParser(
             pp_warpx, "numprocs", numprocs_in, 0, AMREX_SPACEDIM);
From 6dfa3ba5e74edc7ef80ac6bc6aa88b42b926f46f Mon Sep 17 00:00:00 2001
From: Luca Fedeli
Date: Tue, 11 Feb 2025 09:43:04 +0100
Subject: [PATCH 229/278] WarpX class: move shiftMF to anonymous namespace in WarpXMovingWindow.cpp (#5609)

This PR moves the static function `shiftMF` from the WarpX class to an
anonymous namespace in `WarpXMovingWindow.cpp`, where it is actually used.
This is done to simplify the WarpX class.
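As a generic illustration of the pattern (hypothetical helper name; the real
function and its parameters appear in the diff below):

```
// before: declared as a static member in the class header,
// callable from any translation unit that includes it

// after: internal linkage, local to the one .cpp file that uses it
namespace
{
    void shiftHelper (int num_shift)
    {
        // ... body formerly implemented as a static member function ...
        (void) num_shift; // placeholder so this sketch compiles
    }
}
```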
--- Source/Utils/WarpXMovingWindow.cpp | 439 +++++++++++++++-------------- Source/WarpX.H | 6 - 2 files changed, 231 insertions(+), 214 deletions(-) diff --git a/Source/Utils/WarpXMovingWindow.cpp b/Source/Utils/WarpXMovingWindow.cpp index 0cea2709312..281aa5e75ba 100644 --- a/Source/Utils/WarpXMovingWindow.cpp +++ b/Source/Utils/WarpXMovingWindow.cpp @@ -57,6 +57,199 @@ using namespace amrex; +namespace +{ + + /** This function shifts a MultiFab in a given direction + * + * \param[in,out] mf the MultiFab to be shifted + * \param[in] geom the Geometry object associated to the level of the MultiFab mf + * \param[in] num_shift magnitude of the shift (cell number) + * \param[in] dir direction of the shift + * \param[in] safe_guard_cells flag to enable "safe mode" data exchanges with more guard cells + * \param[in] do_single_precision_comms flag to enable single precision communications + * \param[in,out] cost the pointer to the data structure holding costs for timer-based load-balance + * \param[in] external_field the external field (used to initialize EM fields) + * \param[in] useparser flag to enable the use of a field parser to initialize EM fields + * \param[in] field_parser the field parser + * \param[in] PMLRZ_flag flag to enable a special treatment for PML in RZ simulations + */ + void shiftMF ( + amrex::MultiFab& mf, const amrex::Geometry& geom, + int num_shift, int dir, + bool safe_guard_cells, bool do_single_precision_comms, + amrex::LayoutData* cost, + amrex::Real external_field=0.0, bool useparser = false, + amrex::ParserExecutor<3> const& field_parser={}, + const bool PMLRZ_flag = false) + { + using namespace amrex::literals; + WARPX_PROFILE("warpx::shiftMF()"); + const amrex::BoxArray& ba = mf.boxArray(); + const amrex::DistributionMapping& dm = mf.DistributionMap(); + const int nc = mf.nComp(); + const amrex::IntVect& ng = mf.nGrowVect(); + + AMREX_ALWAYS_ASSERT(ng[dir] >= std::abs(num_shift)); + + amrex::MultiFab tmpmf(ba, dm, nc, ng); + amrex::MultiFab::Copy(tmpmf, mf, 0, 0, nc, ng); + + if ( safe_guard_cells ) { + // Fill guard cells. + ablastr::utils::communication::FillBoundary(tmpmf, do_single_precision_comms, geom.periodicity()); + } else { + amrex::IntVect ng_mw = amrex::IntVect::TheUnitVector(); + // Enough guard cells in the MW direction + ng_mw[dir] = std::abs(num_shift); + // Make sure we don't exceed number of guard cells allocated + ng_mw = ng_mw.min(ng); + // Fill guard cells. 
+ ablastr::utils::communication::FillBoundary(tmpmf, ng_mw, do_single_precision_comms, geom.periodicity()); + } + + // Make a box that covers the region that the window moved into + const amrex::IndexType& typ = ba.ixType(); + const amrex::Box& domainBox = geom.Domain(); + amrex::Box adjBox; + if (num_shift > 0) { + adjBox = adjCellHi(domainBox, dir, ng[dir]); + } else { + adjBox = adjCellLo(domainBox, dir, ng[dir]); + } + adjBox = amrex::convert(adjBox, typ); + + for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { + if (idim == dir and typ.nodeCentered(dir)) { + if (num_shift > 0) { + adjBox.growLo(idim, -1); + } else { + adjBox.growHi(idim, -1); + } + } else if (idim != dir) { + adjBox.growLo(idim, ng[idim]); + adjBox.growHi(idim, ng[idim]); + } + } + + amrex::IntVect shiftiv(0); + shiftiv[dir] = num_shift; + const amrex::Dim3 shift = shiftiv.dim3(); + + const amrex::RealBox& real_box = geom.ProbDomain(); + const auto dx = geom.CellSizeArray(); + +#ifdef AMREX_USE_OMP + #pragma omp parallel if (Gpu::notInLaunchRegion()) +#endif + for (amrex::MFIter mfi(tmpmf, TilingIfNotGPU()); mfi.isValid(); ++mfi ) + { + if (cost) + { + amrex::Gpu::synchronize(); + } + auto wt = static_cast<amrex::Real>(amrex::second()); + + auto const& dstfab = mf.array(mfi); + auto const& srcfab = tmpmf.array(mfi); + + const amrex::Box& outbox = mfi.growntilebox() & adjBox; + + if (outbox.ok()) { + if (!useparser) { + AMREX_PARALLEL_FOR_4D ( outbox, nc, i, j, k, n, + { + srcfab(i,j,k,n) = external_field; + }) + } else { + // index type of the src mf + auto const& mf_IndexType = (tmpmf).ixType(); + amrex::IntVect mf_type(AMREX_D_DECL(0,0,0)); + for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { + mf_type[idim] = mf_IndexType.nodeCentered(idim); + } + + amrex::ParallelFor (outbox, nc, + [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept + { + // Compute x,y,z co-ordinates based on index type of mf +#if defined(WARPX_DIM_1D_Z) + const amrex::Real x = 0.0_rt; + const amrex::Real y = 0.0_rt; + const amrex::Real fac_z = (1.0_rt - mf_type[0]) * dx[0]*0.5_rt; + const amrex::Real z = i*dx[0] + real_box.lo(0) + fac_z; +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + const amrex::Real fac_x = (1.0_rt - mf_type[0]) * dx[0]*0.5_rt; + const amrex::Real x = i*dx[0] + real_box.lo(0) + fac_x; + const amrex::Real y = 0.0; + const amrex::Real fac_z = (1.0_rt - mf_type[1]) * dx[1]*0.5_rt; + const amrex::Real z = j*dx[1] + real_box.lo(1) + fac_z; +#else + const amrex::Real fac_x = (1.0_rt - mf_type[0]) * dx[0]*0.5_rt; + const amrex::Real x = i*dx[0] + real_box.lo(0) + fac_x; + const amrex::Real fac_y = (1.0_rt - mf_type[1]) * dx[1]*0.5_rt; + const amrex::Real y = j*dx[1] + real_box.lo(1) + fac_y; + const amrex::Real fac_z = (1.0_rt - mf_type[2]) * dx[2]*0.5_rt; + const amrex::Real z = k*dx[2] + real_box.lo(2) + fac_z; +#endif + srcfab(i,j,k,n) = field_parser(x,y,z); + }); + } + + } + + amrex::Box dstBox = mf[mfi].box(); + if (num_shift > 0) { + dstBox.growHi(dir, -num_shift); + } else { + dstBox.growLo(dir, num_shift); + } + AMREX_PARALLEL_FOR_4D ( dstBox, nc, i, j, k, n, + { + dstfab(i,j,k,n) = srcfab(i+shift.x,j+shift.y,k+shift.z,n); + }) + + if (cost) + { + amrex::Gpu::synchronize(); + wt = static_cast<amrex::Real>(amrex::second()) - wt; + amrex::HostDevice::Atomic::Add( &(*cost)[mfi.index()], wt); + } + } + +#if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) + if (PMLRZ_flag) { + // This does the exchange of data in the corner guard cells, the cells that are in the + // guard region both radially and longitudinally.
These are the PML cells in the overlapping + // longitudinal region. FillBoundary normally does not update these cells. + // This update is needed so that the cells at the end of the FABs are updated appropriately + // with the data shifted from the neighboring FAB. Without this update, the RZ PML becomes + // unstable with the moving grid. + // This code creates a temporary MultiFab using a BoxList where the radial size of all of + // its boxes is increased so that the radial guard cells are included in the boxes valid domain. + // The temporary MultiFab is setup to refer to the data of the original Multifab (this can + // be done since the shape of the data is all the same, just the indexing is different). + amrex::BoxList bl; + const auto ba_size = static_cast<int>(ba.size()); + for (int i = 0; i < ba_size; ++i) { + bl.push_back(amrex::grow(ba[i], 0, mf.nGrowVect()[0])); + } + const amrex::BoxArray rba(std::move(bl)); + amrex::MultiFab rmf(rba, dm, mf.nComp(), IntVect(0,mf.nGrowVect()[1]), MFInfo().SetAlloc(false)); + + for (amrex::MFIter mfi(mf); mfi.isValid(); ++mfi) { + rmf.setFab(mfi, FArrayBox(mf[mfi], amrex::make_alias, 0, mf.nComp())); + } + rmf.FillBoundary(false); + } +#else + amrex::ignore_unused(PMLRZ_flag); +#endif + + } + +} + void WarpX::UpdateInjectionPosition (const amrex::Real a_dt) { @@ -208,9 +401,6 @@ WarpX::MoveWindow (const int step, bool move_j) int num_shift = num_shift_base; int num_shift_crse = num_shift; - constexpr auto do_update_cost = true; - constexpr auto dont_update_cost = false; //We can't update cost for PML - // Shift the mesh fields for (int lev = 0; lev <= finest_level; ++lev) { @@ -219,6 +409,11 @@ WarpX::MoveWindow (const int step, bool move_j) num_shift *= refRatio(lev-1)[dir]; } + auto* cost_lev = + (WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) ?
getCosts(lev) : nullptr; + + amrex::LayoutData<amrex::Real>* no_cost = nullptr; //We can't update cost for PML + // Shift each component of vector fields (E, B, j) for (int dim = 0; dim < 3; ++dim) { // Fine grid @@ -240,59 +435,60 @@ WarpX::MoveWindow (const int step, bool move_j) if (dim == 1) { Efield_parser = m_p_ext_field_params->Eyfield_parser->compile<3>(); } if (dim == 2) { Efield_parser = m_p_ext_field_params->Ezfield_parser->compile<3>(); } } - shiftMF(*m_fields.get(FieldType::Bfield_fp, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells, + ::shiftMF(*m_fields.get(FieldType::Bfield_fp, Direction{dim}, lev), geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*m_fields.get(FieldType::Efield_fp, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells, + ::shiftMF(*m_fields.get(FieldType::Efield_fp, Direction{dim}, lev), geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev, m_p_ext_field_params->E_external_grid[dim], use_Eparser, Efield_parser); if (fft_do_time_averaging) { ablastr::fields::MultiLevelVectorField Efield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_fp, finest_level); ablastr::fields::MultiLevelVectorField Bfield_avg_fp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_fp, finest_level); - shiftMF(*Bfield_avg_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells, + ::shiftMF(*Bfield_avg_fp[lev][dim], geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*Efield_avg_fp[lev][dim], geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells, + ::shiftMF(*Efield_avg_fp[lev][dim], geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev, m_p_ext_field_params-> E_external_grid[dim], use_Eparser, Efield_parser); } if (move_j) { - shiftMF(*m_fields.get(FieldType::current_fp, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells); + ::shiftMF(*m_fields.get(FieldType::current_fp, Direction{dim}, lev), geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev); } if (pml[lev] && pml[lev]->ok()) { amrex::MultiFab* pml_B = m_fields.get(FieldType::pml_B_fp, Direction{dim}, lev); amrex::MultiFab* pml_E = m_fields.get(FieldType::pml_E_fp, Direction{dim}, lev); - shiftMF(*pml_B, geom[lev], num_shift, dir, lev, dont_update_cost, m_safe_guard_cells); - shiftMF(*pml_E, geom[lev], num_shift, dir, lev, dont_update_cost, m_safe_guard_cells); + ::shiftMF(*pml_B, geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, no_cost); + ::shiftMF(*pml_E, geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, no_cost); } #if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) + const bool PMLRZ_flag = getPMLRZ(); if (pml_rz[lev] && dim < 2) { amrex::MultiFab* pml_rz_B = m_fields.get(FieldType::pml_B_fp, Direction{dim}, lev); amrex::MultiFab* pml_rz_E = m_fields.get(FieldType::pml_E_fp, Direction{dim}, lev); - shiftMF(*pml_rz_B, geom[lev], num_shift, dir, lev, dont_update_cost, m_safe_guard_cells); - shiftMF(*pml_rz_E, geom[lev], num_shift, dir, lev, dont_update_cost, m_safe_guard_cells); + ::shiftMF(*pml_rz_B, geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, no_cost, 0.0_rt, false,
amrex::ParserExecutor<3>{}, PMLRZ_flag); + ::shiftMF(*pml_rz_E, geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, no_cost, 0.0_rt, false, amrex::ParserExecutor<3>{}, PMLRZ_flag); } #endif if (lev > 0) { // coarse grid - shiftMF(*m_fields.get(FieldType::Bfield_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells, + ::shiftMF(*m_fields.get(FieldType::Bfield_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*m_fields.get(FieldType::Efield_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells, + ::shiftMF(*m_fields.get(FieldType::Efield_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev, m_p_ext_field_params->E_external_grid[dim], use_Eparser, Efield_parser); - shiftMF(*m_fields.get(FieldType::Bfield_aux, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells); - shiftMF(*m_fields.get(FieldType::Efield_aux, Direction{dim}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells); + ::shiftMF(*m_fields.get(FieldType::Bfield_aux, Direction{dim}, lev), geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev); + ::shiftMF(*m_fields.get(FieldType::Efield_aux, Direction{dim}, lev), geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev); if (fft_do_time_averaging) { ablastr::fields::MultiLevelVectorField Efield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Efield_avg_cp, finest_level, skip_lev0_coarse_patch); ablastr::fields::MultiLevelVectorField Bfield_avg_cp = m_fields.get_mr_levels_alldirs(FieldType::Bfield_avg_cp, finest_level, skip_lev0_coarse_patch); - shiftMF(*Bfield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells, + ::shiftMF(*Bfield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev, m_p_ext_field_params->B_external_grid[dim], use_Bparser, Bfield_parser); - shiftMF(*Efield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells, + ::shiftMF(*Efield_avg_cp[lev][dim], geom[lev-1], num_shift_crse, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev, m_p_ext_field_params->E_external_grid[dim], use_Eparser, Efield_parser); } if (move_j) { - shiftMF(*m_fields.get(FieldType::current_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells); + ::shiftMF(*m_fields.get(FieldType::current_cp, Direction{dim}, lev), geom[lev-1], num_shift_crse, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev); } if (do_pml && pml[lev]->ok()) { amrex::MultiFab* pml_B_cp = m_fields.get(FieldType::pml_B_cp, Direction{dim}, lev); amrex::MultiFab* pml_E_cp = m_fields.get(FieldType::pml_E_cp, Direction{dim}, lev); - shiftMF(*pml_B_cp, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost, m_safe_guard_cells); - shiftMF(*pml_E_cp, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost, m_safe_guard_cells); + ::shiftMF(*pml_B_cp, geom[lev-1], num_shift_crse, dir, m_safe_guard_cells, do_single_precision_comms, no_cost); + ::shiftMF(*pml_E_cp, geom[lev-1], num_shift_crse, dir, m_safe_guard_cells, do_single_precision_comms, no_cost); } } } @@ -302,11 +498,11 @@ WarpX::MoveWindow (const int 
step, bool move_j) if (m_fields.has(FieldType::F_fp, lev)) { // Fine grid - shiftMF(*m_fields.get(FieldType::F_fp, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells); + ::shiftMF(*m_fields.get(FieldType::F_fp, lev), geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev); if (lev > 0) { // Coarse grid - shiftMF(*m_fields.get(FieldType::F_cp, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells); + ::shiftMF(*m_fields.get(FieldType::F_cp, lev), geom[lev-1], num_shift_crse, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev); } } @@ -317,7 +513,7 @@ WarpX::MoveWindow (const int step, bool move_j) if (do_pml && pml[lev]->ok()) { amrex::MultiFab* pml_F = m_fields.get(FieldType::pml_F_fp, lev); - shiftMF(*pml_F, geom[lev], num_shift, dir, lev, dont_update_cost, m_safe_guard_cells); + ::shiftMF(*pml_F, geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, no_cost); } if (lev > 0) { @@ -325,7 +521,7 @@ WarpX::MoveWindow (const int step, bool move_j) if (do_pml && pml[lev]->ok()) { amrex::MultiFab* pml_F = m_fields.get(FieldType::pml_F_cp, lev); - shiftMF(*pml_F, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost, m_safe_guard_cells); + ::shiftMF(*pml_F, geom[lev-1], num_shift_crse, dir, m_safe_guard_cells, do_single_precision_comms, no_cost); } } } @@ -335,11 +531,11 @@ WarpX::MoveWindow (const int step, bool move_j) if (m_fields.has(FieldType::G_fp, lev)) { // Fine grid - shiftMF(*m_fields.get(FieldType::G_fp, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells); + ::shiftMF(*m_fields.get(FieldType::G_fp, lev), geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev); if (lev > 0) { // Coarse grid - shiftMF(*m_fields.get(FieldType::G_cp, lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells); + ::shiftMF(*m_fields.get(FieldType::G_cp, lev), geom[lev-1], num_shift_crse, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev); } } @@ -350,7 +546,7 @@ WarpX::MoveWindow (const int step, bool move_j) if (do_pml && pml[lev]->ok()) { amrex::MultiFab* pml_G = m_fields.get(FieldType::pml_G_fp, lev); - shiftMF(*pml_G, geom[lev], num_shift, dir, lev, dont_update_cost, m_safe_guard_cells); + ::shiftMF(*pml_G, geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, no_cost); } if (lev > 0) { @@ -358,7 +554,7 @@ WarpX::MoveWindow (const int step, bool move_j) if (do_pml && pml[lev]->ok()) { amrex::MultiFab* pml_G = m_fields.get(FieldType::pml_G_cp, lev); - shiftMF(*pml_G, geom[lev-1], num_shift_crse, dir, lev, dont_update_cost, m_safe_guard_cells); + ::shiftMF(*pml_G, geom[lev-1], num_shift_crse, dir, m_safe_guard_cells, do_single_precision_comms, no_cost); } } } @@ -367,10 +563,10 @@ WarpX::MoveWindow (const int step, bool move_j) if (move_j) { if (m_fields.has(FieldType::rho_fp, lev)) { // Fine grid - shiftMF(*m_fields.get(FieldType::rho_fp,lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells); + ::shiftMF(*m_fields.get(FieldType::rho_fp,lev), geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev); if (lev > 0){ // Coarse grid - shiftMF(*m_fields.get(FieldType::rho_cp,lev), geom[lev-1], num_shift_crse, dir, lev, do_update_cost, m_safe_guard_cells); + ::shiftMF(*m_fields.get(FieldType::rho_cp,lev), geom[lev-1], num_shift_crse, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev); } } } @@ -380,10 +576,10 @@ WarpX::MoveWindow (const int step, 
bool move_j) const int n_fluid_species = myfl->nSpecies(); for (int i=0; i<n_fluid_species; i++) { WarpXFluidContainer& fl = myfl->GetFluidContainer(i); - shiftMF( *m_fields.get(fl.name_mf_N, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells ); - shiftMF( *m_fields.get(fl.name_mf_NU, Direction{0}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells ); - shiftMF( *m_fields.get(fl.name_mf_NU, Direction{1}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells ); - shiftMF( *m_fields.get(fl.name_mf_NU, Direction{2}, lev), geom[lev], num_shift, dir, lev, do_update_cost, m_safe_guard_cells ); + ::shiftMF( *m_fields.get(fl.name_mf_N, lev), geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev ); + ::shiftMF( *m_fields.get(fl.name_mf_NU, Direction{0}, lev), geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev ); + ::shiftMF( *m_fields.get(fl.name_mf_NU, Direction{1}, lev), geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev ); + ::shiftMF( *m_fields.get(fl.name_mf_NU, Direction{2}, lev), geom[lev], num_shift, dir, m_safe_guard_cells, do_single_precision_comms, cost_lev ); } } } @@ -477,179 +673,6 @@ WarpX::MoveWindow (const int step, bool move_j) return num_shift_base; } -void -WarpX::shiftMF (amrex::MultiFab& mf, const amrex::Geometry& geom, - int num_shift, int dir, const int lev, bool update_cost_flag, - const bool safe_guard_cells, - amrex::Real external_field, bool useparser, - amrex::ParserExecutor<3> const& field_parser) -{ - using namespace amrex::literals; - WARPX_PROFILE("WarpX::shiftMF()"); - const amrex::BoxArray& ba = mf.boxArray(); - const amrex::DistributionMapping& dm = mf.DistributionMap(); - const int nc = mf.nComp(); - const amrex::IntVect& ng = mf.nGrowVect(); - - AMREX_ALWAYS_ASSERT(ng[dir] >= num_shift); - - amrex::MultiFab tmpmf(ba, dm, nc, ng); - amrex::MultiFab::Copy(tmpmf, mf, 0, 0, nc, ng); - - if ( safe_guard_cells ) { - // Fill guard cells. - ablastr::utils::communication::FillBoundary(tmpmf, WarpX::do_single_precision_comms, geom.periodicity()); - } else { - amrex::IntVect ng_mw = amrex::IntVect::TheUnitVector(); - // Enough guard cells in the MW direction - ng_mw[dir] = num_shift; - // Make sure we don't exceed number of guard cells allocated - ng_mw = ng_mw.min(ng); - // Fill guard cells.
- ablastr::utils::communication::FillBoundary(tmpmf, ng_mw, WarpX::do_single_precision_comms, geom.periodicity()); - } - - // Make a box that covers the region that the window moved into - const amrex::IndexType& typ = ba.ixType(); - const amrex::Box& domainBox = geom.Domain(); - amrex::Box adjBox; - if (num_shift > 0) { - adjBox = adjCellHi(domainBox, dir, ng[dir]); - } else { - adjBox = adjCellLo(domainBox, dir, ng[dir]); - } - adjBox = amrex::convert(adjBox, typ); - - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - if (idim == dir and typ.nodeCentered(dir)) { - if (num_shift > 0) { - adjBox.growLo(idim, -1); - } else { - adjBox.growHi(idim, -1); - } - } else if (idim != dir) { - adjBox.growLo(idim, ng[idim]); - adjBox.growHi(idim, ng[idim]); - } - } - - amrex::IntVect shiftiv(0); - shiftiv[dir] = num_shift; - const amrex::Dim3 shift = shiftiv.dim3(); - - const amrex::RealBox& real_box = geom.ProbDomain(); - const auto dx = geom.CellSizeArray(); - - amrex::LayoutData<amrex::Real>* cost = WarpX::getCosts(lev); -#ifdef AMREX_USE_OMP -#pragma omp parallel if (Gpu::notInLaunchRegion()) -#endif - - for (amrex::MFIter mfi(tmpmf, TilingIfNotGPU()); mfi.isValid(); ++mfi ) - { - if (cost && WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) - { - amrex::Gpu::synchronize(); - } - auto wt = static_cast<amrex::Real>(amrex::second()); - - auto const& dstfab = mf.array(mfi); - auto const& srcfab = tmpmf.array(mfi); - - const amrex::Box& outbox = mfi.growntilebox() & adjBox; - - if (outbox.ok()) { - if (!useparser) { - AMREX_PARALLEL_FOR_4D ( outbox, nc, i, j, k, n, - { - srcfab(i,j,k,n) = external_field; - }) - } else { - // index type of the src mf - auto const& mf_IndexType = (tmpmf).ixType(); - amrex::IntVect mf_type(AMREX_D_DECL(0,0,0)); - for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { - mf_type[idim] = mf_IndexType.nodeCentered(idim); - } - - amrex::ParallelFor (outbox, nc, - [=] AMREX_GPU_DEVICE (int i, int j, int k, int n) noexcept - { - // Compute x,y,z co-ordinates based on index type of mf -#if defined(WARPX_DIM_1D_Z) - const amrex::Real x = 0.0_rt; - const amrex::Real y = 0.0_rt; - const amrex::Real fac_z = (1.0_rt - mf_type[0]) * dx[0]*0.5_rt; - const amrex::Real z = i*dx[0] + real_box.lo(0) + fac_z; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - const amrex::Real fac_x = (1.0_rt - mf_type[0]) * dx[0]*0.5_rt; - const amrex::Real x = i*dx[0] + real_box.lo(0) + fac_x; - const amrex::Real y = 0.0; - const amrex::Real fac_z = (1.0_rt - mf_type[1]) * dx[1]*0.5_rt; - const amrex::Real z = j*dx[1] + real_box.lo(1) + fac_z; -#else - const amrex::Real fac_x = (1.0_rt - mf_type[0]) * dx[0]*0.5_rt; - const amrex::Real x = i*dx[0] + real_box.lo(0) + fac_x; - const amrex::Real fac_y = (1.0_rt - mf_type[1]) * dx[1]*0.5_rt; - const amrex::Real y = j*dx[1] + real_box.lo(1) + fac_y; - const amrex::Real fac_z = (1.0_rt - mf_type[2]) * dx[2]*0.5_rt; - const amrex::Real z = k*dx[2] + real_box.lo(2) + fac_z; -#endif - srcfab(i,j,k,n) = field_parser(x,y,z); - }); - } - - } - - amrex::Box dstBox = mf[mfi].box(); - if (num_shift > 0) { - dstBox.growHi(dir, -num_shift); - } else { - dstBox.growLo(dir, num_shift); - } - AMREX_PARALLEL_FOR_4D ( dstBox, nc, i, j, k, n, - { - dstfab(i,j,k,n) = srcfab(i+shift.x,j+shift.y,k+shift.z,n); - }) - - if (cost && update_cost_flag && - WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) - { - amrex::Gpu::synchronize(); - wt = static_cast<amrex::Real>(amrex::second()) - wt; - amrex::HostDevice::Atomic::Add( &(*cost)[mfi.index()], wt); - } - } -
-#if (defined WARPX_DIM_RZ) && (defined WARPX_USE_FFT) - if (WarpX::GetInstance().getPMLRZ()) { - // This does the exchange of data in the corner guard cells, the cells that are in the - // guard region both radially and longitudinally. These are the PML cells in the overlapping - // longitudinal region. FillBoundary normally does not update these cells. - // This update is needed so that the cells at the end of the FABs are updated appropriately - // with the data shifted from the neighboring FAB. Without this update, the RZ PML becomes - // unstable with the moving grid. - // This code creates a temporary MultiFab using a BoxList where the radial size of all of - // its boxes is increased so that the radial guard cells are included in the boxes valid domain. - // The temporary MultiFab is setup to refer to the data of the original Multifab (this can - // be done since the shape of the data is all the same, just the indexing is different). - amrex::BoxList bl; - const auto ba_size = static_cast<int>(ba.size()); - for (int i = 0; i < ba_size; ++i) { - bl.push_back(amrex::grow(ba[i], 0, mf.nGrowVect()[0])); - } - const amrex::BoxArray rba(std::move(bl)); - amrex::MultiFab rmf(rba, dm, mf.nComp(), IntVect(0,mf.nGrowVect()[1]), MFInfo().SetAlloc(false)); - - for (amrex::MFIter mfi(mf); mfi.isValid(); ++mfi) { - rmf.setFab(mfi, FArrayBox(mf[mfi], amrex::make_alias, 0, mf.nComp())); - } - rmf.FillBoundary(false); - } -#endif - -} - void WarpX::ShiftGalileanBoundary () { diff --git a/Source/WarpX.H b/Source/WarpX.H index b12cb1ab7f0..7d164a9e685 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -166,12 +166,6 @@ public: amrex::Vector< std::array< std::unique_ptr<amrex::iMultiFab>,3 > >& GetEBUpdateEFlag() { return m_eb_update_E; } amrex::Vector< std::unique_ptr<amrex::iMultiFab> > const & GetEBReduceParticleShapeFlag() const { return m_eb_reduce_particle_shape; } - static void shiftMF (amrex::MultiFab& mf, const amrex::Geometry& geom, - int num_shift, int dir, int lev, bool update_cost_flag, - bool safe_guard_cells, - amrex::Real external_field=0.0, bool useparser = false, - amrex::ParserExecutor<3> const& field_parser={}); - /** * \brief * If an authors' string is specified in the inputfile, this method returns that string. From 2cc4fd2c3ad5be96e1aa5811d72a8e1018d925c4 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 11 Feb 2025 00:50:59 -0800 Subject: [PATCH 230/278] AMReX/pyAMReX/PICSAR: Weekly Update (#5655) Weekly update to latest AMReX. Weekly update to latest pyAMReX. Weekly update to latest PICSAR (no changes). ```console ./Tools/Release/updateAMReX.py ./Tools/Release/updatepyAMReX.py ./Tools/Release/updatePICSAR.py ``` --------- Signed-off-by: Axel Huebl Co-authored-by: Weiqun Zhang --- .github/workflows/cuda.yml | 2 +- Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp | 2 +- cmake/dependencies/AMReX.cmake | 2 +- cmake/dependencies/pyAMReX.cmake | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 0943de41e55..6e87134904f 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -127,7 +127,7 @@ jobs: which nvcc || echo "nvcc not in PATH!"
git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 78bdf0faabc4101d5333ebb421e553efcc7ec04e && cd - + cd ../amrex && git checkout --detach 198da4879a63f1bc8c4e8d674bf9185525318f61 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp index 74f9b308acd..31d8136e175 100755 --- a/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp +++ b/Source/ablastr/fields/IntegratedGreenFunctionSolver.cpp @@ -59,7 +59,7 @@ computePhiIGF ( amrex::MultiFab const & rho, } if (!obc_solver || obc_solver->Domain() != domain) { amrex::FFT::Info info{}; - if (is_igf_2d_slices) { info.setBatchMode(true); } // do 2D FFTs + if (is_igf_2d_slices) { info.setTwoDMode(true); } // do 2D FFTs info.setNumProcs(nprocs); obc_solver = std::make_unique<amrex::FFT::OpenBCSolver<amrex::Real>>(domain, info); } diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 5136cb8f2f4..7f5546a931b 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -294,7 +294,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "78bdf0faabc4101d5333ebb421e553efcc7ec04e" +set(WarpX_amrex_branch "198da4879a63f1bc8c4e8d674bf9185525318f61" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index b716e883be9..be7c64acd69 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "006bf94a4c68466fac8a1281750391b5a6083d82" +set(WarpX_pyamrex_branch "3088ea12a1a6287246bf027c4235f10e92472450" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") From 7c9f8f2e0c401e61b91842832319553015a1d7fc Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 14 Feb 2025 01:17:27 +0100 Subject: [PATCH 231/278] Move several EB-related methods out of the WarpX class (#5630) This PR transforms the WarpX member functions `MarkReducedShapeCells`, `MarkUpdateCellsStairCase`, `MarkUpdateECellsECT`, `MarkUpdateBCellsECT`, `MarkExtensionCells` into pure functions inside the namespace `warpx::embedded_boundary`, together with `ComputeEdgeLengths`, `ComputeFaceAreas`, `ScaleEdges`, and `ScaleAreas`. The source files containing these functions are renamed as `EmbeddedBoundaryInit.H/cpp`, since these functions are called only during the initialization.
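The practical effect at call sites is that an implicit member call on the WarpX object becomes a qualified call into `warpx::embedded_boundary`, with every input and output passed as an argument, so the functions can be exercised without constructing a WarpX instance. A hedged sketch of the shape of such a call, with a deliberately simplified signature (the real `ScaleEdges` in the diff below takes an `ablastr::fields::VectorField` and the cell sizes):

```cpp
#include <array>
#include <vector>

namespace warpx::embedded_boundary
{
    // Simplified stand-in for illustration only: the real function scales
    // per-direction edge-length fractions by the mesh spacing.
    void ScaleEdges (std::vector<double>& edge_lengths,
                     std::array<double,3> const& cell_size)
    {
        for (auto& l : edge_lengths) { l *= cell_size[0]; }
    }
}

int main ()
{
    std::vector<double> edge_lengths {1.0, 0.5, 0.0};
    std::array<double,3> const cell_size {1.0e-6, 1.0e-6, 1.0e-6};
    // Before: an (implicit) member call inside WarpX initialization.
    // After: a qualified call with all state passed explicitly.
    warpx::embedded_boundary::ScaleEdges(edge_lengths, cell_size);
    return 0;
}
```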
--- Source/BoundaryConditions/PML.cpp | 2 +- Source/EmbeddedBoundary/CMakeLists.txt | 2 +- Source/EmbeddedBoundary/EmbeddedBoundary.H | 55 -- Source/EmbeddedBoundary/EmbeddedBoundary.cpp | 200 ------ .../EmbeddedBoundary/EmbeddedBoundaryInit.H | 141 ++++ .../EmbeddedBoundary/EmbeddedBoundaryInit.cpp | 614 ++++++++++++++++++ Source/EmbeddedBoundary/Make.package | 4 +- Source/EmbeddedBoundary/WarpXInitEB.cpp | 434 +------------ Source/Initialization/WarpXInitData.cpp | 19 +- Source/WarpX.H | 79 --- 10 files changed, 782 insertions(+), 768 deletions(-) delete mode 100644 Source/EmbeddedBoundary/EmbeddedBoundary.H delete mode 100644 Source/EmbeddedBoundary/EmbeddedBoundary.cpp create mode 100644 Source/EmbeddedBoundary/EmbeddedBoundaryInit.H create mode 100644 Source/EmbeddedBoundary/EmbeddedBoundaryInit.cpp diff --git a/Source/BoundaryConditions/PML.cpp b/Source/BoundaryConditions/PML.cpp index 1b66195d163..195642ade2c 100644 --- a/Source/BoundaryConditions/PML.cpp +++ b/Source/BoundaryConditions/PML.cpp @@ -12,7 +12,7 @@ #include "BoundaryConditions/PMLComponent.H" #include "Fields.H" #ifdef AMREX_USE_EB -# include "EmbeddedBoundary/EmbeddedBoundary.H" +# include "EmbeddedBoundary/EmbeddedBoundaryInit.H" #endif #ifdef WARPX_USE_FFT # include "FieldSolver/SpectralSolver/SpectralFieldData.H" diff --git a/Source/EmbeddedBoundary/CMakeLists.txt b/Source/EmbeddedBoundary/CMakeLists.txt index 75f9bbdaa04..909886bbad6 100644 --- a/Source/EmbeddedBoundary/CMakeLists.txt +++ b/Source/EmbeddedBoundary/CMakeLists.txt @@ -2,7 +2,7 @@ foreach(D IN LISTS WarpX_DIMS) warpx_set_suffix_dims(SD ${D}) target_sources(lib_${SD} PRIVATE - EmbeddedBoundary.cpp + EmbeddedBoundaryInit.cpp Enabled.cpp WarpXInitEB.cpp WarpXFaceExtensions.cpp diff --git a/Source/EmbeddedBoundary/EmbeddedBoundary.H b/Source/EmbeddedBoundary/EmbeddedBoundary.H deleted file mode 100644 index fc02667246b..00000000000 --- a/Source/EmbeddedBoundary/EmbeddedBoundary.H +++ /dev/null @@ -1,55 +0,0 @@ -/* Copyright 2021-2025 Lorenzo Giacomel, Luca Fedeli - * - * This file is part of WarpX. - * - * License: BSD-3-Clause-LBNL - */ - -#ifndef WARPX_EMBEDDED_BOUNDARY_EMBEDDED_BOUNDARY_H_ -#define WARPX_EMBEDDED_BOUNDARY_EMBEDDED_BOUNDARY_H_ - -#include "Enabled.H" - -#ifdef AMREX_USE_EB - -#include - -#include -#include - -#include - -namespace warpx::embedded_boundary -{ - /** - * \brief Compute the length of the mesh edges. Here the length is a value in [0, 1]. - * An edge of length 0 is fully covered. - */ - void ComputeEdgeLengths ( - ablastr::fields::VectorField& edge_lengths, - const amrex::EBFArrayBoxFactory& eb_fact); - /** - * \brief Compute the area of the mesh faces. Here the area is a value in [0, 1]. - * An edge of area 0 is fully covered. - */ - void ComputeFaceAreas ( - ablastr::fields::VectorField& face_areas, - const amrex::EBFArrayBoxFactory& eb_fact); - - /** - * \brief Scale the edges lengths by the mesh width to obtain the real lengths. - */ - void ScaleEdges ( - ablastr::fields::VectorField& edge_lengths, - const std::array<amrex::Real,3>& cell_size); - /** - * \brief Scale the edges areas by the mesh width to obtain the real areas.
- */ - void ScaleAreas ( - ablastr::fields::VectorField& face_areas, - const std::array<amrex::Real,3>& cell_size); -} - -#endif - -#endif //WARPX_EMBEDDED_BOUNDARY_EMBEDDED_BOUNDARY_H_ diff --git a/Source/EmbeddedBoundary/EmbeddedBoundary.cpp b/Source/EmbeddedBoundary/EmbeddedBoundary.cpp deleted file mode 100644 index 9c3d53aefeb..00000000000 --- a/Source/EmbeddedBoundary/EmbeddedBoundary.cpp +++ /dev/null @@ -1,200 +0,0 @@ -/* Copyright 2021-2025 Lorenzo Giacomel, Luca Fedeli - * - * This file is part of WarpX. - * - * License: BSD-3-Clause-LBNL - */ - -#include "Enabled.H" - -#ifdef AMREX_USE_EB - -#include "EmbeddedBoundary.H" - -#include "Utils/TextMsg.H" - -#include -#include -#include -#include -#include -#include - -namespace web = warpx::embedded_boundary; - -void -web::ComputeEdgeLengths ( - ablastr::fields::VectorField& edge_lengths, - const amrex::EBFArrayBoxFactory& eb_fact) -{ - BL_PROFILE("ComputeEdgeLengths"); - -#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) - WARPX_ABORT_WITH_MESSAGE("ComputeEdgeLengths only implemented in 2D and 3D"); -#endif - - auto const &flags = eb_fact.getMultiEBCellFlagFab(); - auto const &edge_centroid = eb_fact.getEdgeCent(); - for (int idim = 0; idim < 3; ++idim){ -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - if (idim == 1) { - edge_lengths[1]->setVal(0.); - continue; - } -#endif - for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi){ - amrex::Box const box = mfi.tilebox(edge_lengths[idim]->ixType().toIntVect(), - edge_lengths[idim]->nGrowVect()); - amrex::FabType const fab_type = flags[mfi].getType(box); - auto const &edge_lengths_dim = edge_lengths[idim]->array(mfi); - - if (fab_type == amrex::FabType::regular) { - // every cell in box is all regular - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - edge_lengths_dim(i, j, k) = 1.; - }); - } else if (fab_type == amrex::FabType::covered) { - // every cell in box is all covered - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - edge_lengths_dim(i, j, k) = 0.; - }); - } else { -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - int idim_amrex = idim; - if (idim == 2) { idim_amrex = 1; } - auto const &edge_cent = edge_centroid[idim_amrex]->const_array(mfi); -#elif defined(WARPX_DIM_3D) - auto const &edge_cent = edge_centroid[idim]->const_array(mfi); -#endif - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - if (edge_cent(i, j, k) == amrex::Real(-1.0)) { - // This edge is all covered - edge_lengths_dim(i, j, k) = 0.; - } else if (edge_cent(i, j, k) == amrex::Real(1.0)) { - // This edge is all open - edge_lengths_dim(i, j, k) = 1.; - } else { - // This edge is cut. - edge_lengths_dim(i, j, k) = 1 - amrex::Math::abs(amrex::Real(2.0) - * edge_cent(i, j, k)); - } - - }); - } - } - } -} - - -void -web::ComputeFaceAreas ( - ablastr::fields::VectorField& face_areas, - const amrex::EBFArrayBoxFactory& eb_fact) -{ - BL_PROFILE("ComputeFaceAreas"); - -#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) - WARPX_ABORT_WITH_MESSAGE("ComputeFaceAreas only implemented in 2D and 3D"); -#endif - - auto const &flags = eb_fact.getMultiEBCellFlagFab(); -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - //In 2D the volume frac is actually the area frac.
- auto const &area_frac = eb_fact.getVolFrac(); -#elif defined(WARPX_DIM_3D) - auto const &area_frac = eb_fact.getAreaFrac(); -#endif - - for (int idim = 0; idim < 3; ++idim) { -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - if (idim == 0 || idim == 2) { - face_areas[idim]->setVal(0.); - continue; - } -#endif - for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi) { - amrex::Box const box = mfi.tilebox(face_areas[idim]->ixType().toIntVect(), - face_areas[idim]->nGrowVect()); - amrex::FabType const fab_type = flags[mfi].getType(box); - auto const &face_areas_dim = face_areas[idim]->array(mfi); - if (fab_type == amrex::FabType::regular) { - // every cell in box is all regular - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - face_areas_dim(i, j, k) = amrex::Real(1.); - }); - } else if (fab_type == amrex::FabType::covered) { - // every cell in box is all covered - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - face_areas_dim(i, j, k) = amrex::Real(0.); - }); - } else { -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - auto const &face = area_frac.const_array(mfi); -#elif defined(WARPX_DIM_3D) - auto const &face = area_frac[idim]->const_array(mfi); -#endif - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - face_areas_dim(i, j, k) = face(i, j, k); - }); - } - } - } -} - -void -web::ScaleEdges ( - ablastr::fields::VectorField& edge_lengths, - const std::array<amrex::Real,3>& cell_size) -{ - BL_PROFILE("ScaleEdges"); - -#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) - WARPX_ABORT_WITH_MESSAGE("ScaleEdges only implemented in 2D and 3D"); -#endif - - for (int idim = 0; idim < 3; ++idim){ -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - if (idim == 1) { continue; } -#endif - for (amrex::MFIter mfi(*edge_lengths[0]); mfi.isValid(); ++mfi) { - const amrex::Box& box = mfi.tilebox(edge_lengths[idim]->ixType().toIntVect(), - edge_lengths[idim]->nGrowVect() ); - auto const &edge_lengths_dim = edge_lengths[idim]->array(mfi); - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - edge_lengths_dim(i, j, k) *= cell_size[idim]; - }); - } - } -} - - -void -web::ScaleAreas ( - ablastr::fields::VectorField& face_areas, - const std::array<amrex::Real,3>& cell_size) -{ - BL_PROFILE("ScaleAreas"); - -#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) - WARPX_ABORT_WITH_MESSAGE("ScaleAreas only implemented in 2D and 3D"); -#endif - - for (int idim = 0; idim < 3; ++idim) { -#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - if (idim == 0 || idim == 2) { continue; } -#endif - for (amrex::MFIter mfi(*face_areas[0]); mfi.isValid(); ++mfi) { - const amrex::Box& box = mfi.tilebox(face_areas[idim]->ixType().toIntVect(), - face_areas[idim]->nGrowVect() ); - amrex::Real const full_area = cell_size[(idim+1)%3]*cell_size[(idim+2)%3]; - auto const &face_areas_dim = face_areas[idim]->array(mfi); - - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - face_areas_dim(i, j, k) *= full_area; - }); - - } - } -} - -#endif diff --git a/Source/EmbeddedBoundary/EmbeddedBoundaryInit.H b/Source/EmbeddedBoundary/EmbeddedBoundaryInit.H new file mode 100644 index 00000000000..ed29fe5b688 --- /dev/null +++ b/Source/EmbeddedBoundary/EmbeddedBoundaryInit.H @@ -0,0 +1,141 @@ +/* Copyright 2021-2025 Lorenzo Giacomel, Luca Fedeli + * + * This file is part of WarpX.
+ * + * License: BSD-3-Clause-LBNL + */ + +#ifndef WARPX_EMBEDDED_BOUNDARY_EMBEDDED_BOUNDARY_INIT_H_ +#define WARPX_EMBEDDED_BOUNDARY_EMBEDDED_BOUNDARY_INIT_H_ + +#include "Enabled.H" + +#ifdef AMREX_USE_EB + +#include + +#include +#include +#include +#include + +#include + +namespace warpx::embedded_boundary +{ + + /** \brief Set a flag to indicate in which cells a particle should deposit charge/current + * with a reduced, order 1 shape. + * + * More specifically, the flag is set to 1 if any of the neighboring cells over which the + * particle shape might extend are either partially or fully covered by an embedded boundary. + * This ensures that a particle in this cell deposits with an order 1 shape, which in turn + * makes sure that the particle never deposits any charge in a partially or fully covered cell. + * + * \param[in] eb_reduce_particle_shape multifab to be filled with 1s and 0s + * \param[in] eb_fact EB factory + * \param[in] particle_shape_order order of the particle shape function + * \param[in] periodicity TODO Geom(0).periodicity() + */ + void MarkReducedShapeCells ( + std::unique_ptr<amrex::iMultiFab> & eb_reduce_particle_shape, + amrex::EBFArrayBoxFactory const & eb_fact, + int particle_shape_order, + const amrex::Periodicity& periodicity); + + /** \brief Set a flag to indicate on which grid points the field `field` + * should be updated, depending on their position relative to the embedded boundary. + * + * This function is used by all finite-difference solvers, except the + * ECT solver, which instead uses `MarkUpdateECellsECT` and `MarkUpdateBCellsECT`. + * It uses a stair-case approximation of the embedded boundary: + * If a grid point touches cells that are either partially or fully covered + * by the embedded boundary: the corresponding field is not updated. + * + * More specifically, this function fills the iMultiFabs in `eb_update` + * (which have the same indexType as the MultiFabs in `field`) with 1 + * or 0, depending on whether the grid point should be updated or not. + */ + void MarkUpdateCellsStairCase ( + std::array< std::unique_ptr<amrex::iMultiFab>,3> & eb_update, + ablastr::fields::VectorField const & field, + amrex::EBFArrayBoxFactory const & eb_fact ); + + /** \brief Set a flag to indicate on which grid points the E field + * should be updated, depending on their position relative to the embedded boundary. + * + * This function is used by ECT solver. The E field is not updated if + * the edge on which it is defined is fully covered by the embedded boundary. + * + * More specifically, this function fills the iMultiFabs in `eb_update_E` + * (which have the same indexType as the E field) with 1 or 0, depending + * on whether the grid point should be updated or not. + */ + void MarkUpdateECellsECT ( + std::array< std::unique_ptr<amrex::iMultiFab>,3> & eb_update_E, + ablastr::fields::VectorField const& edge_lengths ); + + /** \brief Set a flag to indicate on which grid points the B field + * should be updated, depending on their position relative to the embedded boundary. + * + * This function is used by ECT solver. The B field is not updated if + * the face on which it is defined is fully covered by the embedded boundary. + * + * More specifically, this function fills the iMultiFabs in `eb_update_B` + * (which have the same indexType as the B field) with 1 or 0, depending + * on whether the grid point should be updated or not.
+ */ + void MarkUpdateBCellsECT ( + std::array< std::unique_ptr<amrex::iMultiFab>,3> & eb_update_B, + ablastr::fields::VectorField const& face_areas, + ablastr::fields::VectorField const& edge_lengths ); + + /** + * \brief Initialize information for cell extensions. + * The flags convention for m_flag_info_face is as follows + * - 0 for unstable cells + * - 1 for stable cells which have not been intruded + * - 2 for stable cells which have been intruded + * Here we cannot know if a cell is intruded or not so we initialize all stable cells with 1 + */ + void MarkExtensionCells( + const std::array<amrex::Real,3>& cell_size, + std::array< std::unique_ptr<amrex::iMultiFab>, 3 > & flag_info_face, + std::array< std::unique_ptr<amrex::iMultiFab>, 3 > & flag_ext_face, + const ablastr::fields::VectorField& b_field, + const ablastr::fields::VectorField& face_areas, + const ablastr::fields::VectorField& edge_lengths, + const ablastr::fields::VectorField& area_mod); + + /** + * \brief Compute the length of the mesh edges. Here the length is a value in [0, 1]. + * An edge of length 0 is fully covered. + */ + void ComputeEdgeLengths ( + ablastr::fields::VectorField& edge_lengths, + const amrex::EBFArrayBoxFactory& eb_fact); + /** + * \brief Compute the area of the mesh faces. Here the area is a value in [0, 1]. + * A face of area 0 is fully covered. + */ + void ComputeFaceAreas ( + ablastr::fields::VectorField& face_areas, + const amrex::EBFArrayBoxFactory& eb_fact); + + /** + * \brief Scale the edge lengths by the mesh width to obtain the real lengths. + */ + void ScaleEdges ( + ablastr::fields::VectorField& edge_lengths, + const std::array<amrex::Real,3>& cell_size); + /** + * \brief Scale the face areas by the mesh width to obtain the real areas. + */ + void ScaleAreas ( + ablastr::fields::VectorField& face_areas, + const std::array<amrex::Real,3>& cell_size); +} + +#endif + +#endif //WARPX_EMBEDDED_BOUNDARY_EMBEDDED_BOUNDARY_INIT_H_ diff --git a/Source/EmbeddedBoundary/EmbeddedBoundaryInit.cpp b/Source/EmbeddedBoundary/EmbeddedBoundaryInit.cpp new file mode 100644 index 00000000000..6a4caec2e99 --- /dev/null +++ b/Source/EmbeddedBoundary/EmbeddedBoundaryInit.cpp @@ -0,0 +1,614 @@ +/* Copyright 2021-2025 Lorenzo Giacomel, Luca Fedeli + * + * This file is part of WarpX. + * + * License: BSD-3-Clause-LBNL + */ + +#include "Enabled.H" + +#ifdef AMREX_USE_EB + +#include "EmbeddedBoundaryInit.H" + +#include "Fields.H" +#include "Utils/TextMsg.H" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace web = warpx::embedded_boundary; + +void +web::MarkReducedShapeCells ( + std::unique_ptr<amrex::iMultiFab> & eb_reduce_particle_shape, + amrex::EBFArrayBoxFactory const & eb_fact, + int const particle_shape_order, + const amrex::Periodicity& periodicity) +{ + // Pre-fill array with 0, including in the ghost cells outside of the domain. + // (The guard cells in the domain will be updated by `FillBoundary` at the end of this function.)
+ eb_reduce_particle_shape->setVal(0, eb_reduce_particle_shape->nGrow()); + + // Extract structures for embedded boundaries + amrex::FabArray<amrex::EBCellFlagFab> const& eb_flag = eb_fact.getMultiEBCellFlagFab(); + +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for (amrex::MFIter mfi(*eb_reduce_particle_shape); mfi.isValid(); ++mfi) { + + const amrex::Box& box = mfi.tilebox(); + amrex::Array4<int> const & eb_reduce_particle_shape_arr = eb_reduce_particle_shape->array(mfi); + + // Check if the box (including one layer of guard cells) contains a mix of covered and regular cells + const amrex::Box eb_info_box = mfi.tilebox(amrex::IntVect::TheCellVector()).grow(1); + amrex::FabType const fab_type = eb_flag[mfi].getType( eb_info_box ); + + if (fab_type == amrex::FabType::regular) { // All cells in the box are regular + + // Every cell in box is regular: do not reduce particle shape in any cell + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + eb_reduce_particle_shape_arr(i, j, k) = 0; + }); + + } else if (fab_type == amrex::FabType::covered) { // All cells in the box are covered + + // Every cell in box is fully covered: reduce particle shape + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + eb_reduce_particle_shape_arr(i, j, k) = 1; + }); + + } else { // The box contains a mix of covered and regular cells + + auto const & flag = eb_flag[mfi].array(); + + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + + // Check if any of the neighboring cells over which the particle shape might extend + // are either partially or fully covered. In this case, set eb_reduce_particle_shape_arr + // to one for this cell, to indicate that the particle should use an order 1 shape + // (This ensures that the particle never deposits any charge in a partially or + // fully covered cell, even with higher-order shapes) + // Note: in the code below `particle_shape_order/2` corresponds to the number of neighboring cells + // over which the shape factor could extend, in each direction. + int const i_start = i-particle_shape_order/2; + int const i_end = i+particle_shape_order/2; +#if AMREX_SPACEDIM > 1 + int const j_start = j-particle_shape_order/2; + int const j_end = j+particle_shape_order/2; +#else + int const j_start = j; + int const j_end = j; +#endif +#if AMREX_SPACEDIM > 2 + int const k_start = k-particle_shape_order/2; + int const k_end = k+particle_shape_order/2; +#else + int const k_start = k; + int const k_end = k; +#endif + int reduce_shape = 0; + for (int i_cell = i_start; i_cell <= i_end; ++i_cell) { + for (int j_cell = j_start; j_cell <= j_end; ++j_cell) { + for (int k_cell = k_start; k_cell <= k_end; ++k_cell) { + // `isRegular` returns `false` if the cell is either partially or fully covered.
+ if ( !flag(i_cell, j_cell, k_cell).isRegular() ) { + reduce_shape = 1; + } + } + } + } + eb_reduce_particle_shape_arr(i, j, k) = reduce_shape; + }); + + } + + } + // FillBoundary to set the values in the guard cells + eb_reduce_particle_shape->FillBoundary(periodicity); +} + +void +web::MarkUpdateCellsStairCase ( + std::array< std::unique_ptr<amrex::iMultiFab>,3> & eb_update, + ablastr::fields::VectorField const& field, + amrex::EBFArrayBoxFactory const & eb_fact ) +{ + + using ablastr::fields::Direction; + using warpx::fields::FieldType; + + // Extract structures for embedded boundaries + amrex::FabArray<amrex::EBCellFlagFab> const& eb_flag = eb_fact.getMultiEBCellFlagFab(); + + for (int idim = 0; idim < 3; ++idim) { + +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for (amrex::MFIter mfi(*field[idim]); mfi.isValid(); ++mfi) { + + const amrex::Box& box = mfi.tilebox(); + amrex::Array4<int> const & eb_update_arr = eb_update[idim]->array(mfi); + + // Check if the box (including one layer of guard cells) contains a mix of covered and regular cells + const amrex::Box eb_info_box = mfi.tilebox(amrex::IntVect::TheCellVector()).grow(1); + amrex::FabType const fab_type = eb_flag[mfi].getType( eb_info_box ); + + if (fab_type == amrex::FabType::regular) { // All cells in the box are regular + + // Every cell in box is regular: update field in every cell + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + eb_update_arr(i, j, k) = 1; + }); + + } else if (fab_type == amrex::FabType::covered) { // All cells in the box are covered + + // Every cell in box is fully covered: do not update field + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + eb_update_arr(i, j, k) = 0; + }); + + } else { // The box contains a mix of covered and regular cells + + auto const & flag = eb_flag[mfi].array(); + auto index_type = field[idim]->ixType(); + + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + + // Stair-case approximation: If neighboring cells of this gridpoint + // are either partially or fully covered: do not update field + + // The number of cells that we need to check depend on the index type + // of the `eb_update_arr` in each direction. + // If `eb_update_arr` is nodal in a given direction, we need to check the cells + // to the left and right of this nodal gridpoint. + // For instance, if `eb_update_arr` is nodal in the first dimension, we need + // to check the cells at index i-1 and at index i, since, with AMReX indexing conventions, + // these are the neighboring cells for the nodal gripoint at index i. + // If `eb_update_arr` is cell-centerd in a given direction, we only need to check + // the cell at the same position (e.g., in the first dimension: the cell at index i). + int const i_start = ( index_type.nodeCentered(0) )? i-1 : i; +#if AMREX_SPACEDIM > 1 + int const j_start = ( index_type.nodeCentered(1) )? j-1 : j; +#else + int const j_start = j; +#endif +#if AMREX_SPACEDIM > 2 + int const k_start = ( index_type.nodeCentered(2) )? k-1 : k; +#else + int const k_start = k; +#endif + // Loop over neighboring cells + int eb_update_flag = 1; + for (int i_cell = i_start; i_cell <= i; ++i_cell) { + for (int j_cell = j_start; j_cell <= j; ++j_cell) { + for (int k_cell = k_start; k_cell <= k; ++k_cell) { + // If one of the neighboring is either partially or fully covered + // (i.e. if they are not regular cells), do not update field + // (`isRegular` returns `false` if the cell is either partially or fully covered.)
+ if ( !flag(i_cell, j_cell, k_cell).isRegular() ) { + eb_update_flag = 0; + } + } + } + } + eb_update_arr(i, j, k) = eb_update_flag; + }); + + } + + } + + } + +} + +void +web::MarkUpdateECellsECT ( + std::array< std::unique_ptr<amrex::iMultiFab>,3> & eb_update_E, + ablastr::fields::VectorField const& edge_lengths ) +{ + +#ifdef AMREX_USE_OMP
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for ( amrex::MFIter mfi(*eb_update_E[0], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { + + const amrex::Box& tbx = mfi.tilebox( eb_update_E[0]->ixType().toIntVect(), eb_update_E[0]->nGrowVect() ); + const amrex::Box& tby = mfi.tilebox( eb_update_E[1]->ixType().toIntVect(), eb_update_E[1]->nGrowVect() ); + const amrex::Box& tbz = mfi.tilebox( eb_update_E[2]->ixType().toIntVect(), eb_update_E[2]->nGrowVect() ); + + amrex::Array4<int> const & eb_update_Ex_arr = eb_update_E[0]->array(mfi); + amrex::Array4<int> const & eb_update_Ey_arr = eb_update_E[1]->array(mfi); + amrex::Array4<int> const & eb_update_Ez_arr = eb_update_E[2]->array(mfi); + + amrex::Array4<amrex::Real> const & lx_arr = edge_lengths[0]->array(mfi); + amrex::Array4<amrex::Real> const & lz_arr = edge_lengths[2]->array(mfi); +#if defined(WARPX_DIM_3D) + amrex::Array4<amrex::Real> const & ly_arr = edge_lengths[1]->array(mfi); +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + amrex::Dim3 const lx_lo = amrex::lbound(lx_arr); + amrex::Dim3 const lx_hi = amrex::ubound(lx_arr); + amrex::Dim3 const lz_lo = amrex::lbound(lz_arr); + amrex::Dim3 const lz_hi = amrex::ubound(lz_arr); +#endif + + amrex::ParallelFor (tbx, tby, tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + // Do not update Ex if the edge on which it lives is fully covered + eb_update_Ex_arr(i, j, k) = (lx_arr(i, j, k) == 0)? 0 : 1; + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { +#ifdef WARPX_DIM_3D + // In 3D: Do not update Ey if the edge on which it lives is fully covered + eb_update_Ey_arr(i, j, k) = (ly_arr(i, j, k) == 0)? 0 : 1; +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + // In XZ and RZ: Ey is associated with a mesh node, + // so we need to check if the mesh node is covered + if((lx_arr(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)==0) + ||(lx_arr(std::max(i-1, lx_lo.x), std::min(j , lx_hi.y), k)==0) + ||(lz_arr(std::min(i , lz_hi.x), std::min(j , lz_hi.y), k)==0) + ||(lz_arr(std::min(i , lz_hi.x), std::max(j-1, lz_lo.y), k)==0)) { + eb_update_Ey_arr(i, j, k) = 0; + } else { + eb_update_Ey_arr(i, j, k) = 1; + } +#endif + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + // Do not update Ez if the edge on which it lives is fully covered + eb_update_Ez_arr(i, j, k) = (lz_arr(i, j, k) == 0)?
0 : 1; + } + ); + + } +} + +void +web::MarkUpdateBCellsECT ( + std::array< std::unique_ptr<amrex::iMultiFab>,3> & eb_update_B, + ablastr::fields::VectorField const& face_areas, + ablastr::fields::VectorField const& edge_lengths ) +{ + +#ifdef AMREX_USE_OMP
#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for ( amrex::MFIter mfi(*eb_update_B[0], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { + + const amrex::Box& tbx = mfi.tilebox( eb_update_B[0]->ixType().toIntVect(), eb_update_B[0]->nGrowVect() ); + const amrex::Box& tby = mfi.tilebox( eb_update_B[1]->ixType().toIntVect(), eb_update_B[1]->nGrowVect() ); + const amrex::Box& tbz = mfi.tilebox( eb_update_B[2]->ixType().toIntVect(), eb_update_B[2]->nGrowVect() ); + + amrex::Array4<int> const & eb_update_Bx_arr = eb_update_B[0]->array(mfi); + amrex::Array4<int> const & eb_update_By_arr = eb_update_B[1]->array(mfi); + amrex::Array4<int> const & eb_update_Bz_arr = eb_update_B[2]->array(mfi); + +#ifdef WARPX_DIM_3D + amrex::Array4<amrex::Real> const & Sx_arr = face_areas[0]->array(mfi); + amrex::Array4<amrex::Real> const & Sy_arr = face_areas[1]->array(mfi); + amrex::Array4<amrex::Real> const & Sz_arr = face_areas[2]->array(mfi); + amrex::ignore_unused(edge_lengths); +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + amrex::Array4<amrex::Real> const & Sy_arr = face_areas[1]->array(mfi); + amrex::Array4<amrex::Real> const & lx_arr = edge_lengths[0]->array(mfi); + amrex::Array4<amrex::Real> const & lz_arr = edge_lengths[2]->array(mfi); +#endif + amrex::ParallelFor (tbx, tby, tbz, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { +#ifdef WARPX_DIM_3D + // In 3D: do not update Bx if the face on which it lives is fully covered + eb_update_Bx_arr(i, j, k) = (Sx_arr(i, j, k) == 0)? 0 : 1; +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + //In XZ and RZ, Bx lives on a z-edge ; do not update if fully covered + eb_update_Bx_arr(i, j, k) = (lz_arr(i, j, k) == 0)? 0 : 1; +#endif + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { + // Do not update By if the face on which it lives is fully covered + eb_update_By_arr(i, j, k) = (Sy_arr(i, j, k) == 0)? 0 : 1; + }, + [=] AMREX_GPU_DEVICE (int i, int j, int k) { +#ifdef WARPX_DIM_3D + // In 3D: do not update Bz if the face on which it lives is fully covered + eb_update_Bz_arr(i, j, k) = (Sz_arr(i, j, k) == 0)? 0 : 1; +#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + //In XZ and RZ, Bz lives on a x-edge ; do not update if fully covered + eb_update_Bz_arr(i, j, k) = (lx_arr(i, j, k) == 0)?
0 : 1; +#endif + } + ); + + } +} + +void +web::MarkExtensionCells ( + const std::array<amrex::Real,3>& cell_size, + std::array< std::unique_ptr<amrex::iMultiFab>, 3 > & flag_info_face, + std::array< std::unique_ptr<amrex::iMultiFab>, 3 > & flag_ext_face, + const ablastr::fields::VectorField& b_field, + const ablastr::fields::VectorField& face_areas, + const ablastr::fields::VectorField& edge_lengths, + const ablastr::fields::VectorField& area_mod) +{ + using ablastr::fields::Direction; + using warpx::fields::FieldType; + +#ifdef WARPX_DIM_RZ + amrex::ignore_unused(cell_size, flag_info_face, flag_ext_face, b_field, + face_areas, edge_lengths, area_mod); + +#elif !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) + + WARPX_ABORT_WITH_MESSAGE("MarkExtensionCells only implemented in 2D and 3D"); + +#else + + for (int idim = 0; idim < 3; ++idim) { + +# if defined(WARPX_DIM_XZ) + if (idim == 0 || idim == 2) { + flag_info_face[idim]->setVal(0.); + flag_ext_face[idim]->setVal(0.); + continue; + } +# endif + for (amrex::MFIter mfi(*b_field[idim]); mfi.isValid(); ++mfi) { + auto* face_areas_idim_max_lev = face_areas[idim]; + + const amrex::Box& box = mfi.tilebox(face_areas_idim_max_lev->ixType().toIntVect(), + face_areas_idim_max_lev->nGrowVect() ); + + auto const& S = face_areas_idim_max_lev->array(mfi); + auto const& flag_info_face_data = flag_info_face[idim]->array(mfi); + auto const& flag_ext_face_data = flag_ext_face[idim]->array(mfi); + auto const& lx = edge_lengths[0]->array(mfi); + auto const& ly = edge_lengths[1]->array(mfi); + auto const& lz = edge_lengths[2]->array(mfi); + auto const& mod_areas_dim_data = area_mod[idim]->array(mfi); + + const amrex::Real dx = cell_size[0]; + const amrex::Real dy = cell_size[1]; + const amrex::Real dz = cell_size[2]; + + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + // Minimal area for this cell to be stable + mod_areas_dim_data(i, j, k) = S(i, j, k); + double S_stab; + if (idim == 0){ + S_stab = 0.5 * std::max({ly(i, j, k) * dz, ly(i, j, k + 1) * dz, + lz(i, j, k) * dy, lz(i, j + 1, k) * dy}); + }else if (idim == 1){ + +# if defined(WARPX_DIM_XZ) + S_stab = 0.5 * std::max({lx(i, j, k) * dz, lx(i, j + 1, k) * dz, + lz(i, j, k) * dx, lz(i + 1, j, k) * dx}); +# else + S_stab = 0.5 * std::max({lx(i, j, k) * dz, lx(i, j, k + 1) * dz, + lz(i, j, k) * dx, lz(i + 1, j, k) * dx}); +# endif + }else { + S_stab = 0.5 * std::max({lx(i, j, k) * dy, lx(i, j + 1, k) * dy, + ly(i, j, k) * dx, ly(i + 1, j, k) * dx}); + } + + // Does this face need to be extended? + // The difference between flag_info_face and flag_ext_face is that: + // - for every face flag_info_face contains a: + // * 0 if the face needs to be extended + // * 1 if the face is large enough to lend area to other faces + // * 2 if the face is actually intruded by other face + // Here we only take care of the first two cases. The entries corresponding + // to the intruded faces are going to be set in the function ComputeFaceExtensions + // - for every face flag_ext_face contains a: + // * 1 if the face needs to be extended + // * 0 otherwise + // In the function ComputeFaceExtensions, after the cells are extended, the + // corresponding entries in flag_ext_face are set to zero. This helps to keep + // track of which cells could not be extended + flag_ext_face_data(i, j, k) = int(S(i, j, k) < S_stab && S(i, j, k) > 0); + if(flag_ext_face_data(i, j, k)){ + flag_info_face_data(i, j, k) = 0; + } + // Is this face available to lend area to other faces?
+ // The criterion is that the face has to be interior and not already unstable itself + if(int(S(i, j, k) > 0 && !flag_ext_face_data(i, j, k))) { + flag_info_face_data(i, j, k) = 1; + } + }); + } + } +#endif +} + +void +web::ComputeEdgeLengths ( + ablastr::fields::VectorField& edge_lengths, + const amrex::EBFArrayBoxFactory& eb_fact) +{ + BL_PROFILE("ComputeEdgeLengths"); + +#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) + WARPX_ABORT_WITH_MESSAGE("ComputeEdgeLengths only implemented in 2D and 3D"); +#endif + + auto const &flags = eb_fact.getMultiEBCellFlagFab(); + auto const &edge_centroid = eb_fact.getEdgeCent(); + for (int idim = 0; idim < 3; ++idim){ +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + if (idim == 1) { + edge_lengths[1]->setVal(0.); + continue; + } +#endif + for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi){ + amrex::Box const box = mfi.tilebox(edge_lengths[idim]->ixType().toIntVect(), + edge_lengths[idim]->nGrowVect()); + amrex::FabType const fab_type = flags[mfi].getType(box); + auto const &edge_lengths_dim = edge_lengths[idim]->array(mfi); + + if (fab_type == amrex::FabType::regular) { + // every cell in box is all regular + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + edge_lengths_dim(i, j, k) = 1.; + }); + } else if (fab_type == amrex::FabType::covered) { + // every cell in box is all covered + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + edge_lengths_dim(i, j, k) = 0.; + }); + } else { +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + int idim_amrex = idim; + if (idim == 2) { idim_amrex = 1; } + auto const &edge_cent = edge_centroid[idim_amrex]->const_array(mfi); +#elif defined(WARPX_DIM_3D) + auto const &edge_cent = edge_centroid[idim]->const_array(mfi); +#endif + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + if (edge_cent(i, j, k) == amrex::Real(-1.0)) { + // This edge is all covered + edge_lengths_dim(i, j, k) = 0.; + } else if (edge_cent(i, j, k) == amrex::Real(1.0)) { + // This edge is all open + edge_lengths_dim(i, j, k) = 1.; + } else { + // This edge is cut. + edge_lengths_dim(i, j, k) = 1 - amrex::Math::abs(amrex::Real(2.0) + * edge_cent(i, j, k)); + } + + }); + } + } + } +} + + +void +web::ComputeFaceAreas ( + ablastr::fields::VectorField& face_areas, + const amrex::EBFArrayBoxFactory& eb_fact) +{ + BL_PROFILE("ComputeFaceAreas"); + +#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) + WARPX_ABORT_WITH_MESSAGE("ComputeFaceAreas only implemented in 2D and 3D"); +#endif + + auto const &flags = eb_fact.getMultiEBCellFlagFab(); +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + //In 2D the volume frac is actually the area frac. 
+ auto const &area_frac = eb_fact.getVolFrac(); +#elif defined(WARPX_DIM_3D) + auto const &area_frac = eb_fact.getAreaFrac(); +#endif + + for (int idim = 0; idim < 3; ++idim) { +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + if (idim == 0 || idim == 2) { + face_areas[idim]->setVal(0.); + continue; + } +#endif + for (amrex::MFIter mfi(flags); mfi.isValid(); ++mfi) { + amrex::Box const box = mfi.tilebox(face_areas[idim]->ixType().toIntVect(), + face_areas[idim]->nGrowVect()); + amrex::FabType const fab_type = flags[mfi].getType(box); + auto const &face_areas_dim = face_areas[idim]->array(mfi); + if (fab_type == amrex::FabType::regular) { + // every cell in box is all regular + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + face_areas_dim(i, j, k) = amrex::Real(1.); + }); + } else if (fab_type == amrex::FabType::covered) { + // every cell in box is all covered + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + face_areas_dim(i, j, k) = amrex::Real(0.); + }); + } else { +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + auto const &face = area_frac.const_array(mfi); +#elif defined(WARPX_DIM_3D) + auto const &face = area_frac[idim]->const_array(mfi); +#endif + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + face_areas_dim(i, j, k) = face(i, j, k); + }); + } + } + } +} + +void +web::ScaleEdges ( + ablastr::fields::VectorField& edge_lengths, + const std::array& cell_size) +{ + BL_PROFILE("ScaleEdges"); + +#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) + WARPX_ABORT_WITH_MESSAGE("ScaleEdges only implemented in 2D and 3D"); +#endif + + for (int idim = 0; idim < 3; ++idim){ +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + if (idim == 1) { continue; } +#endif + for (amrex::MFIter mfi(*edge_lengths[0]); mfi.isValid(); ++mfi) { + const amrex::Box& box = mfi.tilebox(edge_lengths[idim]->ixType().toIntVect(), + edge_lengths[idim]->nGrowVect() ); + auto const &edge_lengths_dim = edge_lengths[idim]->array(mfi); + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + edge_lengths_dim(i, j, k) *= cell_size[idim]; + }); + } + } +} + + +void +web::ScaleAreas ( + ablastr::fields::VectorField& face_areas, + const std::array& cell_size) +{ + BL_PROFILE("ScaleAreas"); + +#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) && !defined(WARPX_DIM_RZ) + WARPX_ABORT_WITH_MESSAGE("ScaleAreas only implemented in 2D and 3D"); +#endif + + for (int idim = 0; idim < 3; ++idim) { +#if defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) + if (idim == 0 || idim == 2) { continue; } +#endif + for (amrex::MFIter mfi(*face_areas[0]); mfi.isValid(); ++mfi) { + const amrex::Box& box = mfi.tilebox(face_areas[idim]->ixType().toIntVect(), + face_areas[idim]->nGrowVect() ); + amrex::Real const full_area = cell_size[(idim+1)%3]*cell_size[(idim+2)%3]; + auto const &face_areas_dim = face_areas[idim]->array(mfi); + + amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { + face_areas_dim(i, j, k) *= full_area; + }); + + } + } +} + +#endif diff --git a/Source/EmbeddedBoundary/Make.package b/Source/EmbeddedBoundary/Make.package index e1c6422d99c..fd46932827d 100644 --- a/Source/EmbeddedBoundary/Make.package +++ b/Source/EmbeddedBoundary/Make.package @@ -1,11 +1,11 @@ -CEXE_headers += EmbeddedBoundary.H +CEXE_headers += EmbeddedBoundaryInit.H CEXE_headers += Enabled.H CEXE_headers += ParticleScraper.H CEXE_headers += ParticleBoundaryProcess.H CEXE_headers += DistanceToEB.H CEXE_headers += 
WarpXFaceInfoBox.H -CEXE_sources += EmbeddedBoundary.cpp +CEXE_sources += EmbeddedBoundaryInit.cpp CEXE_sources += Enabled.cpp CEXE_sources += WarpXInitEB.cpp CEXE_sources += WarpXFaceExtensions.cpp diff --git a/Source/EmbeddedBoundary/WarpXInitEB.cpp b/Source/EmbeddedBoundary/WarpXInitEB.cpp index 371bd6a0570..8b7ad7b9d64 100644 --- a/Source/EmbeddedBoundary/WarpXInitEB.cpp +++ b/Source/EmbeddedBoundary/WarpXInitEB.cpp @@ -13,31 +13,17 @@ # include "Utils/Parser/ParserUtils.H" # include "Utils/TextMsg.H" -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include -# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include +# include # include # include @@ -123,404 +109,6 @@ WarpX::InitEB () #endif } -#ifdef AMREX_USE_EB - -void -WarpX::MarkReducedShapeCells ( - std::unique_ptr & eb_reduce_particle_shape, - amrex::EBFArrayBoxFactory const & eb_fact, - int const particle_shape_order ) -{ - // Pre-fill array with 0, including in the ghost cells outside of the domain. - // (The guard cells in the domain will be updated by `FillBoundary` at the end of this function.) - eb_reduce_particle_shape->setVal(0, eb_reduce_particle_shape->nGrow()); - - // Extract structures for embedded boundaries - amrex::FabArray const& eb_flag = eb_fact.getMultiEBCellFlagFab(); - -#ifdef AMREX_USE_OMP -#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) -#endif - for (amrex::MFIter mfi(*eb_reduce_particle_shape); mfi.isValid(); ++mfi) { - - const amrex::Box& box = mfi.tilebox(); - amrex::Array4 const & eb_reduce_particle_shape_arr = eb_reduce_particle_shape->array(mfi); - - // Check if the box (including one layer of guard cells) contains a mix of covered and regular cells - const amrex::Box eb_info_box = mfi.tilebox(amrex::IntVect::TheCellVector()).grow(1); - amrex::FabType const fab_type = eb_flag[mfi].getType( eb_info_box ); - - if (fab_type == amrex::FabType::regular) { // All cells in the box are regular - - // Every cell in box is regular: do not reduce particle shape in any cell - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - eb_reduce_particle_shape_arr(i, j, k) = 0; - }); - - } else if (fab_type == amrex::FabType::covered) { // All cells in the box are covered - - // Every cell in box is fully covered: reduce particle shape - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - eb_reduce_particle_shape_arr(i, j, k) = 1; - }); - - } else { // The box contains a mix of covered and regular cells - - auto const & flag = eb_flag[mfi].array(); - - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - - // Check if any of the neighboring cells over which the particle shape might extend - // are either partially or fully covered. In this case, set eb_reduce_particle_shape_arr - // to one for this cell, to indicate that the particle should use an order 1 shape - // (This ensures that the particle never deposits any charge in a partially or - // fully covered cell, even with higher-order shapes) - // Note: in the code below `particle_shape_order/2` corresponds to the number of neighboring cells - // over which the shape factor could extend, in each direction. 
- int const i_start = i-particle_shape_order/2; - int const i_end = i+particle_shape_order/2; -#if AMREX_SPACEDIM > 1 - int const j_start = j-particle_shape_order/2; - int const j_end = j+particle_shape_order/2; -#else - int const j_start = j; - int const j_end = j; -#endif -#if AMREX_SPACEDIM > 2 - int const k_start = k-particle_shape_order/2; - int const k_end = k+particle_shape_order/2; -#else - int const k_start = k; - int const k_end = k; -#endif - int reduce_shape = 0; - for (int i_cell = i_start; i_cell <= i_end; ++i_cell) { - for (int j_cell = j_start; j_cell <= j_end; ++j_cell) { - for (int k_cell = k_start; k_cell <= k_end; ++k_cell) { - // `isRegular` returns `false` if the cell is either partially or fully covered. - if ( !flag(i_cell, j_cell, k_cell).isRegular() ) { - reduce_shape = 1; - } - } - } - } - eb_reduce_particle_shape_arr(i, j, k) = reduce_shape; - }); - - } - - } - - // FillBoundary to set the values in the guard cells - eb_reduce_particle_shape->FillBoundary(Geom(0).periodicity()); - -} - -void -WarpX::MarkUpdateCellsStairCase ( - std::array< std::unique_ptr,3> & eb_update, - ablastr::fields::VectorField const& field, - amrex::EBFArrayBoxFactory const & eb_fact ) -{ - - using ablastr::fields::Direction; - using warpx::fields::FieldType; - - // Extract structures for embedded boundaries - amrex::FabArray const& eb_flag = eb_fact.getMultiEBCellFlagFab(); - - for (int idim = 0; idim < 3; ++idim) { - -#ifdef AMREX_USE_OMP -#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) -#endif - for (amrex::MFIter mfi(*field[idim]); mfi.isValid(); ++mfi) { - - const amrex::Box& box = mfi.tilebox(); - amrex::Array4 const & eb_update_arr = eb_update[idim]->array(mfi); - - // Check if the box (including one layer of guard cells) contains a mix of covered and regular cells - const amrex::Box eb_info_box = mfi.tilebox(amrex::IntVect::TheCellVector()).grow(1); - amrex::FabType const fab_type = eb_flag[mfi].getType( eb_info_box ); - - if (fab_type == amrex::FabType::regular) { // All cells in the box are regular - - // Every cell in box is regular: update field in every cell - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - eb_update_arr(i, j, k) = 1; - }); - - } else if (fab_type == amrex::FabType::covered) { // All cells in the box are covered - - // Every cell in box is fully covered: do not update field - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - eb_update_arr(i, j, k) = 0; - }); - - } else { // The box contains a mix of covered and regular cells - - auto const & flag = eb_flag[mfi].array(); - auto index_type = field[idim]->ixType(); - - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - - // Stair-case approximation: If neighboring cells of this gridpoint - // are either partially or fully covered: do not update field - - // The number of cells that we need to check depend on the index type - // of the `eb_update_arr` in each direction. - // If `eb_update_arr` is nodal in a given direction, we need to check the cells - // to the left and right of this nodal gridpoint. - // For instance, if `eb_update_arr` is nodal in the first dimension, we need - // to check the cells at index i-1 and at index i, since, with AMReX indexing conventions, - // these are the neighboring cells for the nodal gripoint at index i. - // If `eb_update_arr` is cell-centerd in a given direction, we only need to check - // the cell at the same position (e.g., in the first dimension: the cell at index i). 
- int const i_start = ( index_type.nodeCentered(0) )? i-1 : i; -#if AMREX_SPACEDIM > 1 - int const j_start = ( index_type.nodeCentered(1) )? j-1 : j; -#else - int const j_start = j; -#endif -#if AMREX_SPACEDIM > 2 - int const k_start = ( index_type.nodeCentered(2) )? k-1 : k; -#else - int const k_start = k; -#endif - // Loop over neighboring cells - int eb_update_flag = 1; - for (int i_cell = i_start; i_cell <= i; ++i_cell) { - for (int j_cell = j_start; j_cell <= j; ++j_cell) { - for (int k_cell = k_start; k_cell <= k; ++k_cell) { - // If one of the neighboring is either partially or fully covered - // (i.e. if they are not regular cells), do not update field - // (`isRegular` returns `false` if the cell is either partially or fully covered.) - if ( !flag(i_cell, j_cell, k_cell).isRegular() ) { - eb_update_flag = 0; - } - } - } - } - eb_update_arr(i, j, k) = eb_update_flag; - }); - - } - - } - - } - -} - -void -WarpX::MarkUpdateECellsECT ( - std::array< std::unique_ptr,3> & eb_update_E, - ablastr::fields::VectorField const& edge_lengths ) -{ - -#ifdef AMREX_USE_OMP -#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) -#endif - for ( amrex::MFIter mfi(*eb_update_E[0], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { - - const amrex::Box& tbx = mfi.tilebox( eb_update_E[0]->ixType().toIntVect(), eb_update_E[0]->nGrowVect() ); - const amrex::Box& tby = mfi.tilebox( eb_update_E[1]->ixType().toIntVect(), eb_update_E[1]->nGrowVect() ); - const amrex::Box& tbz = mfi.tilebox( eb_update_E[2]->ixType().toIntVect(), eb_update_E[2]->nGrowVect() ); - - amrex::Array4 const & eb_update_Ex_arr = eb_update_E[0]->array(mfi); - amrex::Array4 const & eb_update_Ey_arr = eb_update_E[1]->array(mfi); - amrex::Array4 const & eb_update_Ez_arr = eb_update_E[2]->array(mfi); - - amrex::Array4 const & lx_arr = edge_lengths[0]->array(mfi); - amrex::Array4 const & lz_arr = edge_lengths[2]->array(mfi); -#if defined(WARPX_DIM_3D) - amrex::Array4 const & ly_arr = edge_lengths[1]->array(mfi); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::Dim3 const lx_lo = amrex::lbound(lx_arr); - amrex::Dim3 const lx_hi = amrex::ubound(lx_arr); - amrex::Dim3 const lz_lo = amrex::lbound(lz_arr); - amrex::Dim3 const lz_hi = amrex::ubound(lz_arr); -#endif - - amrex::ParallelFor (tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - // Do not update Ex if the edge on which it lives is fully covered - eb_update_Ex_arr(i, j, k) = (lx_arr(i, j, k) == 0)? 0 : 1; - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { -#ifdef WARPX_DIM_3D - // In 3D: Do not update Ey if the edge on which it lives is fully covered - eb_update_Ey_arr(i, j, k) = (ly_arr(i, j, k) == 0)? 0 : 1; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - // In XZ and RZ: Ey is associated with a mesh node, - // so we need to check if the mesh node is covered - if((lx_arr(std::min(i , lx_hi.x), std::min(j , lx_hi.y), k)==0) - ||(lx_arr(std::max(i-1, lx_lo.x), std::min(j , lx_hi.y), k)==0) - ||(lz_arr(std::min(i , lz_hi.x), std::min(j , lz_hi.y), k)==0) - ||(lz_arr(std::min(i , lz_hi.x), std::max(j-1, lz_lo.y), k)==0)) { - eb_update_Ey_arr(i, j, k) = 0; - } else { - eb_update_Ey_arr(i, j, k) = 1; - } -#endif - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - // Do not update Ez if the edge on which it lives is fully covered - eb_update_Ez_arr(i, j, k) = (lz_arr(i, j, k) == 0)? 
0 : 1; - } - ); - - } -} - -void -WarpX::MarkUpdateBCellsECT ( - std::array< std::unique_ptr,3> & eb_update_B, - ablastr::fields::VectorField const& face_areas, - ablastr::fields::VectorField const& edge_lengths ) -{ - -#ifdef AMREX_USE_OMP -#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) -#endif - for ( amrex::MFIter mfi(*eb_update_B[0], amrex::TilingIfNotGPU()); mfi.isValid(); ++mfi) { - - const amrex::Box& tbx = mfi.tilebox( eb_update_B[0]->ixType().toIntVect(), eb_update_B[0]->nGrowVect() ); - const amrex::Box& tby = mfi.tilebox( eb_update_B[1]->ixType().toIntVect(), eb_update_B[1]->nGrowVect() ); - const amrex::Box& tbz = mfi.tilebox( eb_update_B[2]->ixType().toIntVect(), eb_update_B[2]->nGrowVect() ); - - amrex::Array4 const & eb_update_Bx_arr = eb_update_B[0]->array(mfi); - amrex::Array4 const & eb_update_By_arr = eb_update_B[1]->array(mfi); - amrex::Array4 const & eb_update_Bz_arr = eb_update_B[2]->array(mfi); - -#ifdef WARPX_DIM_3D - amrex::Array4 const & Sx_arr = face_areas[0]->array(mfi); - amrex::Array4 const & Sy_arr = face_areas[1]->array(mfi); - amrex::Array4 const & Sz_arr = face_areas[2]->array(mfi); - amrex::ignore_unused(edge_lengths); -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - amrex::Array4 const & Sy_arr = face_areas[1]->array(mfi); - amrex::Array4 const & lx_arr = edge_lengths[0]->array(mfi); - amrex::Array4 const & lz_arr = edge_lengths[2]->array(mfi); -#endif - amrex::ParallelFor (tbx, tby, tbz, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { -#ifdef WARPX_DIM_3D - // In 3D: do not update Bx if the face on which it lives is fully covered - eb_update_Bx_arr(i, j, k) = (Sx_arr(i, j, k) == 0)? 0 : 1; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - //In XZ and RZ, Bx lives on a z-edge ; do not update if fully covered - eb_update_Bx_arr(i, j, k) = (lz_arr(i, j, k) == 0)? 0 : 1; -#endif - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { - // Do not update By if the face on which it lives is fully covered - eb_update_By_arr(i, j, k) = (Sy_arr(i, j, k) == 0)? 0 : 1; - }, - [=] AMREX_GPU_DEVICE (int i, int j, int k) { -#ifdef WARPX_DIM_3D - // In 3D: do not update Bz if the face on which it lives is fully covered - eb_update_Bz_arr(i, j, k) = (Sz_arr(i, j, k) == 0)? 0 : 1; -#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ) - //In XZ and RZ, Bz lives on a x-edge ; do not update if fully covered - eb_update_Bz_arr(i, j, k) = (lx_arr(i, j, k) == 0)? 
0 : 1; -#endif - } - ); - - } -} - -void -WarpX::MarkExtensionCells () -{ - using ablastr::fields::Direction; - using warpx::fields::FieldType; - -#ifndef WARPX_DIM_RZ - auto const &cell_size = CellSize(maxLevel()); - -#if !defined(WARPX_DIM_3D) && !defined(WARPX_DIM_XZ) - WARPX_ABORT_WITH_MESSAGE("MarkExtensionCells only implemented in 2D and 3D"); -#endif - - for (int idim = 0; idim < 3; ++idim) { -#if defined(WARPX_DIM_XZ) - if (idim == 0 || idim == 2) { - m_flag_info_face[maxLevel()][idim]->setVal(0.); - m_flag_ext_face[maxLevel()][idim]->setVal(0.); - continue; - } -#endif - for (amrex::MFIter mfi(*m_fields.get(FieldType::Bfield_fp, Direction{idim}, maxLevel())); mfi.isValid(); ++mfi) { - auto* face_areas_idim_max_lev = - m_fields.get(FieldType::face_areas, Direction{idim}, maxLevel()); - - const amrex::Box& box = mfi.tilebox(face_areas_idim_max_lev->ixType().toIntVect(), - face_areas_idim_max_lev->nGrowVect() ); - - auto const &S = face_areas_idim_max_lev->array(mfi); - auto const &flag_info_face = m_flag_info_face[maxLevel()][idim]->array(mfi); - auto const &flag_ext_face = m_flag_ext_face[maxLevel()][idim]->array(mfi); - const auto &lx = m_fields.get(FieldType::edge_lengths, Direction{0}, maxLevel())->array(mfi); - const auto &ly = m_fields.get(FieldType::edge_lengths, Direction{1}, maxLevel())->array(mfi); - const auto &lz = m_fields.get(FieldType::edge_lengths, Direction{2}, maxLevel())->array(mfi); - auto const &mod_areas_dim = m_fields.get(FieldType::area_mod, Direction{idim}, maxLevel())->array(mfi); - - const amrex::Real dx = cell_size[0]; - const amrex::Real dy = cell_size[1]; - const amrex::Real dz = cell_size[2]; - - amrex::ParallelFor(box, [=] AMREX_GPU_DEVICE (int i, int j, int k) { - // Minimal area for this cell to be stable - mod_areas_dim(i, j, k) = S(i, j, k); - double S_stab; - if (idim == 0){ - S_stab = 0.5 * std::max({ly(i, j, k) * dz, ly(i, j, k + 1) * dz, - lz(i, j, k) * dy, lz(i, j + 1, k) * dy}); - }else if (idim == 1){ -#ifdef WARPX_DIM_XZ - S_stab = 0.5 * std::max({lx(i, j, k) * dz, lx(i, j + 1, k) * dz, - lz(i, j, k) * dx, lz(i + 1, j, k) * dx}); -#elif defined(WARPX_DIM_3D) - S_stab = 0.5 * std::max({lx(i, j, k) * dz, lx(i, j, k + 1) * dz, - lz(i, j, k) * dx, lz(i + 1, j, k) * dx}); -#endif - }else { - S_stab = 0.5 * std::max({lx(i, j, k) * dy, lx(i, j + 1, k) * dy, - ly(i, j, k) * dx, ly(i + 1, j, k) * dx}); - } - - // Does this face need to be extended? - // The difference between flag_info_face and flag_ext_face is that: - // - for every face flag_info_face contains a: - // * 0 if the face needs to be extended - // * 1 if the face is large enough to lend area to other faces - // * 2 if the face is actually intruded by other face - // Here we only take care of the first two cases. The entries corresponding - // to the intruded faces are going to be set in the function ComputeFaceExtensions - // - for every face flag_ext_face contains a: - // * 1 if the face needs to be extended - // * 0 otherwise - // In the function ComputeFaceExtensions, after the cells are extended, the - // corresponding entries in flag_ext_face are set to zero. This helps to keep - // track of which cells could not be extended - flag_ext_face(i, j, k) = int(S(i, j, k) < S_stab && S(i, j, k) > 0); - if(flag_ext_face(i, j, k)){ - flag_info_face(i, j, k) = 0; - } - // Is this face available to lend area to other faces? 
- // The criterion is that the face has to be interior and not already unstable itself - if(int(S(i, j, k) > 0 && !flag_ext_face(i, j, k))) { - flag_info_face(i, j, k) = 1; - } - }); - } - } -#endif -} -#endif - void WarpX::ComputeDistanceToEB () { diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index b2885f8ca6a..9c2784fe867 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -18,7 +18,7 @@ #include "Diagnostics/ReducedDiags/MultiReducedDiags.H" #include "EmbeddedBoundary/Enabled.H" #ifdef AMREX_USE_EB -# include "EmbeddedBoundary/EmbeddedBoundary.H" +# include "EmbeddedBoundary/EmbeddedBoundaryInit.H" #endif #include "Fields.H" #include "FieldSolver/ElectrostaticSolvers/ElectrostaticSolver.H" @@ -1247,22 +1247,27 @@ void WarpX::InitializeEBGridData (int lev) warpx::embedded_boundary::ScaleAreas(face_areas_lev, CellSize(lev)); // Compute additional quantities required for the ECT solver - MarkExtensionCells(); + const auto& area_mod = m_fields.get_alldirs(FieldType::area_mod, maxLevel()); + warpx::embedded_boundary::MarkExtensionCells( + CellSize(maxLevel()), m_flag_info_face[maxLevel()], m_flag_ext_face[maxLevel()], + m_fields.get_alldirs(FieldType::Bfield_fp, maxLevel()), + face_areas_lev, + edge_lengths_lev, area_mod); ComputeFaceExtensions(); // Mark on which grid points E should be updated - MarkUpdateECellsECT( m_eb_update_E[lev], edge_lengths_lev ); + warpx::embedded_boundary::MarkUpdateECellsECT( m_eb_update_E[lev], edge_lengths_lev ); // Mark on which grid points B should be updated - MarkUpdateBCellsECT( m_eb_update_B[lev], face_areas_lev, edge_lengths_lev); + warpx::embedded_boundary::MarkUpdateBCellsECT( m_eb_update_B[lev], face_areas_lev, edge_lengths_lev); } else { // Mark on which grid points E should be updated (stair-case approximation) - MarkUpdateCellsStairCase( + warpx::embedded_boundary::MarkUpdateCellsStairCase( m_eb_update_E[lev], m_fields.get_alldirs(FieldType::Efield_fp, lev), eb_fact ); // Mark on which grid points B should be updated (stair-case approximation) - MarkUpdateCellsStairCase( + warpx::embedded_boundary::MarkUpdateCellsStairCase( m_eb_update_B[lev], m_fields.get_alldirs(FieldType::Bfield_fp, lev), eb_fact ); @@ -1271,7 +1276,7 @@ void WarpX::InitializeEBGridData (int lev) } ComputeDistanceToEB(); - MarkReducedShapeCells( m_eb_reduce_particle_shape[lev], eb_fact, WarpX::nox ); + warpx::embedded_boundary::MarkReducedShapeCells( m_eb_reduce_particle_shape[lev], eb_fact, nox, Geom(0).periodicity()); } #else diff --git a/Source/WarpX.H b/Source/WarpX.H index 7d164a9e685..a1595210389 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -959,85 +959,6 @@ public: void InitEB (); -#ifdef AMREX_USE_EB - - /** \brief Set a flag to indicate in which cells a particle should deposit charge/current - * with a reduced, order 1 shape. - * - * More specifically, the flag is set to 1 if any of the neighboring cells over which the - * particle shape might extend are either partially or fully covered by an embedded boundary. - * This ensures that a particle in this cell deposits with an order 1 shape, which in turn - * makes sure that the particle never deposits any charge in a partially or fully covered cell. 
- * - * \param[in] eb_reduce_particle_shape multifab to be filled with 1s and 0s - * \param[in] eb_fact EB factory - * \param[in] particle_shape_order order of the particle shape function - */ - - - void MarkReducedShapeCells ( - std::unique_ptr & eb_reduce_particle_shape, - amrex::EBFArrayBoxFactory const & eb_fact, - int particle_shape_order ); - - /** \brief Set a flag to indicate on which grid points the field `field` - * should be updated, depending on their position relative to the embedded boundary. - * - * This function is used by all finite-difference solvers, except the - * ECT solver, which instead uses `MarkUpdateECellsECT` and `MarkUpdateBCellsECT`. - * It uses a stair-case approximation of the embedded boundary: - * If a grid point touches cells that are either partially or fully covered - * by the embedded boundary: the corresponding field is not updated. - * - * More specifically, this function fills the iMultiFabs in `eb_update` - * (which have the same indexType as the MultiFabs in `field`) with 1 - * or 0, depending on whether the grid point should be updated or not. - */ - void MarkUpdateCellsStairCase ( - std::array< std::unique_ptr,3> & eb_update, - ablastr::fields::VectorField const & field, - amrex::EBFArrayBoxFactory const & eb_fact ); - - /** \brief Set a flag to indicate on which grid points the E field - * should be updated, depending on their position relative to the embedded boundary. - * - * This function is used by ECT solver. The E field is not updated if - * the edge on which it is defined is fully covered by the embedded boundary. - * - * More specifically, this function fills the iMultiFabs in `eb_update_E` - * (which have the same indexType as the E field) with 1 or 0, depending - * on whether the grid point should be updated or not. - */ - void MarkUpdateECellsECT ( - std::array< std::unique_ptr,3> & eb_update_E, - ablastr::fields::VectorField const& edge_lengths ); - - /** \brief Set a flag to indicate on which grid points the B field - * should be updated, depending on their position relative to the embedded boundary. - * - * This function is used by ECT solver. The B field is not updated if - * the face on which it is defined is fully covered by the embedded boundary. - * - * More specifically, this function fills the iMultiFabs in `eb_update_B` - * (which have the same indexType as the B field) with 1 or 0, depending - * on whether the grid point should be updated or not. - */ - void MarkUpdateBCellsECT ( - std::array< std::unique_ptr,3> & eb_update_B, - ablastr::fields::VectorField const& face_areas, - ablastr::fields::VectorField const& edge_lengths ); - - /** - * \brief Initialize information for cell extensions. - * The flags convention for m_flag_info_face is as follows - * - 0 for unstable cells - * - 1 for stable cells which have not been intruded - * - 2 for stable cells which have been intruded - * Here we cannot know if a cell is intruded or not so we initialize all stable cells with 1 - */ - void MarkExtensionCells(); -#endif - /** * \brief Compute the level set function used for particle-boundary interaction. 
 */

From 7e339a02d3b3bf9c7b43ce32fae0880ebd080604 Mon Sep 17 00:00:00 2001
From: Luca Fedeli
Date: Fri, 14 Feb 2025 04:23:14 +0100
Subject: [PATCH 232/278] WarpX class: simplify return type of
 get_spectral_solver_fp using `auto&` (#5656)

This PR simplifies the return type of a method of the WarpX class by replacing:

```
# ifdef WARPX_DIM_RZ
    SpectralSolverRZ&
# else
    SpectralSolver&
# endif
```

with

```
auto&
```

---
 Source/WarpX.H | 8 +-------
 1 file changed, 1 insertion(+), 7 deletions(-)

diff --git a/Source/WarpX.H b/Source/WarpX.H
index a1595210389..ce4a846eace 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -1005,13 +1005,7 @@ public:
     void PSATDSubtractCurrentPartialSumsAvg ();

 #ifdef WARPX_USE_FFT
-
-# ifdef WARPX_DIM_RZ
-    SpectralSolverRZ&
-# else
-    SpectralSolver&
-# endif
-    get_spectral_solver_fp (int lev) {return *spectral_solver_fp[lev];}
+    auto& get_spectral_solver_fp (int lev) {return *spectral_solver_fp[lev];}
 #endif

     FiniteDifferenceSolver * get_pointer_fdtd_solver_fp (int lev) { return m_fdtd_solver_fp[lev].get(); }

From eb2627703166d1f437d25711e2bc8bc059ed7c0b Mon Sep 17 00:00:00 2001
From: Arianna Formenti
Date: Fri, 14 Feb 2025 07:55:24 -0800
Subject: [PATCH 233/278] Add reduced diagnostic: 2d differential luminosity
 (#5545)

Adds a luminosity diagnostic differentiated in the energies of two colliding species, called `DifferentialLuminosity2D`.
It is defined as follows:

```math
\begin{align*}
\frac{d^2\mathcal{L}}{dE_1 dE_2}(E_1, E_2, t) = \int_0^t dt'\int d\boldsymbol{x}\, & \int d\boldsymbol{p}_1 \int d\boldsymbol{p}_2\;
\sqrt{ |\boldsymbol{v}_1 - \boldsymbol{v}_2|^2 - |\boldsymbol{v}_1\times\boldsymbol{v}_2|^2/c^2} \\
& f_1(\boldsymbol{x}, \boldsymbol{p}_1, t')f_2(\boldsymbol{x}, \boldsymbol{p}_2, t') \delta(E_1 - E_1(\boldsymbol{p}_1)) \delta(E_2 - E_2(\boldsymbol{p}_2))
\end{align*}
```

where:
* $\boldsymbol{p}_i$ is the momentum of a particle of species $i$
* $E_i$ is the energy of a particle of species $i$, $E_i (\boldsymbol{p}_i) = \sqrt{m_i^2c^4 + c^2 |\boldsymbol{p}_i|^2}$
* $f_i$ is the distribution function of species $i$, normalized such that $\int \int f(\boldsymbol{x}, \boldsymbol{p}, t)d\boldsymbol{x} d\boldsymbol{p} = N$, the number of particles in species $i$ at time $t$

The 2D differential luminosity is given in units of $\text{m}^{-2} \ \text{eV}^{-2}$.
The user must specify the minimum, maximum, and number of bins to discretize the $E_1$ and $E_2$ axes.

The computation of this diagnostic is similar to that of `ParticleHistogram2D`.
The output is a folder containing a set of openPMD files.
The values of the diagnostic are stored in a record labeled `d2L_dE1_dE2`, with axes `E1` and `E2`.
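As an illustration, the accumulated record can be read back with openPMD-viewer; a minimal sketch (the diagnostic name `DifferentialLuminosity2d_beam1_beam2` and the iteration number `80` are taken from the test added below, and are otherwise assumptions to adapt to your setup):

```python
import numpy as np
from openpmd_viewer import OpenPMDTimeSeries

# The output folder is named after the reduced diagnostic
series = OpenPMDTimeSeries("./diags/reducedfiles/DifferentialLuminosity2d_beam1_beam2/")

# d2L_dE1_dE2 accumulates the luminosity up to the requested iteration, in m^-2 eV^-2
d2L_dE1_dE2, info = series.get_field("d2L_dE1_dE2", iteration=80)

# The first axis of the record is E2 and the second is E1, both in eV
assert info.axes[0] == "E2" and info.axes[1] == "E1"

# Integrating over both energy axes recovers the total luminosity up to this iteration
dE1 = info.E1[1] - info.E1[0]
dE2 = info.E2[1] - info.E2[0]
L = np.sum(d2L_dE1_dE2) * dE1 * dE2
```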
---------

Co-authored-by: Remi Lehe
---
 Docs/source/usage/parameters.rst              |  46 ++
 Examples/Tests/diff_lumi_diag/CMakeLists.txt  |   5 +-
 Examples/Tests/diff_lumi_diag/analysis.py     |  57 ++-
 Examples/Tests/diff_lumi_diag/inputs_base_3d  |  17 +-
 .../Diagnostics/ReducedDiags/CMakeLists.txt   |   1 +
 .../ReducedDiags/DifferentialLuminosity2D.H   |  70 +++
 .../ReducedDiags/DifferentialLuminosity2D.cpp | 401 ++++++++++++++++++
 Source/Diagnostics/ReducedDiags/Make.package  |   1 +
 .../ReducedDiags/MultiReducedDiags.cpp        |   2 +
 9 files changed, 583 insertions(+), 17 deletions(-)
 create mode 100644 Source/Diagnostics/ReducedDiags/DifferentialLuminosity2D.H
 create mode 100644 Source/Diagnostics/ReducedDiags/DifferentialLuminosity2D.cpp

diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst
index 253f9ca0071..dc53ae5295f 100644
--- a/Docs/source/usage/parameters.rst
+++ b/Docs/source/usage/parameters.rst
@@ -3626,6 +3626,52 @@ This shifts analysis from post-processing to runtime calculation of reduction op
     * ``<reduced_diags_name>.bin_min`` (`float`, in eV)
         The minimum value of :math:`\mathcal{E}^*` for which the differential luminosity is computed.

+    * ``DifferentialLuminosity2D``
+        This type computes the two-dimensional differential luminosity between two species, defined as:
+
+        .. math::
+
+            \frac{d^2\mathcal{L}}{dE_1 dE_2}(E_1, E_2, t) = \int_0^t dt'\int d\boldsymbol{x}\, \int d\boldsymbol{p}_1 \int d\boldsymbol{p}_2\;
+            \sqrt{ |\boldsymbol{v}_1 - \boldsymbol{v}_2|^2 - |\boldsymbol{v}_1\times\boldsymbol{v}_2|^2/c^2} \\
+            f_1(\boldsymbol{x}, \boldsymbol{p}_1, t')f_2(\boldsymbol{x}, \boldsymbol{p}_2, t') \delta(E_1 - E_1(\boldsymbol{p}_1)) \delta(E_2 - E_2(\boldsymbol{p}_2))
+
+        where :math:`f_i` is the distribution function of species :math:`i`
+        (normalized such that :math:`\int \int f(\boldsymbol{x}, \boldsymbol{p}, t)d\boldsymbol{x} d\boldsymbol{p} = N`
+        is the number of particles in species :math:`i` at time :math:`t`),
+        :math:`\boldsymbol{p}_i` and :math:`E_i (\boldsymbol{p}_i) = \sqrt{m_i^2c^4 + c^2 |\boldsymbol{p}_i|^2}`
+        are, respectively, the momentum and the energy of a particle of the :math:`i`-th species.
+        The 2D differential luminosity is given in units of :math:`\text{m}^{-2}.\text{eV}^{-2}`.
+
+    * ``<reduced_diags_name>.species`` (`list of two strings`)
+        The names of the two species for which the differential luminosity is computed.
+
+    * ``<reduced_diags_name>.bin_number_1`` (`int` > 0)
+        The number of bins in energy :math:`E_1`
+
+    * ``<reduced_diags_name>.bin_max_1`` (`float`, in eV)
+        The maximum value of :math:`E_1` for which the 2D differential luminosity is computed.
+
+    * ``<reduced_diags_name>.bin_min_1`` (`float`, in eV)
+        The minimum value of :math:`E_1` for which the 2D differential luminosity is computed.
+
+    * ``<reduced_diags_name>.bin_number_2`` (`int` > 0)
+        The number of bins in energy :math:`E_2`
+
+    * ``<reduced_diags_name>.bin_max_2`` (`float`, in eV)
+        The maximum value of :math:`E_2` for which the 2D differential luminosity is computed.
+
+    * ``<reduced_diags_name>.bin_min_2`` (`float`, in eV)
+        The minimum value of :math:`E_2` for which the 2D differential luminosity is computed.
+
+    * ``<reduced_diags_name>.file_min_digits`` (`int`) optional (default `6`)
+        The minimum number of digits used for the iteration number appended to the diagnostic file names.
+
+    The output is a ``<reduced_diags_name>`` folder containing a set of openPMD files.
+    The values of the diagnostic are stored in a record labeled `d2L_dE1_dE2`.
+    An example input file and a Python script for loading the DifferentialLuminosity2D
+    reduced diagnostics are given in ``Examples/Tests/diff_lumi_diag/``.
+ * ``Timestep`` This type outputs the simulation's physical timestep (in seconds) at each mesh refinement level. diff --git a/Examples/Tests/diff_lumi_diag/CMakeLists.txt b/Examples/Tests/diff_lumi_diag/CMakeLists.txt index f16449a976c..9a4e58d0e62 100644 --- a/Examples/Tests/diff_lumi_diag/CMakeLists.txt +++ b/Examples/Tests/diff_lumi_diag/CMakeLists.txt @@ -1,6 +1,6 @@ # Add tests (alphabetical order) ############################################## # - +if(WarpX_FFT) add_warpx_test( test_3d_diff_lumi_diag_leptons # name 3 # dims @@ -10,7 +10,9 @@ add_warpx_test( "analysis_default_regression.py --path diags/diag1000080 --rtol 1e-2" # checksum OFF # dependency ) +endif() +if(WarpX_FFT) add_warpx_test( test_3d_diff_lumi_diag_photons # name 3 # dims @@ -20,3 +22,4 @@ add_warpx_test( "analysis_default_regression.py --path diags/diag1000080 --rtol 1e-2" # checksum OFF # dependency ) +endif() diff --git a/Examples/Tests/diff_lumi_diag/analysis.py b/Examples/Tests/diff_lumi_diag/analysis.py index cadb21023ab..f8ed5f79779 100755 --- a/Examples/Tests/diff_lumi_diag/analysis.py +++ b/Examples/Tests/diff_lumi_diag/analysis.py @@ -5,15 +5,20 @@ # In that case, the differential luminosity can be calculated analytically. import os +import re import numpy as np -from read_raw_data import read_reduced_diags_histogram +from openpmd_viewer import OpenPMDTimeSeries -# Extract the differential luminosity from the file -_, _, E_bin, bin_data = read_reduced_diags_histogram( - "./diags/reducedfiles/DifferentialLuminosity_beam1_beam2.txt" -) -dL_dE_sim = bin_data[-1] # Differential luminosity at the end of the simulation +# Extract the 1D differential luminosity from the file +filename = "./diags/reducedfiles/DifferentialLuminosity_beam1_beam2.txt" +with open(filename) as f: + # First line: header, contains the energies + line = f.readline() + E_bin = np.array(list(map(float, re.findall("=(.*?)\(", line)))) +data = np.loadtxt(filename) +dE_bin = E_bin[1] - E_bin[0] +dL_dE_sim = data[-1, 2:] # Differential luminosity at the end of the simulation # Beam parameters N = 1.2e10 @@ -33,21 +38,47 @@ * np.exp(-((E_bin - 2 * E_beam) ** 2) / (2 * sigma_E**2)) ) +# Extract the 2D differential luminosity from the file +series = OpenPMDTimeSeries("./diags/reducedfiles/DifferentialLuminosity2d_beam1_beam2/") +d2L_dE1_dE2_sim, info = series.get_field("d2L_dE1_dE2", iteration=80) + +# Compute the analytical 2D differential luminosity for 2 Gaussian beams +assert info.axes[0] == "E2" +assert info.axes[1] == "E1" +E2, E1 = np.meshgrid(info.E2, info.E1, indexing="ij") +d2L_dE1_dE2_th = ( + N**2 + / (2 * (2 * np.pi) ** 2 * sigma_x * sigma_y * sigma_E1 * sigma_E2) + * np.exp( + -((E1 - E_beam) ** 2) / (2 * sigma_E1**2) + - (E2 - E_beam) ** 2 / (2 * sigma_E2**2) + ) +) + # Extract test name from path test_name = os.path.split(os.getcwd())[1] print("test_name", test_name) # Pick tolerance if "leptons" in test_name: - tol = 1e-2 + tol1 = 0.02 + tol2 = 0.04 elif "photons" in test_name: # In the photons case, the particles are # initialized from a density distribution ; # tolerance is larger due to lower particle statistics - tol = 6e-2 + tol1 = 0.021 + tol2 = 0.06 + +# Check that the 1D diagnostic and analytical result match +error1 = abs(dL_dE_sim - dL_dE_th).max() / abs(dL_dE_th).max() +print("Relative error: ", error1) +print("Tolerance: ", tol1) + +# Check that the 2D and 1D diagnostics match +error2 = abs(d2L_dE1_dE2_sim - d2L_dE1_dE2_th).max() / abs(d2L_dE1_dE2_th).max() +print("Relative error: ", error2) +print("Tolerance: ", 
tol2)
-# Check that the simulation result and analytical result match
-error = abs(dL_dE_sim - dL_dE_th).max() / abs(dL_dE_th).max()
-print("Relative error: ", error)
-print("Tolerance: ", tol)
-assert error < tol
+assert error1 < tol1
+assert error2 < tol2
diff --git a/Examples/Tests/diff_lumi_diag/inputs_base_3d b/Examples/Tests/diff_lumi_diag/inputs_base_3d
index ba3c823b52b..0c65850e82b 100644
--- a/Examples/Tests/diff_lumi_diag/inputs_base_3d
+++ b/Examples/Tests/diff_lumi_diag/inputs_base_3d
@@ -28,6 +28,7 @@ my_constants.dt = sigmaz/clight/10.
 #################################
 ####### GENERAL PARAMETERS ######
 #################################
+
 stop_time = T
 amr.n_cell = nx ny nz
 amr.max_grid_size = 128
@@ -93,11 +94,21 @@ diag1.dump_last_timestep = 1
 diag1.species = beam1 beam2

 # REDUCED
-warpx.reduced_diags_names = DifferentialLuminosity_beam1_beam2
+warpx.reduced_diags_names = DifferentialLuminosity_beam1_beam2 DifferentialLuminosity2d_beam1_beam2

 DifferentialLuminosity_beam1_beam2.type = DifferentialLuminosity
-DifferentialLuminosity_beam1_beam2.intervals = 5
+DifferentialLuminosity_beam1_beam2.intervals = 80
 DifferentialLuminosity_beam1_beam2.species = beam1 beam2
 DifferentialLuminosity_beam1_beam2.bin_number = 128
 DifferentialLuminosity_beam1_beam2.bin_max = 2.1*beam_energy_eV
-DifferentialLuminosity_beam1_beam2.bin_min = 1.9*beam_energy_eV
+DifferentialLuminosity_beam1_beam2.bin_min = 0
+
+DifferentialLuminosity2d_beam1_beam2.type = DifferentialLuminosity2D
+DifferentialLuminosity2d_beam1_beam2.intervals = 80
+DifferentialLuminosity2d_beam1_beam2.species = beam1 beam2
+DifferentialLuminosity2d_beam1_beam2.bin_number_1 = 128
+DifferentialLuminosity2d_beam1_beam2.bin_max_1 = 1.45*beam_energy_eV
+DifferentialLuminosity2d_beam1_beam2.bin_min_1 = 0
+DifferentialLuminosity2d_beam1_beam2.bin_number_2 = 128
+DifferentialLuminosity2d_beam1_beam2.bin_max_2 = 1.45*beam_energy_eV
+DifferentialLuminosity2d_beam1_beam2.bin_min_2 = 0
diff --git a/Source/Diagnostics/ReducedDiags/CMakeLists.txt b/Source/Diagnostics/ReducedDiags/CMakeLists.txt
index 4fbfc489aba..c548553b875 100644
--- a/Source/Diagnostics/ReducedDiags/CMakeLists.txt
+++ b/Source/Diagnostics/ReducedDiags/CMakeLists.txt
@@ -6,6 +6,7 @@ foreach(D IN LISTS WarpX_DIMS)
         ChargeOnEB.cpp
         ColliderRelevant.cpp
         DifferentialLuminosity.cpp
+        DifferentialLuminosity2D.cpp
         FieldEnergy.cpp
         FieldMaximum.cpp
         FieldMomentum.cpp
diff --git a/Source/Diagnostics/ReducedDiags/DifferentialLuminosity2D.H b/Source/Diagnostics/ReducedDiags/DifferentialLuminosity2D.H
new file mode 100644
index 00000000000..7ffefec324e
--- /dev/null
+++ b/Source/Diagnostics/ReducedDiags/DifferentialLuminosity2D.H
@@ -0,0 +1,70 @@
+/* Copyright 2023 The WarpX Community
+ *
+ * This file is part of WarpX.
+ *
+ * Authors: Arianna Formenti, Remi Lehe
+ * License: BSD-3-Clause-LBNL
+ */
+
+#ifndef WARPX_DIAGNOSTICS_REDUCEDDIAGS_DIFFERENTIALLUMINOSITY2D_H_
+#define WARPX_DIAGNOSTICS_REDUCEDDIAGS_DIFFERENTIALLUMINOSITY2D_H_
+
+#include "ReducedDiags.H"
+#include
+#include
+
+#include
+#include
+#include
+
+/**
+ * This class contains the 2D differential luminosity diagnostic.
+ */ +class DifferentialLuminosity2D : public ReducedDiags +{ +public: + + /** + * constructor + * @param[in] rd_name reduced diags names + */ + DifferentialLuminosity2D(const std::string& rd_name); + + /// File type + std::string m_openpmd_backend {"default"}; + + /// minimum number of digits for file suffix (file-based only supported for now) */ + int m_file_min_digits = 6; + + /// name of the two colliding species + std::vector m_beam_name; + + /// number of bins for the c.o.m. energy of the 2 species + int m_bin_num_1; + int m_bin_num_2; + + /// max and min bin values + amrex::Real m_bin_max_1; + amrex::Real m_bin_min_1; + amrex::Real m_bin_max_2; + amrex::Real m_bin_min_2; + + /// bin size + amrex::Real m_bin_size_1; + amrex::Real m_bin_size_2; + + /// output data + amrex::TableData m_h_data_2D; + + void ComputeDiags(int step) final; + + void WriteToFile (int step) const final; + +private: + + /// output table in which to accumulate the luminosity across timesteps + amrex::TableData m_d_data_2D; + +}; + +#endif // WARPX_DIAGNOSTICS_REDUCEDDIAGS_DIFFERENTIALLUMINOSITY2D_H_ diff --git a/Source/Diagnostics/ReducedDiags/DifferentialLuminosity2D.cpp b/Source/Diagnostics/ReducedDiags/DifferentialLuminosity2D.cpp new file mode 100644 index 00000000000..b3968b9fb02 --- /dev/null +++ b/Source/Diagnostics/ReducedDiags/DifferentialLuminosity2D.cpp @@ -0,0 +1,401 @@ +/* Copyright 2023 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: Arianna Formenti, Yinjian Zhao, Remi Lehe + * License: BSD-3-Clause-LBNL + */ +#include "DifferentialLuminosity2D.H" + +#include "Diagnostics/ReducedDiags/ReducedDiags.H" +#include "Diagnostics/OpenPMDHelpFunction.H" +#include "Particles/MultiParticleContainer.H" +#include "Particles/Pusher/GetAndSetPosition.H" +#include "Particles/SpeciesPhysicalProperties.H" +#include "Particles/WarpXParticleContainer.H" +#include "Utils/ParticleUtils.H" +#include "Utils/Parser/ParserUtils.H" +#include "Utils/WarpXConst.H" +#include "Utils/TextMsg.H" +#include "Utils/WarpXProfilerWrapper.H" +#include "WarpX.H" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef WARPX_USE_OPENPMD +# include +#endif + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +using ParticleType = WarpXParticleContainer::ParticleType; +using ParticleTileType = WarpXParticleContainer::ParticleTileType; +using ParticleTileDataType = ParticleTileType::ParticleTileDataType; +using ParticleBins = amrex::DenseBins; +using index_type = ParticleBins::index_type; + +#ifdef WARPX_USE_OPENPMD +namespace io = openPMD; +#endif + +using namespace amrex; + +DifferentialLuminosity2D::DifferentialLuminosity2D (const std::string& rd_name) +: ReducedDiags{rd_name} +{ + // RZ coordinate is not supported +#if (defined WARPX_DIM_RZ) + WARPX_ABORT_WITH_MESSAGE( + "DifferentialLuminosity2D diagnostics does not work in RZ geometry."); +#endif + + // read colliding species names - must be 2 + amrex::ParmParse pp_rd_name(m_rd_name); + pp_rd_name.getarr("species", m_beam_name); + + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + m_beam_name.size() == 2u, + "DifferentialLuminosity2D diagnostics must involve exactly two species"); + + pp_rd_name.query("openpmd_backend", m_openpmd_backend); + pp_rd_name.query("file_min_digits", m_file_min_digits); + // pick first available backend if 
default is chosen + if( m_openpmd_backend == "default" ) { + m_openpmd_backend = WarpXOpenPMDFileType(); + } + pp_rd_name.add("openpmd_backend", m_openpmd_backend); + + // read bin parameters for species 1 + int bin_num_1 = 0; + amrex::Real bin_max_1 = 0.0_rt, bin_min_1 = 0.0_rt; + utils::parser::getWithParser(pp_rd_name, "bin_number_1", bin_num_1); + utils::parser::getWithParser(pp_rd_name, "bin_max_1", bin_max_1); + utils::parser::getWithParser(pp_rd_name, "bin_min_1", bin_min_1); + m_bin_num_1 = bin_num_1; + m_bin_max_1 = bin_max_1; + m_bin_min_1 = bin_min_1; + m_bin_size_1 = (bin_max_1 - bin_min_1) / bin_num_1; + + // read bin parameters for species 2 + int bin_num_2 = 0; + amrex::Real bin_max_2 = 0.0_rt, bin_min_2 = 0.0_rt; + utils::parser::getWithParser(pp_rd_name, "bin_number_2", bin_num_2); + utils::parser::getWithParser(pp_rd_name, "bin_max_2", bin_max_2); + utils::parser::getWithParser(pp_rd_name, "bin_min_2", bin_min_2); + m_bin_num_2 = bin_num_2; + m_bin_max_2 = bin_max_2; + m_bin_min_2 = bin_min_2; + m_bin_size_2 = (bin_max_2 - bin_min_2) / bin_num_2; + + // resize data array on the host + Array tlo{0,0}; // lower bounds + Array thi{m_bin_num_1-1, m_bin_num_2-1}; // inclusive upper bounds + m_h_data_2D.resize(tlo, thi, The_Pinned_Arena()); + + auto const& h_table_data = m_h_data_2D.table(); + // initialize data on the host + for (int i = tlo[0]; i <= thi[0]; ++i) { + for (int j = tlo[1]; j <= thi[1]; ++j) { + h_table_data(i,j) = 0.0_rt; + } + } + + // resize data on the host + m_d_data_2D.resize(tlo, thi); + // copy data from host to device + m_d_data_2D.copy(m_h_data_2D); + Gpu::streamSynchronize(); +} // end constructor + +void DifferentialLuminosity2D::ComputeDiags (int step) +{ +#if defined(WARPX_DIM_RZ) + amrex::ignore_unused(step); +#else + + WARPX_PROFILE("DifferentialLuminosity2D::ComputeDiags"); + + // Since this diagnostic *accumulates* the luminosity in the + // table m_d_data_2D, we add contributions at *each timestep*, but + // we only write the data to file at intervals specified by the user. + const Real c_sq = PhysConst::c*PhysConst::c; + const Real c_over_qe = PhysConst::c/PhysConst::q_e; + + // output table data + auto d_table = m_d_data_2D.table(); + + // get a reference to WarpX instance + auto& warpx = WarpX::GetInstance(); + const Real dt = warpx.getdt(0); + // get cell volume + Geometry const & geom = warpx.Geom(0); + const Real dV = AMREX_D_TERM(geom.CellSize(0), *geom.CellSize(1), *geom.CellSize(2)); + + // declare local variables + auto const num_bins_1 = m_bin_num_1; + Real const bin_min_1 = m_bin_min_1; + Real const bin_size_1 = m_bin_size_1; + auto const num_bins_2 = m_bin_num_2; + Real const bin_min_2 = m_bin_min_2; + Real const bin_size_2 = m_bin_size_2; + + // get MultiParticleContainer class object + const MultiParticleContainer& mypc = warpx.GetPartContainer(); + + auto& species_1 = mypc.GetParticleContainerFromName(m_beam_name[0]); + auto& species_2 = mypc.GetParticleContainerFromName(m_beam_name[1]); + + const ParticleReal m1 = species_1.getMass(); + const ParticleReal m2 = species_2.getMass(); + + // Enable tiling + amrex::MFItInfo info; + if (amrex::Gpu::notInLaunchRegion()) { info.EnableTiling(WarpXParticleContainer::tile_size); } + + int const nlevs = std::max(0, species_1.finestLevel()+1); // species_1 ? 
+ for (int lev = 0; lev < nlevs; ++lev) { +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + + for (amrex::MFIter mfi = species_1.MakeMFIter(lev, info); mfi.isValid(); ++mfi){ + + ParticleTileType& ptile_1 = species_1.ParticlesAt(lev, mfi); + ParticleTileType& ptile_2 = species_2.ParticlesAt(lev, mfi); + + ParticleBins bins_1 = ParticleUtils::findParticlesInEachCell( lev, mfi, ptile_1 ); + ParticleBins bins_2 = ParticleUtils::findParticlesInEachCell( lev, mfi, ptile_2 ); + + // species 1 + const auto soa_1 = ptile_1.getParticleTileData(); + index_type* AMREX_RESTRICT indices_1 = bins_1.permutationPtr(); + index_type const* AMREX_RESTRICT cell_offsets_1 = bins_1.offsetsPtr(); + + // extract particle data of species 1 in the current tile/box + amrex::ParticleReal * const AMREX_RESTRICT w1 = soa_1.m_rdata[PIdx::w]; + amrex::ParticleReal * const AMREX_RESTRICT u1x = soa_1.m_rdata[PIdx::ux]; // u=v*gamma=p/m + amrex::ParticleReal * const AMREX_RESTRICT u1y = soa_1.m_rdata[PIdx::uy]; + amrex::ParticleReal * const AMREX_RESTRICT u1z = soa_1.m_rdata[PIdx::uz]; + bool const species1_is_photon = species_1.AmIA(); + + // same for species 2 + const auto soa_2 = ptile_2.getParticleTileData(); + index_type* AMREX_RESTRICT indices_2 = bins_2.permutationPtr(); + index_type const* AMREX_RESTRICT cell_offsets_2 = bins_2.offsetsPtr(); + + amrex::ParticleReal * const AMREX_RESTRICT w2 = soa_2.m_rdata[PIdx::w]; + amrex::ParticleReal * const AMREX_RESTRICT u2x = soa_2.m_rdata[PIdx::ux]; + amrex::ParticleReal * const AMREX_RESTRICT u2y = soa_2.m_rdata[PIdx::uy]; + amrex::ParticleReal * const AMREX_RESTRICT u2z = soa_2.m_rdata[PIdx::uz]; + bool const species2_is_photon = species_2.AmIA(); + + // Extract low-level (cell-level) data + auto const n_cells = static_cast(bins_1.numBins()); + + // Loop over cells + amrex::ParallelFor( n_cells, + [=] AMREX_GPU_DEVICE (int i_cell) noexcept + { + + // The particles from species1 that are in the cell `i_cell` are + // given by the `indices_1[cell_start_1:cell_stop_1]` + index_type const cell_start_1 = cell_offsets_1[i_cell]; + index_type const cell_stop_1 = cell_offsets_1[i_cell+1]; + // Same for species 2 + index_type const cell_start_2 = cell_offsets_2[i_cell]; + index_type const cell_stop_2 = cell_offsets_2[i_cell+1]; + + for(index_type i_1=cell_start_1; i_1=num_bins_1 ) { continue; } // discard if out-of-range + + // determine energy bin of particle 2 + int const bin_2 = int(Math::floor((E_2-bin_min_2)/bin_size_2)); + if ( bin_2<0 || bin_2>=num_bins_2 ) { continue; } // discard if out-of-range + + Real const inv_p1t = 1.0_rt/p1t; + Real const inv_p2t = 1.0_rt/p2t; + + Real const beta1_sq = (p1x*p1x + p1y*p1y + p1z*p1z) * inv_p1t*inv_p1t; + Real const beta2_sq = (p2x*p2x + p2y*p2y + p2z*p2z) * inv_p2t*inv_p2t; + Real const beta1_dot_beta2 = (p1x*p2x + p1y*p2y + p1z*p2z) * inv_p1t*inv_p2t; + + // Here we use the fact that: + // (v1 - v2)^2 = v1^2 + v2^2 - 2 v1.v2 + // and (v1 x v2)^2 = v1^2 v2^2 - (v1.v2)^2 + // we also use beta=v/c instead of v + Real const radicand = beta1_sq + beta2_sq - 2*beta1_dot_beta2 - beta1_sq*beta2_sq + beta1_dot_beta2*beta1_dot_beta2; + + Real const d2L_dE1_dE2 = PhysConst::c * std::sqrt( radicand ) * w1[j_1] * w2[j_2] / (dV * bin_size_1 * bin_size_2) * dt; // m^-2 eV^-2 + + amrex::Real &data = d_table(bin_1, bin_2); + amrex::HostDevice::Atomic::Add(&data, d2L_dE1_dE2); + + } // particles species 2 + } // particles species 1 + }); // cells + } // boxes + } // levels + + // Only write to file at intervals 
specified by the user. + // At these intervals, the data needs to ready on the CPU, + // so we copy it from the GPU to the CPU and reduce across MPI ranks. + if (m_intervals.contains(step+1)) { + + // Copy data from GPU memory + m_h_data_2D.copy(m_d_data_2D); + + // reduced sum over mpi ranks + const int size = static_cast (m_d_data_2D.size()); + ParallelDescriptor::ReduceRealSum + (m_h_data_2D.table().p, size, ParallelDescriptor::IOProcessorNumber()); + } + + // Return for all that are not IO processor + if ( !ParallelDescriptor::IOProcessor() ) { return; } + +#endif // not RZ +} // end void DifferentialLuminosity2D::ComputeDiags + +void DifferentialLuminosity2D::WriteToFile (int step) const +{ + // Judge if the diags should be done at this step + if (!m_intervals.contains(step+1)) { return; } + +#ifdef WARPX_USE_OPENPMD + // only IO processor writes + if ( !ParallelDescriptor::IOProcessor() ) { return; } + + // TODO: support different filename templates + std::string filename = "openpmd"; + // TODO: support also group-based encoding + const std::string fileSuffix = std::string("_%0") + std::to_string(m_file_min_digits) + std::string("T"); + filename = filename.append(fileSuffix).append(".").append(m_openpmd_backend); + + // transform paths for Windows + #ifdef _WIN32 + const std::string filepath = openPMD::auxiliary::replace_all( + m_path + m_rd_name + "/" + filename, "/", "\\"); + #else + const std::string filepath = m_path + m_rd_name + "/" + filename; + #endif + + // Create the OpenPMD series + auto series = io::Series( + filepath, + io::Access::CREATE); + auto i = series.iterations[step + 1]; + // record + auto f_mesh = i.meshes["d2L_dE1_dE2"]; // m^-2 eV^-2 + f_mesh.setUnitDimension({ + {io::UnitDimension::L, -6}, + {io::UnitDimension::M, -2}, + {io::UnitDimension::T, 4} + }); + + // record components + auto data = f_mesh[io::RecordComponent::SCALAR]; + + // meta data + f_mesh.setAxisLabels({"E2", "E1"}); // eV, eV + std::vector< double > const& gridGlobalOffset = {m_bin_min_2, m_bin_min_1}; + f_mesh.setGridGlobalOffset(gridGlobalOffset); + f_mesh.setGridSpacing({m_bin_size_2, m_bin_size_1}); + + data.setPosition({0.5, 0.5}); + + auto dataset = io::Dataset( + io::determineDatatype(), + {static_cast(m_bin_num_2), static_cast(m_bin_num_1)}); + data.resetDataset(dataset); + + // Get time at level 0 + auto & warpx = WarpX::GetInstance(); + auto const time = warpx.gett_new(0); + i.setTime(time); + + auto const& h_table_data = m_h_data_2D.table(); + data.storeChunkRaw( + h_table_data.p, + {0, 0}, + {static_cast(m_bin_num_2), static_cast(m_bin_num_1)}); + + series.flush(); + i.close(); + series.close(); +#else + amrex::ignore_unused(step); + WARPX_ABORT_WITH_MESSAGE("DifferentialLuminosity2D: Needs openPMD-api compiled into WarpX, but was not found!"); +#endif +} diff --git a/Source/Diagnostics/ReducedDiags/Make.package b/Source/Diagnostics/ReducedDiags/Make.package index 4d2e4d7def9..98fa093e2df 100644 --- a/Source/Diagnostics/ReducedDiags/Make.package +++ b/Source/Diagnostics/ReducedDiags/Make.package @@ -4,6 +4,7 @@ CEXE_sources += BeamRelevant.cpp CEXE_sources += ChargeOnEB.cpp CEXE_sources += ColliderRelevant.cpp CEXE_sources += DifferentialLuminosity.cpp +CEXE_sources += DifferentialLuminosity2D.cpp CEXE_sources += FieldEnergy.cpp CEXE_sources += FieldMaximum.cpp CEXE_sources += FieldMomentum.cpp diff --git a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp index 0ce18174111..e4c982f7323 100644 --- 
a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp +++ b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp @@ -10,6 +10,7 @@ #include "ChargeOnEB.H" #include "ColliderRelevant.H" #include "DifferentialLuminosity.H" +#include "DifferentialLuminosity2D.H" #include "FieldEnergy.H" #include "FieldMaximum.H" #include "FieldMomentum.H" @@ -58,6 +59,7 @@ MultiReducedDiags::MultiReducedDiags () {"ChargeOnEB", [](CS s){return std::make_unique(s);}}, {"ColliderRelevant", [](CS s){return std::make_unique(s);}}, {"DifferentialLuminosity",[](CS s){return std::make_unique(s);}}, + {"DifferentialLuminosity2D",[](CS s){return std::make_unique(s);}}, {"ParticleEnergy", [](CS s){return std::make_unique(s);}}, {"ParticleExtrema", [](CS s){return std::make_unique(s);}}, {"ParticleHistogram", [](CS s){return std::make_unique(s);}}, From f4ece6e746f1d97b7b5f2599fc6ecfd0d68f556f Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 14 Feb 2025 18:40:44 +0100 Subject: [PATCH 234/278] WarpX class: move SetDotMask to anonymous namespace in WarpX.cpp (#5644) `SetDotMask`, a member function of the WarpX class, is only used inside the member function `getFieldDotMaskPointer` . This PR turns it into a pure function and moves it into an anonymous namespace inside `WarpX.cpp`. This (slightly) simplifies the WarpX class header. --- .../ImplicitSolvers/WarpXSolverVec.cpp | 4 +- Source/WarpX.H | 9 +--- Source/WarpX.cpp | 48 +++++++++++-------- 3 files changed, 30 insertions(+), 31 deletions(-) diff --git a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp index f091353a4df..05b5f1caa0c 100644 --- a/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp +++ b/Source/FieldSolver/ImplicitSolvers/WarpXSolverVec.cpp @@ -149,7 +149,7 @@ void WarpXSolverVec::Copy ( FieldType a_array_type, for (int lev = 0; lev < m_num_amr_levels; ++lev) { if (m_array_type != FieldType::None) { for (int n = 0; n < 3; ++n) { - const amrex::iMultiFab* dotMask = m_WarpX->getFieldDotMaskPointer(m_array_type,lev,n); + const amrex::iMultiFab* dotMask = m_WarpX->getFieldDotMaskPointer(m_array_type, lev, ablastr::fields::Direction{n}); auto rtmp = amrex::MultiFab::Dot( *dotMask, *m_array_vec[lev][n], 0, *a_X.getArrayVec()[lev][n], 0, 1, 0, local); @@ -157,7 +157,7 @@ void WarpXSolverVec::Copy ( FieldType a_array_type, } } if (m_scalar_type != FieldType::None) { - const amrex::iMultiFab* dotMask = m_WarpX->getFieldDotMaskPointer(m_scalar_type,lev,0); + const amrex::iMultiFab* dotMask = m_WarpX->getFieldDotMaskPointer(m_scalar_type,lev, ablastr::fields::Direction{0}); auto rtmp = amrex::MultiFab::Dot( *dotMask, *m_scalar_vec[lev], 0, *a_X.getScalarVec()[lev], 0, 1, 0, local); diff --git a/Source/WarpX.H b/Source/WarpX.H index ce4a846eace..ddfd545db74 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -412,14 +412,7 @@ public: * Get pointer to the amrex::MultiFab containing the dotMask for the specified field */ [[nodiscard]] const amrex::iMultiFab* - getFieldDotMaskPointer (warpx::fields::FieldType field_type, int lev, int dir) const; - - /** - * \brief - * Set the dotMask container - */ - void SetDotMask( std::unique_ptr& field_dotMask, - std::string const & field_name, int lev, int dir ) const; + getFieldDotMaskPointer (warpx::fields::FieldType field_type, int lev, ablastr::fields::Direction dir) const; [[nodiscard]] bool DoPML () const {return do_pml;} [[nodiscard]] bool DoFluidSpecies () const {return do_fluid_species;} diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 
a17c7ff432e..4a0633369ce 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -200,6 +200,27 @@ namespace std::any_of(field_boundary_hi.begin(), field_boundary_hi.end(), is_pml); return is_any_pml; } + + /** + * \brief + * Set the dotMask container + */ + void SetDotMask( std::unique_ptr<amrex::iMultiFab>& field_dotMask, + ablastr::fields::ConstScalarField const& field, + amrex::Periodicity const& periodicity) + { + // Define the dot mask for this field, needed to properly compute dotProduct() + // for field values that have shared locations on different MPI ranks + if (field_dotMask != nullptr) { return; } + + const auto& this_ba = field->boxArray(); + const auto tmp = amrex::MultiFab{ + this_ba, field->DistributionMap(), + 1, 0, amrex::MFInfo().SetAlloc(false)}; + + field_dotMask = tmp.OwnerMask(periodicity); + } } void WarpX::MakeWarpX () @@ -3316,40 +3337,25 @@ WarpX::MakeDistributionMap (int lev, amrex::BoxArray const& ba) } const amrex::iMultiFab* -WarpX::getFieldDotMaskPointer ( FieldType field_type, int lev, int dir ) const +WarpX::getFieldDotMaskPointer ( FieldType field_type, int lev, ablastr::fields::Direction dir ) const { + const auto periodicity = Geom(lev).periodicity(); switch(field_type) { case FieldType::Efield_fp : - SetDotMask( Efield_dotMask[lev][dir], "Efield_fp", lev, dir ); + ::SetDotMask( Efield_dotMask[lev][dir], m_fields.get("Efield_fp", dir, lev), periodicity); return Efield_dotMask[lev][dir].get(); case FieldType::Bfield_fp : - SetDotMask( Bfield_dotMask[lev][dir], "Bfield_fp", lev, dir ); + ::SetDotMask( Bfield_dotMask[lev][dir], m_fields.get("Bfield_fp", dir, lev), periodicity); return Bfield_dotMask[lev][dir].get(); case FieldType::vector_potential_fp : - SetDotMask( Afield_dotMask[lev][dir], "vector_potential_fp", lev, dir ); + ::SetDotMask( Afield_dotMask[lev][dir], m_fields.get("vector_potential_fp", dir, lev), periodicity); return Afield_dotMask[lev][dir].get(); case FieldType::phi_fp : - SetDotMask( phi_dotMask[lev], "phi_fp", lev, 0 ); + ::SetDotMask( phi_dotMask[lev], m_fields.get("phi_fp", dir, lev), periodicity); return phi_dotMask[lev].get(); default: WARPX_ABORT_WITH_MESSAGE("Invalid field type for dotMask"); return Efield_dotMask[lev][dir].get(); } } - -void WarpX::SetDotMask( std::unique_ptr<amrex::iMultiFab>& field_dotMask, - std::string const & field_name, int lev, int dir ) const -{ - // Define the dot mask for this field_type needed to properly compute dotProduct() - // for field values that have shared locations on different MPI ranks - if (field_dotMask != nullptr) { return; } - - ablastr::fields::ConstVectorField const& this_field = m_fields.get_alldirs(field_name,lev); - const amrex::BoxArray& this_ba = this_field[dir]->boxArray(); - const amrex::MultiFab tmp( this_ba, this_field[dir]->DistributionMap(), - 1, 0, amrex::MFInfo().SetAlloc(false) ); - const amrex::Periodicity& period = Geom(lev).periodicity(); - field_dotMask = tmp.OwnerMask(period); - -} From 17692a04f5e4f24c4feb85013ff5da25523ee713 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 14 Feb 2025 09:45:19 -0800 Subject: [PATCH 235/278] Update to latest AMReX version (#5669) Update to latest AMReX version to pull the latest bug fix in https://github.com/AMReX-Codes/amrex/pull/4333.
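For anyone testing this bump locally before it merges, the pinned commit can be overridden at configure time through the existing CMake cache options (a minimal sketch; the options are the ones defined in cmake/dependencies/AMReX.cmake, and the hash below is the one this PR pins):

    cmake -S . -B build \
      -DWarpX_amrex_repo=https://github.com/AMReX-Codes/amrex.git \
      -DWarpX_amrex_branch=275f55f25fec350dfedb54f75a19200b52ced93f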
--- .github/workflows/cuda.yml | 2 +- cmake/dependencies/AMReX.cmake | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 6e87134904f..3b65f406728 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -127,7 +127,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 198da4879a63f1bc8c4e8d674bf9185525318f61 && cd - + cd ../amrex && git checkout --detach 275f55f25fec350dfedb54f75a19200b52ced93f && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 7f5546a931b..813734282c7 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -294,7 +294,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "198da4879a63f1bc8c4e8d674bf9185525318f61" +set(WarpX_amrex_branch "275f55f25fec350dfedb54f75a19200b52ced93f" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") From 18578b963b7c2250201ce6d3984aff3185dc54e3 Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Fri, 14 Feb 2025 16:36:59 -0800 Subject: [PATCH 236/278] Add external particle fields ohms law hybrid (#5275) This PR allows for the addition of external fields through the particle fields analytical interface. This is useful for splitting the fields into external vs. self-generated contributions in the hybrid Ohm's law solver. --------- Signed-off-by: S. Eric Clark <25495882+clarkse@users.noreply.github.com> Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> --- Docs/source/refs.bib | 10 + Docs/source/usage/parameters.rst | 21 + Examples/Tests/CMakeLists.txt | 1 + .../CMakeLists.txt | 24 ++ .../analysis_default_regression.py | 1 + ...d_ohm_solver_cylinder_compression_picmi.py | 393 ++++++++++++++++++ ...z_ohm_solver_cylinder_compression_picmi.py | 383 +++++++++++++++++ Python/pywarpx/HybridPICModel.py | 1 + Python/pywarpx/WarpX.py | 3 +- Python/pywarpx/__init__.py | 2 +- Python/pywarpx/fields.py | 99 +++++ Python/pywarpx/picmi.py | 77 ++++ ...ohm_solver_cylinder_compression_picmi.json | 20 + ...ohm_solver_cylinder_compression_picmi.json | 20 + .../FiniteDifferenceSolver/CMakeLists.txt | 1 + .../FiniteDifferenceSolver/ComputeCurlA.cpp | 306 ++++++++++++++ .../FiniteDifferenceSolver.H | 44 +- .../HybridPICModel/CMakeLists.txt | 1 + .../HybridPICModel/ExternalVectorPotential.H | 101 +++++ .../ExternalVectorPotential.cpp | 376 +++++++++++++++++ .../HybridPICModel/HybridPICModel.H | 44 +- .../HybridPICModel/HybridPICModel.cpp | 60 ++- .../HybridPICModel/Make.package | 1 + .../HybridPICSolveE.cpp | 169 ++++++-- .../FiniteDifferenceSolver/Make.package | 1 + .../FieldSolver/WarpXPushFieldsHybridPIC.cpp | 75 +++- Source/Fields.H | 4 + Source/Initialization/WarpXInitData.cpp | 89 +++- Source/Particles/Gather/GetExternalFields.H | 6 +- Source/Python/WarpX.cpp | 4 + Source/WarpX.H | 52 ++- Source/WarpX.cpp | 25 +- 32 files changed, 2336 insertions(+), 78 deletions(-) create mode 100644 Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt create mode 120000
Examples/Tests/ohm_solver_cylinder_compression/analysis_default_regression.py create mode 100644 Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py create mode 100644 Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py create mode 100644 Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json create mode 100644 Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json create mode 100644 Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp create mode 100644 Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H create mode 100644 Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp diff --git a/Docs/source/refs.bib b/Docs/source/refs.bib index 6623bacd452..49f4658af4c 100644 --- a/Docs/source/refs.bib +++ b/Docs/source/refs.bib @@ -518,3 +518,13 @@ @article{Rhee1987 url = {https://doi.org/10.1063/1.1139314}, eprint = {https://pubs.aip.org/aip/rsi/article-pdf/58/2/240/19154912/240\_1\_online.pdf}, } + +@misc{holmstrom2013handlingvacuumregionshybrid, + title={Handling vacuum regions in a hybrid plasma solver}, + author={M. Holmstrom}, + year={2013}, + eprint={1301.0272}, + archivePrefix={arXiv}, + primaryClass={physics.space-ph}, + url={https://arxiv.org/abs/1301.0272}, +} diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index dc53ae5295f..77f99044448 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2537,6 +2537,27 @@ Maxwell solver: kinetic-fluid hybrid * ``hybrid_pic_model.substeps`` (`int`) optional (default ``10``) If ``algo.maxwell_solver`` is set to ``hybrid``, this sets the number of sub-steps to take during the B-field update. +* ``hybrid_pic_model.holmstrom_vacuum_region`` (`bool`) optional (default ``false``) + If ``algo.maxwell_solver`` is set to ``hybrid``, this sets the vacuum region handling of the generalized Ohm's law to suppress vacuum fluctuations, following :cite:t:`param-holmstrom2013handlingvacuumregionshybrid`. + +* ``hybrid_pic_model.add_external_fields`` (`bool`) optional (default ``false``) + If ``algo.maxwell_solver`` is set to ``hybrid``, this sets the hybrid solver to use split external fields defined in the ``external_vector_potential`` inputs. + +* ``external_vector_potential.fields`` (list of `str`) optional (default ``empty``) + If ``hybrid_pic_model.add_external_fields`` is set to ``true``, this adds a list of names for external time-varying vector potentials to be added to the hybrid solver. + +* ``external_vector_potential.<field name>.read_from_file`` (`bool`) optional (default ``false``) + If ``hybrid_pic_model.add_external_fields`` is set to ``true``, this flag determines whether to load an external field from a file or use an implicit function to evaluate the time-varying field. + +* ``external_vector_potential.<field name>.path`` (`str`) optional (default ``""``) + If ``external_vector_potential.<field name>.read_from_file`` is set to ``true``, sets the path to an OpenPMD file containing the external vector potential, in :math:`weber/m`. + +* ``external_vector_potential.<field name>.A[x,y,z]_external_grid_function(x,y,z)`` (`str`) optional (default ``"0"``) + If ``external_vector_potential.<field name>.read_from_file`` is set to ``false``, sets the external vector potential to be populated by an implicit function (on the grid) in :math:`weber/m`.
+ +* ``external_vector_potential.<field name>.A_time_external_function(t)`` (`str`) optional (default ``"1"``) + This sets the relative strength of the external vector potential via a dimensionless implicit time function; the external B and E fields are computed from the value and the first time derivative of this function, respectively. + .. note:: Based on results from :cite:t:`param-Stanier2020` it is recommended to use diff --git a/Examples/Tests/CMakeLists.txt b/Examples/Tests/CMakeLists.txt index 5ff1d4a9a70..b80e6158f49 100644 --- a/Examples/Tests/CMakeLists.txt +++ b/Examples/Tests/CMakeLists.txt @@ -41,6 +41,7 @@ add_subdirectory(nci_fdtd_stability) add_subdirectory(nci_psatd_stability) add_subdirectory(nodal_electrostatic) add_subdirectory(nuclear_fusion) +add_subdirectory(ohm_solver_cylinder_compression) add_subdirectory(ohm_solver_em_modes) add_subdirectory(ohm_solver_ion_beam_instability) add_subdirectory(ohm_solver_ion_Landau_damping) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt b/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt new file mode 100644 index 00000000000..c813d669fa6 --- /dev/null +++ b/Examples/Tests/ohm_solver_cylinder_compression/CMakeLists.txt @@ -0,0 +1,24 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_ohm_solver_cylinder_compression_picmi # name + 3 # dims + 2 # nprocs + "inputs_test_3d_ohm_solver_cylinder_compression_picmi.py --test" # inputs + OFF # analysis + "analysis_default_regression.py --path diags/diag1000020 --rtol 1e-6" # checksum + OFF # dependency +) +label_warpx_test(test_3d_ohm_solver_cylinder_compression_picmi slow) + +add_warpx_test( + test_rz_ohm_solver_cylinder_compression_picmi # name + RZ # dims + 2 # nprocs + "inputs_test_rz_ohm_solver_cylinder_compression_picmi.py --test" # inputs + OFF # analysis + "analysis_default_regression.py --path diags/diag1000020 --rtol 1e-6" # checksum + OFF # dependency +) +label_warpx_test(test_rz_ohm_solver_cylinder_compression_picmi slow) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/analysis_default_regression.py b/Examples/Tests/ohm_solver_cylinder_compression/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/ohm_solver_cylinder_compression/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py new file mode 100644 index 00000000000..4f05fd15d83 --- /dev/null +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py @@ -0,0 +1,393 @@ +#!/usr/bin/env python3 +# +# --- Test script for the kinetic-fluid hybrid model in WarpX wherein ions are +# --- treated as kinetic particles and electrons as an isothermal, inertialess +# --- background fluid. The script demonstrates the use of this model to +# --- simulate adiabatic compression of a plasma cylinder initialized from an +# --- analytical Grad-Shafranov solution.
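+# --- (Note: the initial state coded in Bz(r) below is a theta-pinch-like
+# --- radial pressure balance, effectively the straight-cylinder limit of a
+# --- Grad-Shafranov equilibrium:
+# ---     Bz(r)^2/(2*mu0) + n(r)*q_e*T_i = B0^2/(2*mu0),
+# --- with n(r) = n0 / (1 + exp((r - R_p)/delta_p)); the Bz(r) profile is
+# --- simply this balance solved for Bz.)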
+ +import argparse +import shutil +import sys +from pathlib import Path + +import numpy as np +import openpmd_api as io +from mpi4py import MPI as mpi + +from pywarpx import fields, picmi + +constants = picmi.constants + +comm = mpi.COMM_WORLD + +simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=False) + + +class PlasmaCylinderCompression(object): + # B0 is chosen with all other quantities scaled by it + n0 = 1e20 + T_i = 10 # eV + T_e = 0 + p0 = n0 * constants.q_e * T_i + + B0 = np.sqrt(2 * constants.mu0 * p0) # Initial magnetic field strength (T) + + # Do a 2x uniform B-field compression + dB = B0 + + # Flux Conserver radius + R_c = 0.5 + + # Plasma Radius (These values control the analytical GS solution) + R_p = 0.25 + delta_p = 0.025 + + # Domain parameters + LX = 2.0 * R_c * 1.05 # m + LY = 2.0 * R_c * 1.05 + LZ = 0.5 # m + + LT = 10 # ion cyclotron periods + DT = 1e-3 # ion cyclotron periods + + # Resolution parameters + NX = 256 + NY = 256 + NZ = 128 + + # Starting number of particles per cell + NPPC = 100 + + # Number of substeps used to update B + substeps = 20 + + def Bz(self, r): + return np.sqrt( + self.B0**2 + - 2.0 + * constants.mu0 + * self.n0 + * constants.q_e + * self.T_i + / (1.0 + np.exp((r - self.R_p) / self.delta_p)) + ) + + def __init__(self, test, verbose): + self.test = test + self.verbose = verbose or self.test + + self.Lx = self.LX + self.Ly = self.LY + self.Lz = self.LZ + + self.DX = self.LX / self.NX + self.DY = self.LY / self.NY + self.DZ = self.LZ / self.NZ + + if comm.rank == 0: + # Write uniform compression dataset to OpenPMD to exercise reading openPMD data + # for the time varying external fields + xvec = np.linspace(-self.LX, self.LX, num=2 * self.NX) + yvec = np.linspace(-self.LY, self.LY, num=2 * self.NY) + zvec = np.linspace(-self.LZ, self.LZ, num=2 * self.NZ) + XM, YM, ZM = np.meshgrid(xvec, yvec, zvec, indexing="ij") + + RM = np.sqrt(XM**2 + YM**2) + + Ax_data = -0.5 * YM * self.dB + Ay_data = 0.5 * XM * self.dB + Az_data = np.zeros_like(RM) + + # Write vector potential to file to exercise field loading via OpenPMD + series = io.Series("Afield.h5", io.Access.create) + + it = series.iterations[0] + + A = it.meshes["A"] + A.grid_spacing = [self.DX, self.DY, self.DZ] + A.grid_global_offset = [-self.LX, -self.LY, -self.LZ] + A.grid_unit_SI = 1.0 + A.axis_labels = ["x", "y", "z"] + A.data_order = "C" + A.unit_dimension = { + io.Unit_Dimension.M: 1.0, + io.Unit_Dimension.T: -2.0, + io.Unit_Dimension.I: -1.0, + io.Unit_Dimension.L: -1.0, + } + + Ax = A["x"] + Ay = A["y"] + Az = A["z"] + + Ax.position = [0.0, 0.0] + Ay.position = [0.0, 0.0] + Az.position = [0.0, 0.0] + + Ax_dataset = io.Dataset(Ax_data.dtype, Ax_data.shape) + + Ay_dataset = io.Dataset(Ay_data.dtype, Ay_data.shape) + + Az_dataset = io.Dataset(Az_data.dtype, Az_data.shape) + + Ax.reset_dataset(Ax_dataset) + Ay.reset_dataset(Ay_dataset) + Az.reset_dataset(Az_dataset) + + Ax.store_chunk(Ax_data) + Ay.store_chunk(Ay_data) + Az.store_chunk(Az_data) + + series.flush() + series.close() + + comm.Barrier() + + # calculate various plasma parameters based on the simulation input + self.get_plasma_quantities() + + self.dt = self.DT * self.t_ci + + # run very low resolution as a CI test + if self.test: + self.total_steps = 20 + self.diag_steps = self.total_steps // 5 + self.NX = 64 + self.NY = 64 + self.NZ = 32 + else: + self.total_steps = int(self.LT / self.DT) + self.diag_steps = 100 + + # print out plasma parameters + if comm.rank == 0: + print( + f"Initializing simulation 
with input parameters:\n" + f"\tTi = {self.T_i:.1f} eV\n" + f"\tn0 = {self.n0:.1e} m^-3\n" + f"\tB0 = {self.B0:.2f} T\n", + f"\tDX/DY = {self.DX / self.l_i:.3f} c/w_pi\n" + f"\tDZ = {self.DZ / self.l_i:.3f} c/w_pi\n", + ) + print( + f"Plasma parameters:\n" + f"\tl_i = {self.l_i:.1e} m\n" + f"\tt_ci = {self.t_ci:.1e} s\n" + f"\tv_ti = {self.vi_th:.1e} m/s\n" + f"\tvA = {self.vA:.1e} m/s\n" + ) + print( + f"Numerical parameters:\n" + f"\tdz = {self.Lz / self.NZ:.1e} m\n" + f"\tdt = {self.dt:.1e} s\n" + f"\tdiag steps = {self.diag_steps:d}\n" + f"\ttotal steps = {self.total_steps:d}\n" + ) + + self.setup_run() + + def get_plasma_quantities(self): + """Calculate various plasma parameters based on the simulation input.""" + + # Ion mass (kg) + self.M = constants.m_p + + # Cyclotron angular frequency (rad/s) and period (s) + self.w_ci = constants.q_e * abs(self.B0) / self.M + self.t_ci = 2.0 * np.pi / self.w_ci + + # Ion plasma frequency (Hz) + self.w_pi = np.sqrt(constants.q_e**2 * self.n0 / (self.M * constants.ep0)) + + # Ion skin depth (m) + self.l_i = constants.c / self.w_pi + + # # Alfven speed (m/s): vA = B / sqrt(mu0 * n * (M + m)) = c * omega_ci / w_pi + self.vA = abs(self.B0) / np.sqrt( + constants.mu0 * self.n0 * (constants.m_e + self.M) + ) + + # calculate thermal speeds + self.vi_th = np.sqrt(self.T_i * constants.q_e / self.M) + + # Ion Larmor radius (m) + self.rho_i = self.vi_th / self.w_ci + + def load_fields(self): + Bx = fields.BxFPExternalWrapper(include_ghosts=False) + By = fields.ByFPExternalWrapper(include_ghosts=False) + Bz = fields.BzFPExternalWrapper(include_ghosts=False) + + Bx[:, :] = 0.0 + By[:, :] = 0.0 + + XM, YM, ZM = np.meshgrid( + Bz.mesh("x"), Bz.mesh("y"), Bz.mesh("z"), indexing="ij" + ) + + RM = np.sqrt(XM**2 + YM**2) + + Bz[:, :] = self.Bz(RM) + comm.Barrier() + + def setup_run(self): + """Setup simulation components.""" + + ####################################################################### + # Set geometry and boundary conditions # + ####################################################################### + + # Create grid + self.grid = picmi.Cartesian3DGrid( + number_of_cells=[self.NX, self.NY, self.NZ], + lower_bound=[-0.5 * self.Lx, -0.5 * self.Ly, -0.5 * self.Lz], + upper_bound=[0.5 * self.Lx, 0.5 * self.Ly, 0.5 * self.Lz], + lower_boundary_conditions=["dirichlet", "dirichlet", "periodic"], + upper_boundary_conditions=["dirichlet", "dirichlet", "periodic"], + lower_boundary_conditions_particles=["absorbing", "absorbing", "periodic"], + upper_boundary_conditions_particles=["absorbing", "absorbing", "periodic"], + warpx_max_grid_size=self.NZ, + ) + simulation.time_step_size = self.dt + simulation.max_steps = self.total_steps + simulation.current_deposition_algo = "direct" + simulation.particle_shape = 1 + simulation.use_filter = True + simulation.verbose = self.verbose + + ####################################################################### + # Field solver and external field # + ####################################################################### + # External Field definition. 
Sigmoid starting around 2.5 us + A_ext = { + "uniform": { + "read_from_file": True, + "path": "Afield.h5", + "A_time_external_function": "1/(1+exp(5*(1-(t-t0_ramp)*sqrt(2)/tau_ramp)))", + } + } + + self.solver = picmi.HybridPICSolver( + grid=self.grid, + gamma=1.0, + Te=self.T_e, + n0=self.n0, + n_floor=0.05 * self.n0, + plasma_resistivity="if(rho<=rho_floor,eta_v,eta_p)", + plasma_hyper_resistivity=1e-8, + substeps=self.substeps, + A_external=A_ext, + tau_ramp=20e-6, + t0_ramp=5e-6, + rho_floor=0.05 * self.n0 * constants.q_e, + eta_p=1e-8, + eta_v=1e-3, + ) + simulation.solver = self.solver + + simulation.embedded_boundary = picmi.EmbeddedBoundary( + implicit_function="(x**2+y**2-R_w**2)", R_w=self.R_c + ) + + # Add field loader callback + B_ext = picmi.LoadInitialFieldFromPython( + load_from_python=self.load_fields, + warpx_do_divb_cleaning_external=True, + load_B=True, + load_E=False, + ) + simulation.add_applied_field(B_ext) + + ####################################################################### + # Particle types setup # + ####################################################################### + r_omega = "(sqrt(x*x+y*y)*q_e*B0/m_p)" + dlnndr = "((-1/delta_p)/(1+exp(-(sqrt(x*x+y*y)-R_p)/delta_p)))" + vth = f"0.5*(-{r_omega}+sqrt({r_omega}*{r_omega}+4*q_e*T_i*{dlnndr}/m_p))" + + momentum_expr = [f"y*{vth}", f"-x*{vth}", "0"] + + self.ions = picmi.Species( + name="ions", + charge="q_e", + mass=self.M, + initial_distribution=picmi.AnalyticDistribution( + density_expression="n0_p/(1+exp((sqrt(x*x+y*y)-R_p)/delta_p))", + momentum_expressions=momentum_expr, + warpx_momentum_spread_expressions=[f"{str(self.vi_th)}"] * 3, + warpx_density_min=0.01 * self.n0, + R_p=self.R_p, + delta_p=self.delta_p, + n0_p=self.n0, + B0=self.B0, + T_i=self.T_i, + ), + ) + simulation.add_species( + self.ions, + layout=picmi.PseudoRandomLayout( + grid=self.grid, n_macroparticles_per_cell=self.NPPC + ), + ) + + ####################################################################### + # Add diagnostics # + ####################################################################### + + if self.test: + particle_diag = picmi.ParticleDiagnostic( + name="diag1", + period=self.diag_steps, + species=[self.ions], + data_list=["ux", "uy", "uz", "x", "z", "weighting"], + write_dir="diags", + warpx_format="plotfile", + ) + simulation.add_diagnostic(particle_diag) + field_diag = picmi.FieldDiagnostic( + name="diag1", + grid=self.grid, + period=self.diag_steps, + data_list=["B", "E", "rho"], + write_dir="diags", + warpx_format="plotfile", + ) + simulation.add_diagnostic(field_diag) + + ####################################################################### + # Initialize # + ####################################################################### + + if comm.rank == 0: + if Path.exists(Path("diags")): + shutil.rmtree("diags") + Path("diags").mkdir(parents=True, exist_ok=True) + + # Initialize inputs and WarpX instance + simulation.initialize_inputs() + simulation.initialize_warpx() + + +########################## +# parse input parameters +########################## + +parser = argparse.ArgumentParser() +parser.add_argument( + "-t", + "--test", + help="toggle whether this script is run as a short CI test", + action="store_true", +) +parser.add_argument( + "-v", + "--verbose", + help="Verbose output", + action="store_true", +) +args, left = parser.parse_known_args() +sys.argv = sys.argv[:1] + left + +run = PlasmaCylinderCompression(test=args.test, verbose=args.verbose) +simulation.step() diff --git 
a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py new file mode 100644 index 00000000000..8c65f88ae79 --- /dev/null +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py @@ -0,0 +1,383 @@ +#!/usr/bin/env python3 +# +# --- Test script for the kinetic-fluid hybrid model in WarpX wherein ions are +# --- treated as kinetic particles and electrons as an isothermal, inertialess +# --- background fluid. The script demonstrates the use of this model to +# --- simulate adiabatic compression of a plasma cylinder initialized from an +# --- analytical Grad-Shafranov solution. + +import argparse +import shutil +import sys +from pathlib import Path + +import numpy as np +import openpmd_api as io +from mpi4py import MPI as mpi + +from pywarpx import fields, picmi + +constants = picmi.constants + +comm = mpi.COMM_WORLD + +simulation = picmi.Simulation(warpx_serialize_initial_conditions=True, verbose=False) + + +class PlasmaCylinderCompression(object): + # B0 is chosen with all other quantities scaled by it + n0 = 1e20 + T_i = 10 # eV + T_e = 0 + p0 = n0 * constants.q_e * T_i + + B0 = np.sqrt(2 * constants.mu0 * p0) # External magnetic field strength (T) + + # Do a 2x uniform B-field compression + dB = B0 + + # Flux Conserver radius + R_c = 0.5 + + # Plasma Radius (These values control the analytical GS solution) + R_p = 0.25 + delta_p = 0.025 + + # Domain parameters + LR = R_c # m + LZ = 0.25 * R_c # m + + LT = 10 # ion cyclotron periods + DT = 1e-3 # ion cyclotron periods + + # Resolution parameters + NR = 128 + NZ = 32 + + # Starting number of particles per cell + NPPC = 100 + + # Number of substeps used to update B + substeps = 20 + + def Bz(self, r): + return np.sqrt( + self.B0**2 + - 2.0 + * constants.mu0 + * self.n0 + * constants.q_e + * self.T_i + / (1.0 + np.exp((r - self.R_p) / self.delta_p)) + ) + + def __init__(self, test, verbose): + self.test = test + self.verbose = verbose or self.test + + self.Lr = self.LR + self.Lz = self.LZ + + self.DR = self.LR / self.NR + self.DZ = self.LZ / self.NZ + + # Write A to OpenPMD for a uniform B field to exercise the file-based loader + if comm.rank == 0: + mvec = np.array([0]) + rvec = np.linspace(0, 2 * self.LR, num=2 * self.NR) + zvec = np.linspace(-self.LZ, self.LZ, num=2 * self.NZ) + MM, RM, ZM = np.meshgrid(mvec, rvec, zvec, indexing="ij") + + # Write uniform compression dataset to OpenPMD to exercise reading openPMD data + # for the time-varying external fields + Ar_data = np.zeros_like(RM) + Az_data = np.zeros_like(RM) + + # Zero padded outside of domain + At_data = 0.5 * RM * self.dB + + # Write vector potential to file to exercise field loading via OpenPMD + series = io.Series("Afield.h5", io.Access.create) + + it = series.iterations[0] + + A = it.meshes["A"] + A.geometry = io.Geometry.thetaMode + A.geometry_parameters = "m=0" + A.grid_spacing = [self.DR, self.DZ] + A.grid_global_offset = [0.0, -self.LZ] + A.grid_unit_SI = 1.0 + A.axis_labels = ["r", "z"] + A.data_order = "C" + A.unit_dimension = { + io.Unit_Dimension.M: 1.0, + io.Unit_Dimension.T: -2.0, + io.Unit_Dimension.I: -1.0, + io.Unit_Dimension.L: -1.0, + } + + Ar = A["r"] + At = A["t"] + Az = A["z"] + + Ar.position = [0.0, 0.0] + At.position = [0.0, 0.0] + Az.position = [0.0, 0.0] + + Ar_dataset = io.Dataset(Ar_data.dtype, Ar_data.shape) + + At_dataset = io.Dataset(At_data.dtype,
At_data.shape) + + Az_dataset = io.Dataset(Az_data.dtype, Az_data.shape) + + Ar.reset_dataset(Ar_dataset) + At.reset_dataset(At_dataset) + Az.reset_dataset(Az_dataset) + + Ar.store_chunk(Ar_data) + At.store_chunk(At_data) + Az.store_chunk(Az_data) + + series.flush() + series.close() + + comm.Barrier() + + # calculate various plasma parameters based on the simulation input + self.get_plasma_quantities() + + self.dt = self.DT * self.t_ci + + # run very low resolution as a CI test + if self.test: + self.total_steps = 20 + self.diag_steps = self.total_steps // 5 + self.NR = 64 + self.NZ = 16 + else: + self.total_steps = int(self.LT / self.DT) + self.diag_steps = 100 + + # print out plasma parameters + if comm.rank == 0: + print( + f"Initializing simulation with input parameters:\n" + f"\tTi = {self.T_i:.1f} eV\n" + f"\tn0 = {self.n0:.1e} m^-3\n" + f"\tB0 = {self.B0:.2f} T\n", + f"\tDR = {self.DR / self.l_i:.3f} c/w_pi\n" + f"\tDZ = {self.DZ / self.l_i:.3f} c/w_pi\n", + ) + print( + f"Plasma parameters:\n" + f"\tl_i = {self.l_i:.1e} m\n" + f"\tt_ci = {self.t_ci:.1e} s\n" + f"\tv_ti = {self.vi_th:.1e} m/s\n" + f"\tvA = {self.vA:.1e} m/s\n" + ) + print( + f"Numerical parameters:\n" + f"\tdz = {self.Lz / self.NZ:.1e} m\n" + f"\tdt = {self.dt:.1e} s\n" + f"\tdiag steps = {self.diag_steps:d}\n" + f"\ttotal steps = {self.total_steps:d}\n" + ) + + self.setup_run() + + def get_plasma_quantities(self): + """Calculate various plasma parameters based on the simulation input.""" + + # Ion mass (kg) + self.M = constants.m_p + + # Cyclotron angular frequency (rad/s) and period (s) + self.w_ci = constants.q_e * abs(self.B0) / self.M + self.t_ci = 2.0 * np.pi / self.w_ci + + # Ion plasma frequency (Hz) + self.w_pi = np.sqrt(constants.q_e**2 * self.n0 / (self.M * constants.ep0)) + + # Ion skin depth (m) + self.l_i = constants.c / self.w_pi + + # # Alfven speed (m/s): vA = B / sqrt(mu0 * n * (M + m)) = c * omega_ci / w_pi + self.vA = abs(self.B0) / np.sqrt( + constants.mu0 * self.n0 * (constants.m_e + self.M) + ) + + # calculate thermal speeds + self.vi_th = np.sqrt(self.T_i * constants.q_e / self.M) + + # Ion Larmor radius (m) + self.rho_i = self.vi_th / self.w_ci + + def load_fields(self): + Br = fields.BxFPExternalWrapper(include_ghosts=False) + Bt = fields.ByFPExternalWrapper(include_ghosts=False) + Bz = fields.BzFPExternalWrapper(include_ghosts=False) + + Br[:, :] = 0.0 + Bt[:, :] = 0.0 + + RM, ZM = np.meshgrid(Bz.mesh("r"), Bz.mesh("z"), indexing="ij") + + Bz[:, :] = self.Bz(RM) * (RM <= self.R_c) + comm.Barrier() + + def setup_run(self): + """Setup simulation components.""" + + ####################################################################### + # Set geometry and boundary conditions # + ####################################################################### + + # Create grid + self.grid = picmi.CylindricalGrid( + number_of_cells=[self.NR, self.NZ], + lower_bound=[0.0, -self.Lz / 2.0], + upper_bound=[self.Lr, self.Lz / 2.0], + lower_boundary_conditions=["none", "periodic"], + upper_boundary_conditions=["dirichlet", "periodic"], + lower_boundary_conditions_particles=["none", "periodic"], + upper_boundary_conditions_particles=["absorbing", "periodic"], + warpx_max_grid_size=self.NZ, + ) + simulation.time_step_size = self.dt + simulation.max_steps = self.total_steps + simulation.current_deposition_algo = "direct" + simulation.particle_shape = 1 + simulation.use_filter = True + simulation.verbose = self.verbose + + ####################################################################### + # Field solver 
and external field # + ####################################################################### + # External Field definition. Sigmoid starting around 2.5 us + A_ext = { + "uniform": { + "read_from_file": True, + "path": "Afield.h5", + "A_time_external_function": "1/(1+exp(5*(1-(t-t0_ramp)*sqrt(2)/tau_ramp)))", + } + } + + self.solver = picmi.HybridPICSolver( + grid=self.grid, + gamma=1.0, + Te=self.T_e, + n0=self.n0, + n_floor=0.05 * self.n0, + plasma_resistivity="if(rho<=rho_floor,eta_v,eta_p)", + plasma_hyper_resistivity=1e-8, + substeps=self.substeps, + A_external=A_ext, + tau_ramp=20e-6, + t0_ramp=5e-6, + rho_floor=0.05 * self.n0 * constants.q_e, + eta_p=1e-8, + eta_v=1e-3, + ) + simulation.solver = self.solver + + # Add field loader callback + B_ext = picmi.LoadInitialFieldFromPython( + load_from_python=self.load_fields, + warpx_do_divb_cleaning_external=True, + load_B=True, + load_E=False, + ) + simulation.add_applied_field(B_ext) + + ####################################################################### + # Particle types setup # + ####################################################################### + r_omega = "(sqrt(x*x+y*y)*q_e*B0/m_p)" + dlnndr = "((-1/delta_p)/(1+exp(-(sqrt(x*x+y*y)-R_p)/delta_p)))" + vth = f"0.5*(-{r_omega}+sqrt({r_omega}*{r_omega}+4*q_e*T_i*{dlnndr}/m_p))" + + momentum_expr = [f"y*{vth}", f"-x*{vth}", "0"] + + self.ions = picmi.Species( + name="ions", + charge="q_e", + mass=self.M, + initial_distribution=picmi.AnalyticDistribution( + density_expression="n0_p/(1+exp((sqrt(x*x+y*y)-R_p)/delta_p))", + momentum_expressions=momentum_expr, + warpx_momentum_spread_expressions=[f"{str(self.vi_th)}"] * 3, + warpx_density_min=0.01 * self.n0, + R_p=self.R_p, + delta_p=self.delta_p, + n0_p=self.n0, + B0=self.B0, + T_i=self.T_i, + ), + ) + simulation.add_species( + self.ions, + layout=picmi.PseudoRandomLayout( + grid=self.grid, n_macroparticles_per_cell=self.NPPC + ), + ) + + ####################################################################### + # Add diagnostics # + ####################################################################### + + if self.test: + particle_diag = picmi.ParticleDiagnostic( + name="diag1", + period=self.diag_steps, + species=[self.ions], + data_list=["ux", "uy", "uz", "x", "z", "weighting"], + write_dir="diags", + warpx_format="plotfile", + ) + simulation.add_diagnostic(particle_diag) + field_diag = picmi.FieldDiagnostic( + name="diag1", + grid=self.grid, + period=self.diag_steps, + data_list=["B", "E", "rho"], + write_dir="diags", + warpx_format="plotfile", + ) + simulation.add_diagnostic(field_diag) + + ####################################################################### + # Initialize # + ####################################################################### + + if comm.rank == 0: + if Path.exists(Path("diags")): + shutil.rmtree("diags") + Path("diags").mkdir(parents=True, exist_ok=True) + + # Initialize inputs and WarpX instance + simulation.initialize_inputs() + simulation.initialize_warpx() + + +########################## +# parse input parameters +########################## + +parser = argparse.ArgumentParser() +parser.add_argument( + "-t", + "--test", + help="toggle whether this script is run as a short CI test", + action="store_true", +) +parser.add_argument( + "-v", + "--verbose", + help="Verbose output", + action="store_true", +) +args, left = parser.parse_known_args() +sys.argv = sys.argv[:1] + left + +run = PlasmaCylinderCompression(test=args.test, verbose=args.verbose) +simulation.step() diff --git 
a/Python/pywarpx/HybridPICModel.py b/Python/pywarpx/HybridPICModel.py index 7bd8c961950..f94f44ce931 100644 --- a/Python/pywarpx/HybridPICModel.py +++ b/Python/pywarpx/HybridPICModel.py @@ -9,3 +9,4 @@ from .Bucket import Bucket hybridpicmodel = Bucket("hybrid_pic_model") +external_vector_potential = Bucket("external_vector_potential") diff --git a/Python/pywarpx/WarpX.py b/Python/pywarpx/WarpX.py index 9ef7019cda9..9b0446bcc79 100644 --- a/Python/pywarpx/WarpX.py +++ b/Python/pywarpx/WarpX.py @@ -20,7 +20,7 @@ from .Diagnostics import diagnostics, reduced_diagnostics from .EB2 import eb2 from .Geometry import geometry -from .HybridPICModel import hybridpicmodel +from .HybridPICModel import external_vector_potential, hybridpicmodel from .Interpolation import interpolation from .Lasers import lasers, lasers_list from .Particles import particles, particles_list @@ -46,6 +46,7 @@ def create_argv_list(self, **kw): argv += amrex.attrlist() argv += geometry.attrlist() argv += hybridpicmodel.attrlist() + argv += external_vector_potential.attrlist() argv += boundary.attrlist() argv += algo.attrlist() argv += interpolation.attrlist() diff --git a/Python/pywarpx/__init__.py b/Python/pywarpx/__init__.py index 054ca451756..b8e025342dd 100644 --- a/Python/pywarpx/__init__.py +++ b/Python/pywarpx/__init__.py @@ -33,7 +33,7 @@ from .Diagnostics import diagnostics, reduced_diagnostics # noqa from .EB2 import eb2 # noqa from .Geometry import geometry # noqa -from .HybridPICModel import hybridpicmodel # noqa +from .HybridPICModel import hybridpicmodel, external_vector_potential # noqa from .Interpolation import interpolation # noqa from .Lasers import lasers # noqa from .LoadThirdParty import load_cupy # noqa diff --git a/Python/pywarpx/fields.py b/Python/pywarpx/fields.py index 9beef1de5c8..a81999103d9 100644 --- a/Python/pywarpx/fields.py +++ b/Python/pywarpx/fields.py @@ -578,6 +578,24 @@ def norm0(self, *args): return self.mf.norm0(*args) +def CustomNamedxWrapper(mf_name, level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name=mf_name, idir=0, level=level, include_ghosts=include_ghosts + ) + + +def CustomNamedyWrapper(mf_name, level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name=mf_name, idir=1, level=level, include_ghosts=include_ghosts + ) + + +def CustomNamedzWrapper(mf_name, level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name=mf_name, idir=2, level=level, include_ghosts=include_ghosts + ) + + def ExWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( mf_name="Efield_aux", idir=0, level=level, include_ghosts=include_ghosts @@ -704,6 +722,87 @@ def BzFPExternalWrapper(level=0, include_ghosts=False): ) +def AxHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_A_fp_external", + idir=0, + level=level, + include_ghosts=include_ghosts, + ) + + +def AyHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_A_fp_external", + idir=1, + level=level, + include_ghosts=include_ghosts, + ) + + +def AzHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_A_fp_external", + idir=2, + level=level, + include_ghosts=include_ghosts, + ) + + +def ExHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_E_fp_external", + idir=0, + level=level, + include_ghosts=include_ghosts, + ) + + +def EyHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + 
mf_name="hybrid_E_fp_external", + idir=1, + level=level, + include_ghosts=include_ghosts, + ) + + +def EzHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_E_fp_external", + idir=2, + level=level, + include_ghosts=include_ghosts, + ) + + +def BxHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_B_fp_external", + idir=0, + level=level, + include_ghosts=include_ghosts, + ) + + +def ByHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_B_fp_external", + idir=1, + level=level, + include_ghosts=include_ghosts, + ) + + +def BzHybridExternalWrapper(level=0, include_ghosts=False): + return _MultiFABWrapper( + mf_name="hybrid_B_fp_external", + idir=2, + level=level, + include_ghosts=include_ghosts, + ) + + def JxFPWrapper(level=0, include_ghosts=False): return _MultiFABWrapper( mf_name="current_fp", idir=0, level=level, include_ghosts=include_ghosts diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index da673671953..f660570ca7c 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1853,8 +1853,37 @@ class HybridPICSolver(picmistandard.base._ClassWithInit): substeps: int, default=100 Number of substeps to take when updating the B-field. + holmstrom_vacuum_region: bool, default=False + Flag to determine handling of vacuum region. Setting to True will solve the simplified Generalized Ohm's Law dropping the Hall and pressure terms in the vacuum region. + This flag is useful for suppressing vacuum region fluctuations. A large resistivity value must be used when rho <= rho_floor. + Jx/y/z_external_function: str Function of space and time specifying external (non-plasma) currents. + + A_external: dict + Function of space and time specifying external (non-plasma) vector potential fields. + It is expected that a nested dicitonary will be passed + into picmi for each field that has different timings + e.g. 
+ A_external = { + '<field name>': { + 'Ax_external_function': <function string>, + 'Ay_external_function': <function string>, + 'Az_external_function': <function string>, + 'A_time_external_function': <function string> + }, + '<field name 2>': {...} + } + + or, if fields are to be loaded from an OpenPMD file, + A_external = { + '<field name>': { + 'read_from_file': True, + 'path': <path to OpenPMD file>, + 'A_time_external_function': <function string> + }, + '<field name 2>': {...} + } """ def __init__( self, grid, Te=None, n0=None, gamma=None, n_floor=None, plasma_resistivity=None, plasma_hyper_resistivity=None, substeps=None, + holmstrom_vacuum_region=None, Jx_external_function=None, Jy_external_function=None, Jz_external_function=None, + A_external=None, **kw, ): self.grid = grid @@ -1884,10 +1915,14 @@ def __init__( self.substeps = substeps + self.holmstrom_vacuum_region = holmstrom_vacuum_region + self.Jx_external_function = Jx_external_function self.Jy_external_function = Jy_external_function self.Jz_external_function = Jz_external_function + self.A_external = A_external + # Handle keyword arguments used in expressions self.user_defined_kw = {} for k in list(kw.keys()): @@ -1918,6 +1953,7 @@ def solver_initialize_inputs(self): ) pywarpx.hybridpicmodel.plasma_hyper_resistivity = self.plasma_hyper_resistivity pywarpx.hybridpicmodel.substeps = self.substeps + pywarpx.hybridpicmodel.holmstrom_vacuum_region = self.holmstrom_vacuum_region pywarpx.hybridpicmodel.__setattr__( "Jx_external_grid_function(x,y,z,t)", pywarpx.my_constants.mangle_expression( self.Jx_external_function, self.mangle_dict ), ) @@ -1936,6 +1972,47 @@ pywarpx.hybridpicmodel.__setattr__( "Jz_external_grid_function(x,y,z,t)", pywarpx.my_constants.mangle_expression( self.Jz_external_function, self.mangle_dict ), ) + if self.A_external is not None: + pywarpx.hybridpicmodel.add_external_fields = True + pywarpx.external_vector_potential.__setattr__( + "fields", + pywarpx.my_constants.mangle_expression( + list(self.A_external.keys()), self.mangle_dict + ), + ) + for field_name, field_dict in self.A_external.items(): + if field_dict.get("read_from_file", False): + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.read_from_file", field_dict["read_from_file"] + ) + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.path", field_dict["path"] + ) + else: + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.Ax_external_grid_function(x,y,z)", + pywarpx.my_constants.mangle_expression( + field_dict["Ax_external_function"], self.mangle_dict + ), + ) + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.Ay_external_grid_function(x,y,z)", + pywarpx.my_constants.mangle_expression( + field_dict["Ay_external_function"], self.mangle_dict + ), + ) + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.Az_external_grid_function(x,y,z)", + pywarpx.my_constants.mangle_expression( + field_dict["Az_external_function"], self.mangle_dict + ), + ) + pywarpx.external_vector_potential.__setattr__( + f"{field_name}.A_time_external_function(t)", + pywarpx.my_constants.mangle_expression( + field_dict["A_time_external_function"], self.mangle_dict + ), + ) class ElectrostaticSolver(picmistandard.PICMI_ElectrostaticSolver): diff --git a/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json new file mode 100644 index 00000000000..6cde3a9450e --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json @@ -0,0 +1,20 @@ +{ + "lev=0": { + "Bx": 0.5334253070691776, + "By": 0.5318560243634998, + "Bz": 2252.108905639938, + "Ex": 10509838.331420777, + "Ey": 10512676.798857061, + "Ez": 8848.113963901804, + "rho": 384112.2912140536 + }, +
"ions": { + "particle_momentum_x": 2.161294367543349e-16, + "particle_momentum_y": 2.161870747294985e-16, + "particle_momentum_z": 2.0513400435256855e-16, + "particle_position_x": 769864.202585846, + "particle_position_y": 769908.6569812088, + "particle_position_z": 620721.1900338201, + "particle_weight": 1.008292384042714e+19 + } +} \ No newline at end of file diff --git a/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json new file mode 100644 index 00000000000..6fd2ca04fce --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json @@ -0,0 +1,20 @@ +{ + "lev=0": { + "Br": 0.01190012639573578, + "Bt": 0.011313481779415917, + "Bz": 11.684908684984164, + "Er": 154581.58512851578, + "Et": 4798.276941148807, + "Ez": 193.22344271401872, + "rho": 7968.182346905438 + }, + "ions": { + "particle_momentum_x": 3.1125151786241107e-18, + "particle_momentum_y": 3.119385993047207e-18, + "particle_momentum_z": 3.0289560038617916e-18, + "particle_position_x": 13628.662686419664, + "particle_position_y": 2285.6952310457755, + "particle_theta": 115055.48935725243, + "particle_weight": 2.525423582445981e+18 + } +} \ No newline at end of file diff --git a/Source/FieldSolver/FiniteDifferenceSolver/CMakeLists.txt b/Source/FieldSolver/FiniteDifferenceSolver/CMakeLists.txt index 19c2092d1a6..7539d706632 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/CMakeLists.txt +++ b/Source/FieldSolver/FiniteDifferenceSolver/CMakeLists.txt @@ -3,6 +3,7 @@ foreach(D IN LISTS WarpX_DIMS) target_sources(lib_${SD} PRIVATE ComputeDivE.cpp + ComputeCurlA.cpp EvolveB.cpp EvolveBPML.cpp EvolveE.cpp diff --git a/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp new file mode 100644 index 00000000000..30cbdb60508 --- /dev/null +++ b/Source/FieldSolver/FiniteDifferenceSolver/ComputeCurlA.cpp @@ -0,0 +1,306 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: S. Eric Clark (Helion Energy) + * + * License: BSD-3-Clause-LBNL + */ + +#include "FiniteDifferenceSolver.H" + +#include "EmbeddedBoundary/Enabled.H" +#ifdef WARPX_DIM_RZ +# include "FiniteDifferenceAlgorithms/CylindricalYeeAlgorithm.H" +#else +# include "FiniteDifferenceAlgorithms/CartesianYeeAlgorithm.H" +#endif + +#include "Utils/TextMsg.H" +#include "WarpX.H" + +using namespace amrex; + +void FiniteDifferenceSolver::ComputeCurlA ( + ablastr::fields::VectorField& Bfield, + ablastr::fields::VectorField const& Afield, + std::array< std::unique_ptr,3> const& eb_update_B, + int lev ) +{ + // Select algorithm (The choice of algorithm is a runtime option, + // but we compile code for each algorithm, using templates) + if (m_fdtd_algo == ElectromagneticSolverAlgo::HybridPIC) { +#ifdef WARPX_DIM_RZ + ComputeCurlACylindrical ( + Bfield, Afield, eb_update_B, lev + ); + +#else + ComputeCurlACartesian ( + Bfield, Afield, eb_update_B, lev + ); + +#endif + } else { + amrex::Abort(Utils::TextMsg::Err( + "ComputeCurl: Unknown algorithm choice.")); + } +} + +// /** +// * \brief Calculate B from the curl of A +// * i.e. 
B = curl(A); the output field is on the B-field mesh staggering +// * +// * \param[out] Bfield output of the curl operation +// * \param[in] Afield input staggered field, on the E/J/A mesh staggering +// */ +#ifdef WARPX_DIM_RZ +template<typename T_Algo> +void FiniteDifferenceSolver::ComputeCurlACylindrical ( + ablastr::fields::VectorField& Bfield, + ablastr::fields::VectorField const& Afield, + std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update_B, + int lev +) +{ + // for the profiler + amrex::LayoutData<amrex::Real>* cost = WarpX::getCosts(lev); + + // reset Bfield + Bfield[0]->setVal(0); + Bfield[1]->setVal(0); + Bfield[2]->setVal(0); + + // Loop through the grids, and over the tiles within each grid +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for ( MFIter mfi(*Afield[0], TilingIfNotGPU()); mfi.isValid(); ++mfi ) { + if (cost && WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) + { + amrex::Gpu::synchronize(); + } + Real wt = static_cast<Real>(amrex::second()); + + // Extract field data for this grid/tile + Array4<Real const> const& Ar = Afield[0]->const_array(mfi); + Array4<Real const> const& At = Afield[1]->const_array(mfi); + Array4<Real const> const& Az = Afield[2]->const_array(mfi); + Array4<Real> const& Br = Bfield[0]->array(mfi); + Array4<Real> const& Bt = Bfield[1]->array(mfi); + Array4<Real> const& Bz = Bfield[2]->array(mfi); + + // Extract structures indicating where the fields + // should be updated, given the position of the embedded boundaries. + amrex::Array4<int> update_Br_arr, update_Bt_arr, update_Bz_arr; + if (EB::enabled()) { + update_Br_arr = eb_update_B[0]->array(mfi); + update_Bt_arr = eb_update_B[1]->array(mfi); + update_Bz_arr = eb_update_B[2]->array(mfi); + } + + // Extract stencil coefficients + Real const * const AMREX_RESTRICT coefs_r = m_stencil_coefs_r.dataPtr(); + int const n_coefs_r = static_cast<int>(m_stencil_coefs_r.size()); + Real const * const AMREX_RESTRICT coefs_z = m_stencil_coefs_z.dataPtr(); + int const n_coefs_z = static_cast<int>(m_stencil_coefs_z.size()); + + // Extract cylindrical specific parameters + Real const dr = m_dr; + int const nmodes = m_nmodes; + Real const rmin = m_rmin; + + // Extract tileboxes for which to loop over + Box const& tbr = mfi.tilebox(Bfield[0]->ixType().toIntVect()); + Box const& tbt = mfi.tilebox(Bfield[1]->ixType().toIntVect()); + Box const& tbz = mfi.tilebox(Bfield[2]->ixType().toIntVect()); + + // Calculate the B-field from the A-field + amrex::ParallelFor(tbr, tbt, tbz, + + // Br calculation + [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ + // Skip field update in the embedded boundaries + if (update_Br_arr && update_Br_arr(i, j, 0) == 0) { return; } + + Real const r = rmin + i*dr; // r on nodal point (Br is nodal in r) + if (r != 0) { // Off-axis, regular Maxwell equations + Br(i, j, 0, 0) = - T_Algo::UpwardDz(At, coefs_z, n_coefs_z, i, j, 0, 0); // Mode m=0 + for (int m=1; m(amrex::second()) - wt; + amrex::HostDevice::Atomic::Add( &(*cost)[mfi.index()], wt); + } + } +} + +#else + +template<typename T_Algo> +void FiniteDifferenceSolver::ComputeCurlACartesian ( + ablastr::fields::VectorField & Bfield, + ablastr::fields::VectorField const& Afield, + std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update_B, + int lev +) +{ + using ablastr::fields::Direction; + + // for the profiler + amrex::LayoutData<amrex::Real>* cost = WarpX::getCosts(lev); + + // reset Bfield + Bfield[0]->setVal(0); + Bfield[1]->setVal(0); + Bfield[2]->setVal(0); + + // Loop through the grids, and over the tiles within each grid +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif
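+    // Each ParallelFor kernel below evaluates one component of B = curl(A)
+    // on that component's (B-staggered) tilebox, e.g.
+    //   Bx = dAz/dy - dAy/dz,
+    // using the upward finite-difference stencils of the chosen algorithm.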
for ( MFIter mfi(*Afield[0], TilingIfNotGPU()); mfi.isValid(); ++mfi ) { + if (cost && WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) { + amrex::Gpu::synchronize(); + } + auto wt = static_cast(amrex::second()); + + // Extract field data for this grid/tile + Array4 const &Bx = Bfield[0]->array(mfi); + Array4 const &By = Bfield[1]->array(mfi); + Array4 const &Bz = Bfield[2]->array(mfi); + Array4 const &Ax = Afield[0]->const_array(mfi); + Array4 const &Ay = Afield[1]->const_array(mfi); + Array4 const &Az = Afield[2]->const_array(mfi); + + // Extract structures indicating where the fields + // should be updated, given the position of the embedded boundaries. + amrex::Array4 update_Bx_arr, update_By_arr, update_Bz_arr; + if (EB::enabled()) { + update_Bx_arr = eb_update_B[0]->array(mfi); + update_By_arr = eb_update_B[1]->array(mfi); + update_Bz_arr = eb_update_B[2]->array(mfi); + } + + // Extract stencil coefficients + Real const * const AMREX_RESTRICT coefs_x = m_stencil_coefs_x.dataPtr(); + auto const n_coefs_x = static_cast(m_stencil_coefs_x.size()); + Real const * const AMREX_RESTRICT coefs_y = m_stencil_coefs_y.dataPtr(); + auto const n_coefs_y = static_cast(m_stencil_coefs_y.size()); + Real const * const AMREX_RESTRICT coefs_z = m_stencil_coefs_z.dataPtr(); + auto const n_coefs_z = static_cast(m_stencil_coefs_z.size()); + + // Extract tileboxes for which to loop + Box const& tbx = mfi.tilebox(Bfield[0]->ixType().toIntVect()); + Box const& tby = mfi.tilebox(Bfield[1]->ixType().toIntVect()); + Box const& tbz = mfi.tilebox(Bfield[2]->ixType().toIntVect()); + + // Calculate the curl of A + amrex::ParallelFor(tbx, tby, tbz, + + // Bx calculation + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + // Skip field update in the embedded boundaries + if (update_Bx_arr && update_Bx_arr(i, j, k) == 0) { return; } + + Bx(i, j, k) = ( + - T_Algo::UpwardDz(Ay, coefs_z, n_coefs_z, i, j, k) + + T_Algo::UpwardDy(Az, coefs_y, n_coefs_y, i, j, k) + ); + }, + + // By calculation + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + // Skip field update in the embedded boundaries + if (update_By_arr && update_By_arr(i, j, k) == 0) { return; } + + By(i, j, k) = ( + - T_Algo::UpwardDx(Az, coefs_x, n_coefs_x, i, j, k) + + T_Algo::UpwardDz(Ax, coefs_z, n_coefs_z, i, j, k) + ); + }, + + // Bz calculation + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + // Skip field update in the embedded boundaries + if (update_Bz_arr && update_Bz_arr(i, j, k) == 0) { return; } + + Bz(i, j, k) = ( + - T_Algo::UpwardDy(Ax, coefs_y, n_coefs_y, i, j, k) + + T_Algo::UpwardDx(Ay, coefs_x, n_coefs_x, i, j, k) + ); + } + ); + + if (cost && WarpX::load_balance_costs_update_algo == LoadBalanceCostsUpdateAlgo::Timers) + { + amrex::Gpu::synchronize(); + wt = static_cast(amrex::second()) - wt; + amrex::HostDevice::Atomic::Add( &(*cost)[mfi.index()], wt); + } + } +} +#endif diff --git a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H index 19b822e3628..0d12d104436 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H @@ -1,7 +1,10 @@ -/* Copyright 2020 Remi Lehe +/* Copyright 2020-2024 The WarpX Community * * This file is part of WarpX. * + * Authors: Remi Lehe (LBNL) + * S. 
Eric Clark (Helion Energy) + * + * License: BSD-3-Clause-LBNL */ @@ -172,10 +175,25 @@ class FiniteDifferenceSolver * \param[in] lev level number for the calculation */ void CalculateCurrentAmpere ( - ablastr::fields::VectorField& Jfield, - ablastr::fields::VectorField const& Bfield, - std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update_E, - int lev ); + ablastr::fields::VectorField& Jfield, + ablastr::fields::VectorField const& Bfield, + std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update_E, + int lev ); + + /** + * \brief Calculation of the B field from the vector potential A, + * i.e. B = curl(A). + * + * \param[out] Bfield vector of magnetic field MultiFabs at a given level + * \param[in] Afield vector of vector potential MultiFabs at a given level + * \param[in] eb_update_B flags indicating where the B field should be updated, given the embedded boundaries + * \param[in] lev level number for the calculation + */ + void ComputeCurlA ( + ablastr::fields::VectorField& Bfield, + ablastr::fields::VectorField const& Afield, + std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update_B, + int lev ); private: @@ -255,6 +273,14 @@ class FiniteDifferenceSolver int lev ); + template<typename T_Algo> + void ComputeCurlACylindrical ( + ablastr::fields::VectorField& Bfield, + ablastr::fields::VectorField const& Afield, + std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update_B, + int lev + ); + #else template< typename T_Algo > void EvolveBCartesian ( @@ -358,6 +384,14 @@ class FiniteDifferenceSolver std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update_E, int lev ); + + template<typename T_Algo> + void ComputeCurlACartesian ( + ablastr::fields::VectorField & Bfield, + ablastr::fields::VectorField const& Afield, + std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update_B, + int lev + ); #endif }; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/CMakeLists.txt b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/CMakeLists.txt index 1367578b0aa..bb29baefcb9 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/CMakeLists.txt +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/CMakeLists.txt @@ -3,5 +3,6 @@ foreach(D IN LISTS WarpX_DIMS) target_sources(lib_${SD} PRIVATE HybridPICModel.cpp + ExternalVectorPotential.cpp ) endforeach() diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H new file mode 100644 index 00000000000..632ff2bd785 --- /dev/null +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H @@ -0,0 +1,101 @@ +/* Copyright 2024 The WarpX Community + * + * This file is part of WarpX. + * + * Authors: S. Eric Clark (Helion Energy) + * + * License: BSD-3-Clause-LBNL + */ + +#ifndef WARPX_EXTERNAL_VECTOR_POTENTIAL_H_ +#define WARPX_EXTERNAL_VECTOR_POTENTIAL_H_ + +#include "Fields.H" + +#include "Utils/WarpXAlgorithmSelection.H" + +#include "EmbeddedBoundary/Enabled.H" +#include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" +#include "Utils/Parser/ParserUtils.H" +#include "Utils/WarpXConst.H" +#include "Utils/WarpXProfilerWrapper.H" + +#include + +#include +#include +#include +#include +#include + +#include + +/** + * \brief This class contains the parameters needed to evaluate a + * time-varying external vector potential, leading to external E/B + * fields to be applied in the hybrid solver. This class breaks up + * each passed-in field into a spatially varying profile and a time-dependent amplitude.
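+ *
+ * Concretely, each named field is split as A(x,t) = f(t) * A_space(x), so
+ * that the applied external fields follow as B_ext = f(t) * curl(A_space)
+ * and E_ext = -(df/dt) * A_space, with f(t) given by the
+ * A_time_external_function(t) input.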
+class ExternalVectorPotential
+{
+protected:
+    int m_nFields;
+
+    std::vector<std::string> m_field_names;
+
+    std::vector<std::string> m_Ax_ext_grid_function;
+    std::vector<std::string> m_Ay_ext_grid_function;
+    std::vector<std::string> m_Az_ext_grid_function;
+    std::vector<std::array<std::unique_ptr<amrex::Parser>, 3>> m_A_external_parser;
+    std::vector<std::array<amrex::ParserExecutor<4>, 3>> m_A_external;
+
+    std::vector<std::string> m_A_ext_time_function;
+    std::vector<std::unique_ptr<amrex::Parser>> m_A_external_time_parser;
+    std::vector<amrex::ParserExecutor<1>> m_A_time_scale;
+
+    std::vector<bool> m_read_A_from_file;
+    std::vector<std::string> m_external_file_path;
+
+public:
+
+    // Default Constructor
+    ExternalVectorPotential ();
+
+    void ReadParameters ();
+
+    void AllocateLevelMFs (
+        ablastr::fields::MultiFabRegister & fields,
+        int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm,
+        int ncomps,
+        const amrex::IntVect& ngEB,
+        const amrex::IntVect& Ex_nodal_flag,
+        const amrex::IntVect& Ey_nodal_flag,
+        const amrex::IntVect& Ez_nodal_flag,
+        const amrex::IntVect& Bx_nodal_flag,
+        const amrex::IntVect& By_nodal_flag,
+        const amrex::IntVect& Bz_nodal_flag
+    );
+
+    void InitData ();
+
+    void CalculateExternalCurlA ();
+    void CalculateExternalCurlA (std::string& coil_name);
+
+    AMREX_FORCE_INLINE
+    void PopulateExternalFieldFromVectorPotential (
+        ablastr::fields::VectorField const& dstField,
+        amrex::Real scale_factor,
+        ablastr::fields::VectorField const& srcField,
+        std::array< std::unique_ptr<amrex::iMultiFab>,3> const& eb_update);
+
+    void UpdateHybridExternalFields (
+        amrex::Real t,
+        amrex::Real dt
+    );
+};
+
+#endif //WARPX_EXTERNAL_VECTOR_POTENTIAL_H_
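A minimal sketch of the input-deck parameters that ReadParameters() in the new .cpp file (next diff) consumes; the field name coil1 and the constants B0 and tau are illustrative placeholders, not part of the patch:

    external_vector_potential.fields = coil1
    external_vector_potential.coil1.Ax_external_grid_function(x,y,z) = -0.5*y*B0
    external_vector_potential.coil1.Ay_external_grid_function(x,y,z) = 0.5*x*B0
    external_vector_potential.coil1.Az_external_grid_function(x,y,z) = 0.0
    external_vector_potential.coil1.A_time_external_function(t) = sin(2*pi*t/tau)
    # or, instead of analytic expressions, read A from an openPMD file:
    # external_vector_potential.coil1.read_from_file = 1
    # external_vector_potential.coil1.path = coil_A.h5

This particular A0 gives a uniform B_z = B0, scaled in time by the sine envelope.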
diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp
new file mode 100644
index 00000000000..50a62335b57
--- /dev/null
+++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp
@@ -0,0 +1,376 @@
+/* Copyright 2024 The WarpX Community
+ *
+ * This file is part of WarpX.
+ *
+ * Authors: S. Eric Clark (Helion Energy)
+ *
+ * License: BSD-3-Clause-LBNL
+ */
+
+#include "ExternalVectorPotential.H"
+#include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H"
+#include "Fields.H"
+#include "WarpX.H"
+
+#include
+
+using namespace amrex;
+using namespace warpx::fields;
+
+ExternalVectorPotential::ExternalVectorPotential ()
+{
+    ReadParameters();
+}
+
+void
+ExternalVectorPotential::ReadParameters ()
+{
+    const ParmParse pp_ext_A("external_vector_potential");
+
+    pp_ext_A.queryarr("fields", m_field_names);
+
+    WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!m_field_names.empty(),
+        "No external field names defined in external_vector_potential.fields");
+
+    m_nFields = static_cast<int>(m_field_names.size());
+
+    // Resize vectors and set defaults
+    m_Ax_ext_grid_function.resize(m_nFields);
+    m_Ay_ext_grid_function.resize(m_nFields);
+    m_Az_ext_grid_function.resize(m_nFields);
+    for (std::string & field : m_Ax_ext_grid_function) { field = "0.0"; }
+    for (std::string & field : m_Ay_ext_grid_function) { field = "0.0"; }
+    for (std::string & field : m_Az_ext_grid_function) { field = "0.0"; }
+
+    m_A_external_parser.resize(m_nFields);
+    m_A_external.resize(m_nFields);
+
+    m_A_ext_time_function.resize(m_nFields);
+    for (std::string & field_time : m_A_ext_time_function) { field_time = "1.0"; }
+
+    m_A_external_time_parser.resize(m_nFields);
+    m_A_time_scale.resize(m_nFields);
+
+    m_read_A_from_file.resize(m_nFields);
+    m_external_file_path.resize(m_nFields);
+    for (std::string & file_name : m_external_file_path) { file_name = ""; }
+
+    for (int i = 0; i < m_nFields; ++i) {
+        bool read_from_file = false;
+        utils::parser::queryWithParser(pp_ext_A,
+            (m_field_names[i]+".read_from_file").c_str(), read_from_file);
+        m_read_A_from_file[i] = read_from_file;
+
+        if (m_read_A_from_file[i]) {
+            pp_ext_A.query((m_field_names[i]+".path").c_str(), m_external_file_path[i]);
+        } else {
+            pp_ext_A.query((m_field_names[i]+".Ax_external_grid_function(x,y,z)").c_str(),
+                m_Ax_ext_grid_function[i]);
+            pp_ext_A.query((m_field_names[i]+".Ay_external_grid_function(x,y,z)").c_str(),
+                m_Ay_ext_grid_function[i]);
+            pp_ext_A.query((m_field_names[i]+".Az_external_grid_function(x,y,z)").c_str(),
+                m_Az_ext_grid_function[i]);
+        }
+
+        pp_ext_A.query((m_field_names[i]+".A_time_external_function(t)").c_str(),
+            m_A_ext_time_function[i]);
+    }
+}
+
+void
+ExternalVectorPotential::AllocateLevelMFs (
+    ablastr::fields::MultiFabRegister & fields,
+    int lev, const BoxArray& ba, const DistributionMapping& dm,
+    const int ncomps,
+    const IntVect& ngEB,
+    const IntVect& Ex_nodal_flag,
+    const IntVect& Ey_nodal_flag,
+    const IntVect& Ez_nodal_flag,
+    const IntVect& Bx_nodal_flag,
+    const IntVect& By_nodal_flag,
+    const IntVect& Bz_nodal_flag)
+{
+    using ablastr::fields::Direction;
+    for (std::string const & field_name : m_field_names) {
+        const std::string Aext_field = field_name + std::string{"_Aext"};
+        fields.alloc_init(Aext_field, Direction{0},
+            lev, amrex::convert(ba, Ex_nodal_flag),
+            dm, ncomps, ngEB, 0.0_rt);
+        fields.alloc_init(Aext_field, Direction{1},
+            lev, amrex::convert(ba, Ey_nodal_flag),
+            dm, ncomps, ngEB, 0.0_rt);
+        fields.alloc_init(Aext_field, Direction{2},
+            lev, amrex::convert(ba, Ez_nodal_flag),
+            dm, ncomps, ngEB, 0.0_rt);
+
+        const std::string curlAext_field = field_name + std::string{"_curlAext"};
+        fields.alloc_init(curlAext_field, Direction{0},
+            lev, amrex::convert(ba, Bx_nodal_flag),
+            dm, ncomps, ngEB, 0.0_rt);
+        fields.alloc_init(curlAext_field, Direction{1},
+            lev, amrex::convert(ba, By_nodal_flag),
+            dm,
ncomps, ngEB, 0.0_rt); + fields.alloc_init(curlAext_field, Direction{2}, + lev, amrex::convert(ba, Bz_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + } + fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{0}, + lev, amrex::convert(ba, Ex_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{1}, + lev, amrex::convert(ba, Ey_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_E_fp_external, Direction{2}, + lev, amrex::convert(ba, Ez_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{0}, + lev, amrex::convert(ba, Bx_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{1}, + lev, amrex::convert(ba, By_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); + fields.alloc_init(FieldType::hybrid_B_fp_external, Direction{2}, + lev, amrex::convert(ba, Bz_nodal_flag), + dm, ncomps, ngEB, 0.0_rt); +} + +void +ExternalVectorPotential::InitData () +{ + using ablastr::fields::Direction; + auto& warpx = WarpX::GetInstance(); + + int A_time_dep_count = 0; + + for (int i = 0; i < m_nFields; ++i) { + + const std::string Aext_field = m_field_names[i] + std::string{"_Aext"}; + + if (m_read_A_from_file[i]) { + // Read A fields from file + for (auto lev = 0; lev <= warpx.finestLevel(); ++lev) { +#if defined(WARPX_DIM_RZ) + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{0}, lev), + "A", "r"); + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{1}, lev), + "A", "t"); + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{2}, lev), + "A", "z"); +#else + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{0}, lev), + "A", "x"); + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{1}, lev), + "A", "y"); + warpx.ReadExternalFieldFromFile(m_external_file_path[i], + warpx.m_fields.get(Aext_field, Direction{2}, lev), + "A", "z"); +#endif + } + } else { + // Initialize the A fields from expression + m_A_external_parser[i][0] = std::make_unique( + utils::parser::makeParser(m_Ax_ext_grid_function[i],{"x","y","z","t"})); + m_A_external_parser[i][1] = std::make_unique( + utils::parser::makeParser(m_Ay_ext_grid_function[i],{"x","y","z","t"})); + m_A_external_parser[i][2] = std::make_unique( + utils::parser::makeParser(m_Az_ext_grid_function[i],{"x","y","z","t"})); + m_A_external[i][0] = m_A_external_parser[i][0]->compile<4>(); + m_A_external[i][1] = m_A_external_parser[i][1]->compile<4>(); + m_A_external[i][2] = m_A_external_parser[i][2]->compile<4>(); + + // check if the external current parsers depend on time + for (int idim=0; idim<3; idim++) { + const std::set A_ext_symbols = m_A_external_parser[i][idim]->symbols(); + WARPX_ALWAYS_ASSERT_WITH_MESSAGE(A_ext_symbols.count("t") == 0, + "Externally Applied Vector potential time variation must be set with A_time_external_function(t)"); + } + + // Initialize data onto grid + for (auto lev = 0; lev <= warpx.finestLevel(); ++lev) { + warpx.ComputeExternalFieldOnGridUsingParser( + Aext_field, + m_A_external[i][0], + m_A_external[i][1], + m_A_external[i][2], + lev, PatchType::fine, + warpx.GetEBUpdateEFlag(), + false); + + for (int idir = 0; idir < 3; ++idir) { + warpx.m_fields.get(Aext_field, Direction{idir}, lev)-> + FillBoundary(warpx.Geom(lev).periodicity()); + 
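+                    // The guard cells filled just above matter: CalculateExternalCurlA(),
+                    // called below, applies a finite-difference curl whose stencil reads
+                    // ghost values of A at box boundaries.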
} + } + } + + amrex::Gpu::streamSynchronize(); + + CalculateExternalCurlA(m_field_names[i]); + + // Generate parser for time function + m_A_external_time_parser[i] = std::make_unique( + utils::parser::makeParser(m_A_ext_time_function[i],{"t",})); + m_A_time_scale[i] = m_A_external_time_parser[i]->compile<1>(); + + const std::set A_time_ext_symbols = m_A_external_time_parser[i]->symbols(); + A_time_dep_count += static_cast(A_time_ext_symbols.count("t")); + } + + if (A_time_dep_count > 0) { + ablastr::warn_manager::WMRecordWarning( + "HybridPIC ExternalVectorPotential", + "Coulomb Gauge is Expected, please be sure to have a divergence free A. Divergence cleaning of A to be implemented soon.", + ablastr::warn_manager::WarnPriority::low + ); + } + + UpdateHybridExternalFields(warpx.gett_new(0), warpx.getdt(0)); +} + + +void +ExternalVectorPotential::CalculateExternalCurlA () +{ + for (auto fname : m_field_names) { + CalculateExternalCurlA(fname); + } +} + +void +ExternalVectorPotential::CalculateExternalCurlA (std::string& coil_name) +{ + using ablastr::fields::Direction; + auto & warpx = WarpX::GetInstance(); + + // Compute the curl of the reference A field (unscaled by time function) + const std::string Aext_field = coil_name + std::string{"_Aext"}; + const std::string curlAext_field = coil_name + std::string{"_curlAext"}; + + ablastr::fields::MultiLevelVectorField A_ext = + warpx.m_fields.get_mr_levels_alldirs(Aext_field, warpx.finestLevel()); + ablastr::fields::MultiLevelVectorField curlA_ext = + warpx.m_fields.get_mr_levels_alldirs(curlAext_field, warpx.finestLevel()); + + for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { + warpx.get_pointer_fdtd_solver_fp(lev)->ComputeCurlA( + curlA_ext[lev], + A_ext[lev], + warpx.GetEBUpdateBFlag()[lev], + lev); + + for (int idir = 0; idir < 3; ++idir) { + warpx.m_fields.get(curlAext_field, Direction{idir}, lev)-> + FillBoundary(warpx.Geom(lev).periodicity()); + } + } +} + +AMREX_FORCE_INLINE +void +ExternalVectorPotential::PopulateExternalFieldFromVectorPotential ( + ablastr::fields::VectorField const& dstField, + amrex::Real scale_factor, + ablastr::fields::VectorField const& srcField, + std::array< std::unique_ptr,3> const& eb_update) +{ + // Loop through the grids, and over the tiles within each grid +#ifdef AMREX_USE_OMP +#pragma omp parallel if (amrex::Gpu::notInLaunchRegion()) +#endif + for ( MFIter mfi(*dstField[0], TilingIfNotGPU()); mfi.isValid(); ++mfi ) { + // Extract field data for this grid/tile + Array4 const& Fx = dstField[0]->array(mfi); + Array4 const& Fy = dstField[1]->array(mfi); + Array4 const& Fz = dstField[2]->array(mfi); + + Array4 const& Sx = srcField[0]->const_array(mfi); + Array4 const& Sy = srcField[1]->const_array(mfi); + Array4 const& Sz = srcField[2]->const_array(mfi); + + // Extract structures indicating where the fields + // should be updated, given the position of the embedded boundaries. 
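+        // When embedded-boundary support is compiled out or disabled, these Array4
+        // handles remain default-constructed and evaluate to false, so the
+        // `if (update_F*_arr && ...)` checks in the kernels below update every cell.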
+ amrex::Array4 update_Fx_arr, update_Fy_arr, update_Fz_arr; + if (EB::enabled()) { + update_Fx_arr = eb_update[0]->array(mfi); + update_Fy_arr = eb_update[1]->array(mfi); + update_Fz_arr = eb_update[2]->array(mfi); + } + + // Extract tileboxes for which to loop + Box const& tbx = mfi.tilebox(dstField[0]->ixType().toIntVect()); + Box const& tby = mfi.tilebox(dstField[1]->ixType().toIntVect()); + Box const& tbz = mfi.tilebox(dstField[2]->ixType().toIntVect()); + + // Loop over the cells and update the fields + amrex::ParallelFor(tbx, tby, tbz, + + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + // Skip field update in the embedded boundaries + if (update_Fx_arr && update_Fx_arr(i, j, k) == 0) { return; } + + Fx(i,j,k) = scale_factor * Sx(i,j,k); + }, + + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + // Skip field update in the embedded boundaries + if (update_Fy_arr && update_Fy_arr(i, j, k) == 0) { return; } + + Fy(i,j,k) = scale_factor * Sy(i,j,k); + }, + + [=] AMREX_GPU_DEVICE (int i, int j, int k){ + // Skip field update in the embedded boundaries + if (update_Fz_arr && update_Fz_arr(i, j, k) == 0) { return; } + + Fz(i,j,k) = scale_factor * Sz(i,j,k); + } + ); + } +} + +void +ExternalVectorPotential::UpdateHybridExternalFields (const amrex::Real t, const amrex::Real dt) +{ + using ablastr::fields::Direction; + auto& warpx = WarpX::GetInstance(); + + + ablastr::fields::MultiLevelVectorField B_ext = + warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_B_fp_external, warpx.finestLevel()); + ablastr::fields::MultiLevelVectorField E_ext = + warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_E_fp_external, warpx.finestLevel()); + + for (int i = 0; i < m_nFields; ++i) { + const std::string Aext_field = m_field_names[i] + std::string{"_Aext"}; + const std::string curlAext_field = m_field_names[i] + std::string{"_curlAext"}; + + // Get B-field Scaling Factor + const amrex::Real scale_factor_B = m_A_time_scale[i](t); + + // Get dA/dt scaling factor based on time centered FD around t + const amrex::Real sf_l = m_A_time_scale[i](t-0.5_rt*dt); + const amrex::Real sf_r = m_A_time_scale[i](t+0.5_rt*dt); + const amrex::Real scale_factor_E = -(sf_r - sf_l)/dt; + + ablastr::fields::MultiLevelVectorField A_ext = + warpx.m_fields.get_mr_levels_alldirs(Aext_field, warpx.finestLevel()); + ablastr::fields::MultiLevelVectorField curlA_ext = + warpx.m_fields.get_mr_levels_alldirs(curlAext_field, warpx.finestLevel()); + + for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { + PopulateExternalFieldFromVectorPotential(E_ext[lev], scale_factor_E, A_ext[lev], warpx.GetEBUpdateEFlag()[lev]); + PopulateExternalFieldFromVectorPotential(B_ext[lev], scale_factor_B, curlA_ext[lev], warpx.GetEBUpdateBFlag()[lev]); + + for (int idir = 0; idir < 3; ++idir) { + E_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); + B_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); + } + } + } + amrex::Gpu::streamSynchronize(); +} diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 4b50c16a0c8..2a489e1c806 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -1,8 +1,9 @@ -/* Copyright 2023 The WarpX Community +/* Copyright 2023-2024 The WarpX Community * * This file is part of WarpX. * * Authors: Roelof Groenewald (TAE Technologies) + * S. 
Eric Clark (Helion Energy) * * License: BSD-3-Clause-LBNL */ @@ -12,6 +13,9 @@ #include "HybridPICModel_fwd.H" +#include "Fields.H" + +#include "ExternalVectorPotential.H" #include "Utils/WarpXAlgorithmSelection.H" #include "FieldSolver/FiniteDifferenceSolver/FiniteDifferenceSolver.H" @@ -23,6 +27,9 @@ #include #include +#include +#include +#include #include @@ -39,11 +46,26 @@ public: void ReadParameters (); /** Allocate hybrid-PIC specific multifabs. Called in constructor. */ - void AllocateLevelMFs (ablastr::fields::MultiFabRegister & fields, - int lev, const amrex::BoxArray& ba, const amrex::DistributionMapping& dm, - int ncomps, const amrex::IntVect& ngJ, const amrex::IntVect& ngRho, - const amrex::IntVect& jx_nodal_flag, const amrex::IntVect& jy_nodal_flag, - const amrex::IntVect& jz_nodal_flag, const amrex::IntVect& rho_nodal_flag); + void AllocateLevelMFs ( + ablastr::fields::MultiFabRegister & fields, + int lev, + const amrex::BoxArray& ba, + const amrex::DistributionMapping& dm, + int ncomps, + const amrex::IntVect& ngJ, + const amrex::IntVect& ngRho, + const amrex::IntVect& ngEB, + const amrex::IntVect& jx_nodal_flag, + const amrex::IntVect& jy_nodal_flag, + const amrex::IntVect& jz_nodal_flag, + const amrex::IntVect& rho_nodal_flag, + const amrex::IntVect& Ex_nodal_flag, + const amrex::IntVect& Ey_nodal_flag, + const amrex::IntVect& Ez_nodal_flag, + const amrex::IntVect& Bx_nodal_flag, + const amrex::IntVect& By_nodal_flag, + const amrex::IntVect& Bz_nodal_flag + ) const; void InitData (); @@ -142,7 +164,7 @@ public: * charge density (and assumption of quasi-neutrality) using the user * specified electron equation of state. * - * \param[out] Pe_filed scalar electron pressure MultiFab at a given level + * \param[out] Pe_field scalar electron pressure MultiFab at a given level * \param[in] rho_field scalar ion charge density Multifab at a given level */ void FillElectronPressureMF ( @@ -153,6 +175,8 @@ public: /** Number of substeps to take when evolving B */ int m_substeps = 10; + bool m_holmstrom_vacuum_region = false; + /** Electron temperature in eV */ amrex::Real m_elec_temp; /** Reference electron density */ @@ -178,7 +202,11 @@ public: std::string m_Jz_ext_grid_function = "0.0"; std::array< std::unique_ptr, 3> m_J_external_parser; std::array< amrex::ParserExecutor<4>, 3> m_J_external; - bool m_external_field_has_time_dependence = false; + bool m_external_current_has_time_dependence = false; + + /** External E/B fields */ + bool m_add_external_fields = false; + std::unique_ptr m_external_vector_potential; /** Gpu Vector with index type of the Jx multifab */ amrex::GpuArray Jx_IndexType; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 64ee83b10e0..3e5c04e9794 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -1,8 +1,9 @@ -/* Copyright 2023 The WarpX Community +/* Copyright 2023-2024 The WarpX Community * * This file is part of WarpX. * * Authors: Roelof Groenewald (TAE Technologies) + * S. 
Eric Clark (Helion Energy) * * License: BSD-3-Clause-LBNL */ @@ -12,6 +13,8 @@ #include "EmbeddedBoundary/Enabled.H" #include "Python/callbacks.H" #include "Fields.H" +#include "Particles/MultiParticleContainer.H" +#include "ExternalVectorPotential.H" #include "WarpX.H" using namespace amrex; @@ -30,6 +33,8 @@ void HybridPICModel::ReadParameters () // of sub steps can be specified by the user (defaults to 50). utils::parser::queryWithParser(pp_hybrid, "substeps", m_substeps); + utils::parser::queryWithParser(pp_hybrid, "holmstrom_vacuum_region", m_holmstrom_vacuum_region); + // The hybrid model requires an electron temperature, reference density // and exponent to be given. These values will be used to calculate the // electron pressure according to p = n0 * Te * (n/n0)^gamma @@ -54,15 +59,31 @@ void HybridPICModel::ReadParameters () pp_hybrid.query("Jx_external_grid_function(x,y,z,t)", m_Jx_ext_grid_function); pp_hybrid.query("Jy_external_grid_function(x,y,z,t)", m_Jy_ext_grid_function); pp_hybrid.query("Jz_external_grid_function(x,y,z,t)", m_Jz_ext_grid_function); + + // external fields + pp_hybrid.query("add_external_fields", m_add_external_fields); + + if (m_add_external_fields) { + m_external_vector_potential = std::make_unique(); + } } -void HybridPICModel::AllocateLevelMFs (ablastr::fields::MultiFabRegister & fields, - int lev, const BoxArray& ba, const DistributionMapping& dm, - const int ncomps, const IntVect& ngJ, const IntVect& ngRho, - const IntVect& jx_nodal_flag, - const IntVect& jy_nodal_flag, - const IntVect& jz_nodal_flag, - const IntVect& rho_nodal_flag) +void HybridPICModel::AllocateLevelMFs ( + ablastr::fields::MultiFabRegister & fields, + int lev, const BoxArray& ba, const DistributionMapping& dm, + const int ncomps, + const IntVect& ngJ, const IntVect& ngRho, + const IntVect& ngEB, + const IntVect& jx_nodal_flag, + const IntVect& jy_nodal_flag, + const IntVect& jz_nodal_flag, + const IntVect& rho_nodal_flag, + const IntVect& Ex_nodal_flag, + const IntVect& Ey_nodal_flag, + const IntVect& Ez_nodal_flag, + const IntVect& Bx_nodal_flag, + const IntVect& By_nodal_flag, + const IntVect& Bz_nodal_flag) const { using ablastr::fields::Direction; @@ -114,6 +135,16 @@ void HybridPICModel::AllocateLevelMFs (ablastr::fields::MultiFabRegister & field lev, amrex::convert(ba, jz_nodal_flag), dm, ncomps, IntVect(1), 0.0_rt); + if (m_add_external_fields) { + m_external_vector_potential->AllocateLevelMFs( + fields, + lev, ba, dm, + ncomps, ngEB, + Ex_nodal_flag, Ey_nodal_flag, Ez_nodal_flag, + Bx_nodal_flag, By_nodal_flag, Bz_nodal_flag + ); + } + #ifdef WARPX_DIM_RZ WARPX_ALWAYS_ASSERT_WITH_MESSAGE( (ncomps == 1), @@ -142,7 +173,7 @@ void HybridPICModel::InitData () // check if the external current parsers depend on time for (int i=0; i<3; i++) { const std::set J_ext_symbols = m_J_external_parser[i]->symbols(); - m_external_field_has_time_dependence += J_ext_symbols.count("t"); + m_external_current_has_time_dependence += J_ext_symbols.count("t"); } auto & warpx = WarpX::GetInstance(); @@ -230,11 +261,15 @@ void HybridPICModel::InitData () lev, PatchType::fine, warpx.GetEBUpdateEFlag()); } + + if (m_add_external_fields) { + m_external_vector_potential->InitData(); + } } void HybridPICModel::GetCurrentExternal () { - if (!m_external_field_has_time_dependence) { return; } + if (!m_external_current_has_time_dependence) { return; } auto& warpx = WarpX::GetInstance(); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) @@ -541,6 +576,7 @@ void HybridPICModel::BfieldEvolveRK ( } } + void 
HybridPICModel::FieldPush ( ablastr::fields::MultiLevelVectorField const& Bfield, ablastr::fields::MultiLevelVectorField const& Efield, @@ -552,13 +588,15 @@ void HybridPICModel::FieldPush ( { auto& warpx = WarpX::GetInstance(); + amrex::Real const t_old = warpx.gett_old(0); + // Calculate J = curl x B / mu0 - J_ext CalculatePlasmaCurrent(Bfield, eb_update_E); // Calculate the E-field from Ohm's law HybridPICSolveE(Efield, Jfield, Bfield, rhofield, eb_update_E, true); warpx.FillBoundaryE(ng, nodal_sync); + // Push forward the B-field using Faraday's law - amrex::Real const t_old = warpx.gett_old(0); warpx.EvolveB(dt, dt_type, t_old); warpx.FillBoundaryB(ng, nodal_sync); } diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/Make.package b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/Make.package index 8145cfcef2f..d4fa9bfc390 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/Make.package +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/Make.package @@ -1,3 +1,4 @@ CEXE_sources += HybridPICModel.cpp +CEXE_sources += ExternalVectorPotential.cpp VPATH_LOCATIONS += $(WARPX_HOME)/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index 2047e87b696..b750a7e4f20 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -1,8 +1,9 @@ -/* Copyright 2023 The WarpX Community +/* Copyright 2023-2024 The WarpX Community * * This file is part of WarpX. * * Authors: Roelof Groenewald (TAE Technologies) + * S. Eric Clark (Helion Energy) * * License: BSD-3-Clause-LBNL */ @@ -22,6 +23,7 @@ #include using namespace amrex; +using warpx::fields::FieldType; void FiniteDifferenceSolver::CalculateCurrentAmpere ( ablastr::fields::VectorField & Jfield, @@ -429,6 +431,17 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const bool include_hyper_resistivity_term = (eta_h > 0.0) && solve_for_Faraday; + const bool include_external_fields = hybrid_model->m_add_external_fields; + + const bool holmstrom_vacuum_region = hybrid_model->m_holmstrom_vacuum_region; + + auto & warpx = WarpX::GetInstance(); + ablastr::fields::VectorField Bfield_external, Efield_external; + if (include_external_fields) { + Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 + Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 + } + // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations amrex::GpuArray const& Er_stag = hybrid_model->Ex_IndexType; @@ -485,6 +498,13 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& Bt = Bfield[1]->const_array(mfi); Array4 const& Bz = Bfield[2]->const_array(mfi); + Array4 Br_ext, Bt_ext, Bz_ext; + if (include_external_fields) { + Br_ext = Bfield_external[0]->array(mfi); + Bt_ext = Bfield_external[1]->array(mfi); + Bz_ext = Bfield_external[2]->array(mfi); + } + // Loop over the cells and update the nodal E field amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int /*k*/){ @@ -499,9 +519,15 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( auto const jiz_interp = Interp(Jiz, Jz_stag, nodal, coarsen, i, j, 0, 0); // interpolate the B field to a nodal grid - auto const Br_interp = Interp(Br, Br_stag, nodal, coarsen, i, j, 0, 0); - 
auto const Bt_interp = Interp(Bt, Bt_stag, nodal, coarsen, i, j, 0, 0); - auto const Bz_interp = Interp(Bz, Bz_stag, nodal, coarsen, i, j, 0, 0); + auto Br_interp = Interp(Br, Br_stag, nodal, coarsen, i, j, 0, 0); + auto Bt_interp = Interp(Bt, Bt_stag, nodal, coarsen, i, j, 0, 0); + auto Bz_interp = Interp(Bz, Bz_stag, nodal, coarsen, i, j, 0, 0); + + if (include_external_fields) { + Br_interp += Interp(Br_ext, Br_stag, nodal, coarsen, i, j, 0, 0); + Bt_interp += Interp(Bt_ext, Bt_stag, nodal, coarsen, i, j, 0, 0); + Bz_interp += Interp(Bz_ext, Bz_stag, nodal, coarsen, i, j, 0, 0); + } // calculate enE = (J - Ji) x B enE_nodal(i, j, 0, 0) = ( @@ -558,6 +584,13 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( update_Ez_arr = eb_update_E[2]->array(mfi); } + Array4 Er_ext, Et_ext, Ez_ext; + if (include_external_fields) { + Er_ext = Efield_external[0]->array(mfi); + Et_ext = Efield_external[1]->array(mfi); + Ez_ext = Efield_external[2]->array(mfi); + } + // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_r = m_stencil_coefs_r.dataPtr(); int const n_coefs_r = static_cast(m_stencil_coefs_r.size()); @@ -582,7 +615,8 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (update_Er_arr && update_Er_arr(i, j, 0) == 0) { return; } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Er_stag, coarsen, i, j, 0, 0); + const Real rho_val = Interp(rho, nodal, Er_stag, coarsen, i, j, 0, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -594,7 +628,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure if the longitudinal part of // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 @@ -604,7 +638,11 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // interpolate the nodal neE values to the Yee grid auto enE_r = Interp(enE, nodal, Er_stag, coarsen, i, j, 0, 0); - Er(i, j, 0) = (enE_r - grad_Pe) / rho_val; + if (rho_val < rho_floor && holmstrom_vacuum_region) { + Er(i, j, 0) = 0._rt; + } else { + Er(i, j, 0) = (enE_r - grad_Pe) / rho_val_limited; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Er(i, j, 0) += eta(rho_val, jtot_val) * Jr(i, j, 0); } @@ -617,6 +655,10 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( + T_Algo::Dzz(Jr, coefs_z, n_coefs_z, i, j, 0, 0) - jr_val/(r*r); Er(i, j, 0) -= eta_h * nabla2Jr; } + + if (include_external_fields && (rho_val >= rho_floor)) { + Er(i, j, 0) -= Er_ext(i, j, 0); + } }, // Et calculation @@ -634,7 +676,8 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Et_stag, coarsen, i, j, 0, 0); + const Real rho_val = Interp(rho, nodal, Et_stag, coarsen, i, j, 0, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -646,7 +689,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure // -> 
d/dt = 0 for m = 0 @@ -655,7 +698,11 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // interpolate the nodal neE values to the Yee grid auto enE_t = Interp(enE, nodal, Et_stag, coarsen, i, j, 0, 1); - Et(i, j, 0) = (enE_t - grad_Pe) / rho_val; + if (rho_val < rho_floor && holmstrom_vacuum_region) { + Et(i, j, 0) = 0._rt; + } else { + Et(i, j, 0) = (enE_t - grad_Pe) / rho_val_limited; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Et(i, j, 0) += eta(rho_val, jtot_val) * Jt(i, j, 0); } @@ -664,9 +711,12 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const Real jt_val = Interp(Jt, Jt_stag, Et_stag, coarsen, i, j, 0, 0); auto nabla2Jt = T_Algo::Dr_rDr_over_r(Jt, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) + T_Algo::Dzz(Jt, coefs_z, n_coefs_z, i, j, 0, 0) - jt_val/(r*r); - Et(i, j, 0) -= eta_h * nabla2Jt; } + + if (include_external_fields && (rho_val >= rho_floor)) { + Et(i, j, 0) -= Et_ext(i, j, 0); + } }, // Ez calculation @@ -676,7 +726,8 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (update_Ez_arr && update_Ez_arr(i, j, 0) == 0) { return; } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, 0, 0); + const Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, 0, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -688,7 +739,7 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure if the longitudinal part of // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 @@ -698,7 +749,11 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // interpolate the nodal neE values to the Yee grid auto enE_z = Interp(enE, nodal, Ez_stag, coarsen, i, j, 0, 2); - Ez(i, j, 0) = (enE_z - grad_Pe) / rho_val; + if (rho_val < rho_floor && holmstrom_vacuum_region) { + Ez(i, j, 0) = 0._rt; + } else { + Ez(i, j, 0) = (enE_z - grad_Pe) / rho_val_limited; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ez(i, j, 0) += eta(rho_val, jtot_val) * Jz(i, j, 0); } @@ -714,6 +769,10 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Ez(i, j, 0) -= eta_h * nabla2Jz; } + + if (include_external_fields && (rho_val >= rho_floor)) { + Ez(i, j, 0) -= Ez_ext(i, j, 0); + } } ); @@ -753,6 +812,17 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( const bool include_hyper_resistivity_term = (eta_h > 0.) 
&& solve_for_Faraday; + const bool include_external_fields = hybrid_model->m_add_external_fields; + + const bool holmstrom_vacuum_region = hybrid_model->m_holmstrom_vacuum_region; + + auto & warpx = WarpX::GetInstance(); + ablastr::fields::VectorField Bfield_external, Efield_external; + if (include_external_fields) { + Bfield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_B_fp_external, 0); // lev=0 + Efield_external = warpx.m_fields.get_alldirs(FieldType::hybrid_E_fp_external, 0); // lev=0 + } + // Index type required for interpolating fields from their respective // staggering to the Ex, Ey, Ez locations amrex::GpuArray const& Ex_stag = hybrid_model->Ex_IndexType; @@ -809,6 +879,13 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 const& By = Bfield[1]->const_array(mfi); Array4 const& Bz = Bfield[2]->const_array(mfi); + Array4 Bx_ext, By_ext, Bz_ext; + if (include_external_fields) { + Bx_ext = Bfield_external[0]->array(mfi); + By_ext = Bfield_external[1]->array(mfi); + Bz_ext = Bfield_external[2]->array(mfi); + } + // Loop over the cells and update the nodal E field amrex::ParallelFor(mfi.tilebox(), [=] AMREX_GPU_DEVICE (int i, int j, int k){ @@ -823,9 +900,15 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( auto const jiz_interp = Interp(Jiz, Jz_stag, nodal, coarsen, i, j, k, 0); // interpolate the B field to a nodal grid - auto const Bx_interp = Interp(Bx, Bx_stag, nodal, coarsen, i, j, k, 0); - auto const By_interp = Interp(By, By_stag, nodal, coarsen, i, j, k, 0); - auto const Bz_interp = Interp(Bz, Bz_stag, nodal, coarsen, i, j, k, 0); + auto Bx_interp = Interp(Bx, Bx_stag, nodal, coarsen, i, j, k, 0); + auto By_interp = Interp(By, By_stag, nodal, coarsen, i, j, k, 0); + auto Bz_interp = Interp(Bz, Bz_stag, nodal, coarsen, i, j, k, 0); + + if (include_external_fields) { + Bx_interp += Interp(Bx_ext, Bx_stag, nodal, coarsen, i, j, k, 0); + By_interp += Interp(By_ext, By_stag, nodal, coarsen, i, j, k, 0); + Bz_interp += Interp(Bz_ext, Bz_stag, nodal, coarsen, i, j, k, 0); + } // calculate enE = (J - Ji) x B enE_nodal(i, j, k, 0) = ( @@ -882,6 +965,13 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( update_Ez_arr = eb_update_E[2]->array(mfi); } + Array4 Ex_ext, Ey_ext, Ez_ext; + if (include_external_fields) { + Ex_ext = Efield_external[0]->array(mfi); + Ey_ext = Efield_external[1]->array(mfi); + Ez_ext = Efield_external[2]->array(mfi); + } + // Extract stencil coefficients Real const * const AMREX_RESTRICT coefs_x = m_stencil_coefs_x.dataPtr(); auto const n_coefs_x = static_cast(m_stencil_coefs_x.size()); @@ -904,7 +994,8 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( if (update_Ex_arr && update_Ex_arr(i, j, k) == 0) { return; } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Ex_stag, coarsen, i, j, k, 0); + const Real rho_val = Interp(rho, nodal, Ex_stag, coarsen, i, j, k, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -916,7 +1007,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure if the longitudinal part of // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 @@ -926,7 +1017,11 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // 
interpolate the nodal neE values to the Yee grid auto enE_x = Interp(enE, nodal, Ex_stag, coarsen, i, j, k, 0); - Ex(i, j, k) = (enE_x - grad_Pe) / rho_val; + if (rho_val < rho_floor && holmstrom_vacuum_region) { + Ex(i, j, k) = 0._rt; + } else { + Ex(i, j, k) = (enE_x - grad_Pe) / rho_val_limited; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ex(i, j, k) += eta(rho_val, jtot_val) * Jx(i, j, k); } @@ -937,6 +1032,10 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( + T_Algo::Dzz(Jx, coefs_z, n_coefs_z, i, j, k); Ex(i, j, k) -= eta_h * nabla2Jx; } + + if (include_external_fields && (rho_val >= rho_floor)) { + Ex(i, j, k) -= Ex_ext(i, j, k); + } }, // Ey calculation @@ -946,7 +1045,8 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( if (update_Ey_arr && update_Ey_arr(i, j, k) == 0) { return; } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Ey_stag, coarsen, i, j, k, 0); + const Real rho_val = Interp(rho, nodal, Ey_stag, coarsen, i, j, k, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -958,7 +1058,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure if the longitudinal part of // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 @@ -968,7 +1068,11 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // interpolate the nodal neE values to the Yee grid auto enE_y = Interp(enE, nodal, Ey_stag, coarsen, i, j, k, 1); - Ey(i, j, k) = (enE_y - grad_Pe) / rho_val; + if (rho_val < rho_floor && holmstrom_vacuum_region) { + Ey(i, j, k) = 0._rt; + } else { + Ey(i, j, k) = (enE_y - grad_Pe) / rho_val_limited; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ey(i, j, k) += eta(rho_val, jtot_val) * Jy(i, j, k); } @@ -979,6 +1083,10 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( + T_Algo::Dzz(Jy, coefs_z, n_coefs_z, i, j, k); Ey(i, j, k) -= eta_h * nabla2Jy; } + + if (include_external_fields && (rho_val >= rho_floor)) { + Ey(i, j, k) -= Ey_ext(i, j, k); + } }, // Ez calculation @@ -988,7 +1096,8 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( if (update_Ez_arr && update_Ez_arr(i, j, k) == 0) { return; } // Interpolate to get the appropriate charge density in space - Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, k, 0); + const Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, k, 0); + Real rho_val_limited = rho_val; // Interpolate current to appropriate staggering to match E field Real jtot_val = 0._rt; @@ -1000,7 +1109,7 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( } // safety condition since we divide by rho_val later - if (rho_val < rho_floor) { rho_val = rho_floor; } + if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } // Get the gradient of the electron pressure if the longitudinal part of // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 @@ -1010,7 +1119,11 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // interpolate the nodal neE values to the Yee grid auto enE_z = Interp(enE, nodal, Ez_stag, coarsen, i, j, k, 2); - Ez(i, j, k) = (enE_z - grad_Pe) / rho_val; + if (rho_val < rho_floor && 
holmstrom_vacuum_region) { + Ez(i, j, k) = 0._rt; + } else { + Ez(i, j, k) = (enE_z - grad_Pe) / rho_val_limited; + } // Add resistivity only if E field value is used to update B if (solve_for_Faraday) { Ez(i, j, k) += eta(rho_val, jtot_val) * Jz(i, j, k); } @@ -1021,6 +1134,10 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( + T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, k); Ez(i, j, k) -= eta_h * nabla2Jz; } + + if (include_external_fields && (rho_val >= rho_floor)) { + Ez(i, j, k) -= Ez_ext(i, j, k); + } } ); diff --git a/Source/FieldSolver/FiniteDifferenceSolver/Make.package b/Source/FieldSolver/FiniteDifferenceSolver/Make.package index b3708c411fa..bc71b9b51a2 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/Make.package +++ b/Source/FieldSolver/FiniteDifferenceSolver/Make.package @@ -5,6 +5,7 @@ CEXE_sources += EvolveF.cpp CEXE_sources += EvolveG.cpp CEXE_sources += EvolveECTRho.cpp CEXE_sources += ComputeDivE.cpp +CEXE_sources += ComputeCurlA.cpp CEXE_sources += MacroscopicEvolveE.cpp CEXE_sources += EvolveBPML.cpp CEXE_sources += EvolveEPML.cpp diff --git a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp index 18efba3f445..b57def5c4fe 100644 --- a/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp +++ b/Source/FieldSolver/WarpXPushFieldsHybridPIC.cpp @@ -1,8 +1,9 @@ -/* Copyright 2023 The WarpX Community +/* Copyright 2023-2024 The WarpX Community * * This file is part of WarpX. * * Authors: Roelof Groenewald (TAE Technologies) + * S. Eric Clark (Helion Energy) * * License: BSD-3-Clause-LBNL */ @@ -33,6 +34,31 @@ void WarpX::HybridPICEvolveFields () finest_level == 0, "Ohm's law E-solve only works with a single level."); + // Get requested number of substeps to use + const int sub_steps = m_hybrid_pic_model->m_substeps; + + // Get flag to include external fields. + const bool add_external_fields = m_hybrid_pic_model->m_add_external_fields; + + // Handle field splitting for Hybrid field push + if (add_external_fields) { + // Get the external fields + m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields( + gett_old(0), + 0.5_rt*dt[0]); + + // If using split fields, subtract the external field at the old time + for (int lev = 0; lev <= finest_level; ++lev) { + for (int idim = 0; idim < 3; ++idim) { + MultiFab::Subtract( + *m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev), + *m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev), + 0, 0, 1, + m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev)->nGrowVect()); + } + } + } + // The particles have now been pushed to their t_{n+1} positions. // Perform charge deposition in component 0 of rho_fp at t_{n+1}. 
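    // rho^{n+1} deposited here is used together with rho^{n}, which was saved in
    // rho_fp_temp at the end of the previous step, so that the Ohm's-law solve can
    // work with time-centered charge density (see the copy into rho_fp_temp below).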
mypc->DepositCharge(m_fields.get_mr_levels(FieldType::rho_fp, finest_level), 0._rt); @@ -64,9 +90,6 @@ void WarpX::HybridPICEvolveFields () } } - // Get requested number of substeps to use - const int sub_steps = m_hybrid_pic_model->m_substeps; - // Get the external current m_hybrid_pic_model->GetCurrentExternal(); @@ -127,6 +150,13 @@ void WarpX::HybridPICEvolveFields () ); } + if (add_external_fields) { + // Get the external fields at E^{n+1/2} + m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields( + gett_old(0) + 0.5_rt*dt[0], + 0.5_rt*dt[0]); + } + // Now push the B field from t=n+1/2 to t=n+1 using the n+1/2 quantities for (int sub_step = 0; sub_step < sub_steps; sub_step++) { @@ -160,6 +190,12 @@ void WarpX::HybridPICEvolveFields () } } + if (add_external_fields) { + m_hybrid_pic_model->m_external_vector_potential->UpdateHybridExternalFields( + gett_new(0), + 0.5_rt*dt[0]); + } + // Calculate the electron pressure at t=n+1 m_hybrid_pic_model->CalculateElectronPressure(); @@ -175,6 +211,25 @@ void WarpX::HybridPICEvolveFields () m_eb_update_E, false); FillBoundaryE(guard_cells.ng_FieldSolver, WarpX::sync_nodal_points); + // Handle field splitting for Hybrid field push + if (add_external_fields) { + // If using split fields, add the external field at the new time + for (int lev = 0; lev <= finest_level; ++lev) { + for (int idim = 0; idim < 3; ++idim) { + MultiFab::Add( + *m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev), + *m_fields.get(FieldType::hybrid_B_fp_external, Direction{idim}, lev), + 0, 0, 1, + m_fields.get(FieldType::Bfield_fp, Direction{idim}, lev)->nGrowVect()); + MultiFab::Add( + *m_fields.get(FieldType::Efield_fp, Direction{idim}, lev), + *m_fields.get(FieldType::hybrid_E_fp_external, Direction{idim}, lev), + 0, 0, 1, + m_fields.get(FieldType::Efield_fp, Direction{idim}, lev)->nGrowVect()); + } + } + } + // Copy the rho^{n+1} values to rho_fp_temp and the J_i^{n+1/2} values to // current_fp_temp since at the next step those values will be needed as // rho^{n} and J_i^{n-1/2}. @@ -232,3 +287,15 @@ void WarpX::HybridPICDepositInitialRhoAndJ () ); } } + +void +WarpX::CalculateExternalCurlA() { + WARPX_PROFILE("WarpX::CalculateExternalCurlA()"); + + auto & warpx = WarpX::GetInstance(); + + // Get reference to External Field Object + auto* ext_vector = warpx.m_hybrid_pic_model->m_external_vector_potential.get(); + ext_vector->CalculateExternalCurlA(); + +} diff --git a/Source/Fields.H b/Source/Fields.H index 77589c4675e..271d5a835a3 100644 --- a/Source/Fields.H +++ b/Source/Fields.H @@ -50,6 +50,8 @@ namespace warpx::fields hybrid_current_fp_temp, /**< Used with Ohm's law solver. Stores the time interpolated/extrapolated current density */ hybrid_current_fp_plasma, /**< Used with Ohm's law solver. Stores plasma current calculated as J_plasma = curl x B / mu0 - J_ext */ hybrid_current_fp_external, /**< Used with Ohm's law solver. Stores external current */ + hybrid_B_fp_external, /**< Used with Ohm's law solver. Stores external B field */ + hybrid_E_fp_external, /**< Used with Ohm's law solver. Stores external E field */ Efield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */ Bfield_cp, /**< Only used with MR. The field that is updated by the field solver at each timestep, on the coarse patch of each level */ current_cp, /**< Only used with MR. 
The current that is used as a source for the field solver, on the coarse patch of each level */ @@ -102,6 +104,8 @@ namespace warpx::fields FieldType::hybrid_current_fp_temp, FieldType::hybrid_current_fp_plasma, FieldType::hybrid_current_fp_external, + FieldType::hybrid_B_fp_external, + FieldType::hybrid_E_fp_external, FieldType::Efield_cp, FieldType::Bfield_cp, FieldType::current_cp, diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index 9c2784fe867..90b8d613898 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -1048,20 +1048,25 @@ WarpX::InitLevelData (int lev, Real /*time*/) } } -void WarpX::ComputeExternalFieldOnGridUsingParser ( - warpx::fields::FieldType field, +template +void ComputeExternalFieldOnGridUsingParser_template ( + T field, amrex::ParserExecutor<4> const& fx_parser, amrex::ParserExecutor<4> const& fy_parser, amrex::ParserExecutor<4> const& fz_parser, int lev, PatchType patch_type, - amrex::Vector,3 > > const& eb_update_field) + amrex::Vector,3 > > const& eb_update_field, + bool use_eb_flags) { - auto t = gett_new(lev); + auto &warpx = WarpX::GetInstance(); + auto const &geom = warpx.Geom(lev); - auto dx_lev = geom[lev].CellSizeArray(); - const RealBox& real_box = geom[lev].ProbDomain(); + auto t = warpx.gett_new(lev); - amrex::IntVect refratio = (lev > 0 ) ? RefRatio(lev-1) : amrex::IntVect(1); + auto dx_lev = geom.CellSizeArray(); + const RealBox& real_box = geom.ProbDomain(); + + amrex::IntVect refratio = (lev > 0 ) ? WarpX::RefRatio(lev-1) : amrex::IntVect(1); if (patch_type == PatchType::coarse) { for (int idim = 0; idim < AMREX_SPACEDIM; ++idim) { dx_lev[idim] = dx_lev[idim] * refratio[idim]; @@ -1069,9 +1074,9 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( } using ablastr::fields::Direction; - amrex::MultiFab* mfx = m_fields.get(field, Direction{0}, lev); - amrex::MultiFab* mfy = m_fields.get(field, Direction{1}, lev); - amrex::MultiFab* mfz = m_fields.get(field, Direction{2}, lev); + amrex::MultiFab* mfx = warpx.m_fields.get(field, Direction{0}, lev); + amrex::MultiFab* mfy = warpx.m_fields.get(field, Direction{1}, lev); + amrex::MultiFab* mfz = warpx.m_fields.get(field, Direction{2}, lev); const amrex::IntVect x_nodal_flag = mfx->ixType().toIntVect(); const amrex::IntVect y_nodal_flag = mfy->ixType().toIntVect(); @@ -1087,7 +1092,7 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( auto const& mfzfab = mfz->array(mfi); amrex::Array4 update_fx_arr, update_fy_arr, update_fz_arr; - if (EB::enabled()) { + if (use_eb_flags && EB::enabled()) { update_fx_arr = eb_update_field[lev][0]->array(mfi); update_fy_arr = eb_update_field[lev][1]->array(mfi); update_fz_arr = eb_update_field[lev][2]->array(mfi); @@ -1181,6 +1186,68 @@ void WarpX::ComputeExternalFieldOnGridUsingParser ( } } +void WarpX::ComputeExternalFieldOnGridUsingParser ( + warpx::fields::FieldType field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, + amrex::Vector,3 > > const& eb_update_field, + bool use_eb_flags) +{ + ComputeExternalFieldOnGridUsingParser_template ( + field, + fx_parser, fy_parser, fz_parser, + lev, patch_type, eb_update_field, + use_eb_flags); +} + +void WarpX::ComputeExternalFieldOnGridUsingParser ( + std::string const& field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + 
int lev, PatchType patch_type, + amrex::Vector,3 > > const& eb_update_field, + bool use_eb_flags) +{ + ComputeExternalFieldOnGridUsingParser_template ( + field, + fx_parser, fy_parser, fz_parser, + lev, patch_type, eb_update_field, + use_eb_flags); +} + +void WarpX::ComputeExternalFieldOnGridUsingParser ( + warpx::fields::FieldType field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, + amrex::Vector,3 > > const& eb_update_field) +{ + ComputeExternalFieldOnGridUsingParser_template ( + field, + fx_parser, fy_parser, fz_parser, + lev, patch_type, eb_update_field, + true); +} + +void WarpX::ComputeExternalFieldOnGridUsingParser ( + std::string const& field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, + amrex::Vector,3 > > const& eb_update_field) +{ + ComputeExternalFieldOnGridUsingParser_template ( + field, + fx_parser, fy_parser, fz_parser, + lev, patch_type, eb_update_field, + true); +} + void WarpX::CheckGuardCells() { for (int lev = 0; lev <= max_level; ++lev) diff --git a/Source/Particles/Gather/GetExternalFields.H b/Source/Particles/Gather/GetExternalFields.H index 7000d6d7c26..90a61bd25db 100644 --- a/Source/Particles/Gather/GetExternalFields.H +++ b/Source/Particles/Gather/GetExternalFields.H @@ -112,9 +112,9 @@ struct GetExternalEBField lab_time = m_gamma_boost*m_time + m_uz_boost*z*inv_c2; z = m_gamma_boost*z + m_uz_boost*m_time; } - Bx = m_Bxfield_partparser(x, y, z, lab_time); - By = m_Byfield_partparser(x, y, z, lab_time); - Bz = m_Bzfield_partparser(x, y, z, lab_time); + Bx = m_Bxfield_partparser((amrex::ParticleReal) x, (amrex::ParticleReal) y, (amrex::ParticleReal) z, lab_time); + By = m_Byfield_partparser((amrex::ParticleReal) x, (amrex::ParticleReal) y, (amrex::ParticleReal) z, lab_time); + Bz = m_Bzfield_partparser((amrex::ParticleReal) x, (amrex::ParticleReal) y, (amrex::ParticleReal) z, lab_time); } if (m_Etype == RepeatedPlasmaLens || diff --git a/Source/Python/WarpX.cpp b/Source/Python/WarpX.cpp index 870a3a87c91..5b4b07af07b 100644 --- a/Source/Python/WarpX.cpp +++ b/Source/Python/WarpX.cpp @@ -270,6 +270,10 @@ The physical fields in WarpX have the following naming: [] (WarpX& wx) { wx.ProjectionCleanDivB(); }, "Executes projection based divergence cleaner on loaded Bfield_fp_external." ) + .def_static("calculate_hybrid_external_curlA", + [] (WarpX& wx) { wx.CalculateExternalCurlA(); }, + "Executes calculation of the curl of the external A in the hybrid solver." + ) .def("synchronize", [] (WarpX& wx) { wx.Synchronize(); }, "Synchronize particle velocities and positions." 
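A minimal usage sketch of the new string-keyed overload bound above; the field name "coil1_Aext" and the executors ax, ay, az are illustrative placeholders, and the call shape mirrors the one in ExternalVectorPotential::InitData():

    // Evaluate user-supplied A expressions for a hypothetical coil onto level 0,
    // without masking embedded-boundary points (use_eb_flags = false).
    auto & warpx = WarpX::GetInstance();
    warpx.ComputeExternalFieldOnGridUsingParser(
        std::string{"coil1_Aext"},   // assumed to be allocated in the field register
        ax, ay, az,                  // amrex::ParserExecutor<4> for the three components
        /*lev=*/0, PatchType::fine,
        warpx.GetEBUpdateEFlag(),
        /*use_eb_flags=*/false);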
diff --git a/Source/WarpX.H b/Source/WarpX.H index ddfd545db74..29439002a3a 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -164,6 +164,7 @@ public: MultiDiagnostics& GetMultiDiags () {return *multi_diags;} ParticleBoundaryBuffer& GetParticleBoundaryBuffer () { return *m_particle_boundary_buffer; } amrex::Vector,3 > >& GetEBUpdateEFlag() { return m_eb_update_E; } + amrex::Vector,3 > >& GetEBUpdateBFlag() { return m_eb_update_B; } amrex::Vector< std::unique_ptr > const & GetEBReduceParticleShapeFlag() const { return m_eb_reduce_particle_shape; } /** @@ -831,6 +832,7 @@ public: void ComputeDivE(amrex::MultiFab& divE, int lev); void ProjectionCleanDivB (); + void CalculateExternalCurlA (); [[nodiscard]] amrex::IntVect getngEB() const { return guard_cells.ng_alloc_EB; } [[nodiscard]] amrex::IntVect getngF() const { return guard_cells.ng_alloc_F; } @@ -875,14 +877,24 @@ public: * on the staggered yee-grid or cell-centered grid, in the interior cells * and guard cells. * - * \param[in] field FieldType + * \param[in] field FieldType to grab from register to write into * \param[in] fx_parser parser function to initialize x-field * \param[in] fy_parser parser function to initialize y-field * \param[in] fz_parser parser function to initialize z-field * \param[in] lev level of the Multifabs that is initialized * \param[in] patch_type PatchType on which the field is initialized (fine or coarse) * \param[in] eb_update_field flag indicating which gridpoints should be modified by this functions + * \param[in] use_eb_flags (default:true) flag indicating if eb points should be excluded or not */ + void ComputeExternalFieldOnGridUsingParser ( + warpx::fields::FieldType field, + amrex::ParserExecutor<4> const& fx_parser, + amrex::ParserExecutor<4> const& fy_parser, + amrex::ParserExecutor<4> const& fz_parser, + int lev, PatchType patch_type, + amrex::Vector,3 > > const& eb_update_field, + bool use_eb_flags); + void ComputeExternalFieldOnGridUsingParser ( warpx::fields::FieldType field, amrex::ParserExecutor<4> const& fx_parser, @@ -891,6 +903,44 @@ public: int lev, PatchType patch_type, amrex::Vector,3 > > const& eb_update_field); + /** + * \brief + * This function computes the E, B, and J fields on each level + * using the parser and the user-defined function for the external fields. + * The subroutine will parse the x_/y_z_external_grid_function and + * then, the field multifab is initialized based on the (x,y,z) position + * on the staggered yee-grid or cell-centered grid, in the interior cells + * and guard cells. 
+ *
+ * \param[in] field string containing field name to grab from register
+ * \param[in] fx_parser parser function to initialize x-field
+ * \param[in] fy_parser parser function to initialize y-field
+ * \param[in] fz_parser parser function to initialize z-field
+ * \param[in] lev level of the Multifabs that is initialized
+ * \param[in] patch_type PatchType on which the field is initialized (fine or coarse)
+ * \param[in] eb_update_field flag indicating which gridpoints should be modified by this function
+ * \param[in] use_eb_flags (default:true) flag indicating if eb points should be excluded or not
+ */
+    void ComputeExternalFieldOnGridUsingParser (
+        std::string const& field,
+        amrex::ParserExecutor<4> const& fx_parser,
+        amrex::ParserExecutor<4> const& fy_parser,
+        amrex::ParserExecutor<4> const& fz_parser,
+        int lev, PatchType patch_type,
+        amrex::Vector< std::array< std::unique_ptr<amrex::iMultiFab>,3> > const& eb_update_field,
+        bool use_eb_flags);
+
+    void ComputeExternalFieldOnGridUsingParser (
+        std::string const& field,
+        amrex::ParserExecutor<4> const& fx_parser,
+        amrex::ParserExecutor<4> const& fy_parser,
+        amrex::ParserExecutor<4> const& fz_parser,
+        int lev, PatchType patch_type,
+        amrex::Vector< std::array< std::unique_ptr<amrex::iMultiFab>,3> > const& eb_update_field);
+
    /**
     * \brief Load field values from a user-specified openPMD file,
     * for the fields Ex, Ey, Ez, Bx, By, Bz
diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp
index 4a0633369ce..c9e90850ee1 100644
--- a/Source/WarpX.cpp
+++ b/Source/WarpX.cpp
@@ -743,12 +743,22 @@ WarpX::ReadParameters ()
        use_kspace_filter = use_filter;
        use_filter = false;
    }
-    else // FDTD
+    else
    {
-        // Filter currently not working with FDTD solver in RZ geometry along R
-        // (see https://github.com/ECP-WarpX/WarpX/issues/1943)
-        WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!use_filter || filter_npass_each_dir[0] == 0,
-            "In RZ geometry with FDTD, filtering can only be apply along z. This can be controlled by setting warpx.filter_npass_each_dir");
+        if (WarpX::electromagnetic_solver_id != ElectromagneticSolverAlgo::HybridPIC) {
+            // Filter currently not working with FDTD solver in RZ geometry along R
+            // (see https://github.com/ECP-WarpX/WarpX/issues/1943)
+            WARPX_ALWAYS_ASSERT_WITH_MESSAGE(!use_filter || filter_npass_each_dir[0] == 0,
+                "In RZ geometry with FDTD, filtering can only be applied along z. This can be controlled by setting warpx.filter_npass_each_dir");
+        } else {
+            if (use_filter && filter_npass_each_dir[0] > 0) {
+                ablastr::warn_manager::WMRecordWarning(
+                    "HybridPIC ElectromagneticSolver",
+                    "Radial Filtering in RZ is not currently using radial geometric weighting to conserve charge.
Use at your own risk.", + ablastr::warn_manager::WarnPriority::low + ); + } + } } #endif @@ -2257,8 +2267,9 @@ WarpX::AllocLevelMFs (int lev, const BoxArray& ba, const DistributionMapping& dm { m_hybrid_pic_model->AllocateLevelMFs( m_fields, - lev, ba, dm, ncomps, ngJ, ngRho, jx_nodal_flag, jy_nodal_flag, - jz_nodal_flag, rho_nodal_flag + lev, ba, dm, ncomps, ngJ, ngRho, ngEB, jx_nodal_flag, jy_nodal_flag, + jz_nodal_flag, rho_nodal_flag, Ex_nodal_flag, Ey_nodal_flag, Ez_nodal_flag, + Bx_nodal_flag, By_nodal_flag, Bz_nodal_flag ); } From 072341c6b02c833e43e12c096bfeba462afd1fbf Mon Sep 17 00:00:00 2001 From: David Grote Date: Tue, 18 Feb 2025 05:44:46 -0800 Subject: [PATCH 237/278] Add PECInsulator to Curl-Curl BC (#5667) This is a temporary fix, setting a boundary condition for the Curl-Curl preconditioner for the implicit solver. This now sets the BC to Dirichlet for the PEC regions. A correct solution would have to be implemented in the curl-curl solver because of the split between the PEC and insulator sections. --- Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp index ab064772922..0a934693710 100644 --- a/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp +++ b/Source/FieldSolver/ImplicitSolvers/ImplicitSolver.cpp @@ -68,7 +68,12 @@ Array ImplicitSolver::convertFieldBCToLinOpBC (const WARPX_ABORT_WITH_MESSAGE("LinOpBCType not set for this FieldBoundaryType"); } else if (a_fbc[i] == FieldBoundaryType::Neumann) { // Also for FieldBoundaryType::PMC - lbc[i] = LinOpBCType::Neumann; + lbc[i] = LinOpBCType::symmetry; + } else if (a_fbc[i] == FieldBoundaryType::PECInsulator) { + ablastr::warn_manager::WMRecordWarning("Implicit solver", + "With PECInsulator, in the Curl-Curl preconditioner Neumann boundary will be used since the full boundary is not yet implemented.", + ablastr::warn_manager::WarnPriority::medium); + lbc[i] = LinOpBCType::symmetry; } else if (a_fbc[i] == FieldBoundaryType::None) { WARPX_ABORT_WITH_MESSAGE("LinOpBCType not set for this FieldBoundaryType"); } else if (a_fbc[i] == FieldBoundaryType::Open) { From 0659286045241691b03bf58d9832aadd6fc73d8d Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 18 Feb 2025 14:06:51 -0800 Subject: [PATCH 238/278] Perlmutter: SW Install Updates (#5648) - [x] profile: avoid repetition, use `SW_DIR` variable as in `install_...` scripts - [x] move from CFS to PSCRATCH (more stable, faster, where the binary lives); uses an undocumented, purge-exempt location for container images/software - [x] build our own boost (SW stack consistency) - [x] get our own CCache (prior one is gone) - [x] RT tested --- Docs/source/install/hpc/perlmutter.rst | 4 +-- .../install_cpu_dependencies.sh | 30 +++++++++++++++---- .../install_gpu_dependencies.sh | 30 +++++++++++++++---- .../perlmutter_cpu_warpx.profile.example | 30 +++++++++++-------- .../perlmutter_gpu_warpx.profile.example | 30 +++++++++++-------- 5 files changed, 86 insertions(+), 38 deletions(-) diff --git a/Docs/source/install/hpc/perlmutter.rst b/Docs/source/install/hpc/perlmutter.rst index 9612b64476d..7e2ae31630e 100644 --- a/Docs/source/install/hpc/perlmutter.rst +++ b/Docs/source/install/hpc/perlmutter.rst @@ -76,7 +76,7 @@ On Perlmutter, you can run either on GPU nodes with fast A100 GPUs (recommended) .. 
code-block:: bash bash $HOME/src/warpx/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh - source ${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/venvs/warpx-gpu/bin/activate + source ${PSCRATCH}/storage/sw/warpx/perlmutter/gpu/venvs/warpx-gpu/bin/activate .. dropdown:: Script Details :color: light @@ -126,7 +126,7 @@ On Perlmutter, you can run either on GPU nodes with fast A100 GPUs (recommended) .. code-block:: bash bash $HOME/src/warpx/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh - source ${CFS}/${proj}/${USER}/sw/perlmutter/cpu/venvs/warpx-cpu/bin/activate + source ${PSCRATCH}/storage/sw/warpx/perlmutter/cpu/venvs/warpx-cpu/bin/activate .. dropdown:: Script Details :color: light diff --git a/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh b/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh index 7608cb3f666..0ef14844493 100755 --- a/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh +++ b/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh @@ -31,7 +31,7 @@ fi # Remove old dependencies ##################################################### # -SW_DIR="${CFS}/${proj}/${USER}/sw/perlmutter/cpu" +SW_DIR="${PSCRATCH}/storage/sw/warpx/perlmutter/cpu" rm -rf ${SW_DIR} mkdir -p ${SW_DIR} @@ -44,9 +44,29 @@ python3 -m pip uninstall -qqq -y mpi4py 2>/dev/null || true # General extra dependencies ################################################## # +# build parallelism +PARALLEL=16 + # tmpfs build directory: avoids issues often seen with $HOME and is faster build_dir=$(mktemp -d) +# CCache +curl -Lo ccache.tar.xz https://github.com/ccache/ccache/releases/download/v4.10.2/ccache-4.10.2-linux-x86_64.tar.xz +tar -xf ccache.tar.xz +mv ccache-4.10.2-linux-x86_64 ${SW_DIR}/ccache-4.10.2 +rm -rf ccache.tar.xz + +# Boost (QED tables) +rm -rf $HOME/src/boost-temp +mkdir -p $HOME/src/boost-temp +curl -Lo $HOME/src/boost-temp/boost.tar.gz https://archives.boost.io/release/1.82.0/source/boost_1_82_0.tar.gz +tar -xzf $HOME/src/boost-temp/boost.tar.gz -C $HOME/src/boost-temp +cd $HOME/src/boost-temp/boost_1_82_0 +./bootstrap.sh --with-libraries=math --prefix=${SW_DIR}/boost-1.82.0 +./b2 cxxflags="-std=c++17" install -j ${PARALLEL} +cd - +rm -rf $HOME/src/boost-temp + # c-blosc (I/O compression) if [ -d $HOME/src/c-blosc ] then @@ -59,7 +79,7 @@ else fi rm -rf $HOME/src/c-blosc-pm-cpu-build cmake -S $HOME/src/c-blosc -B ${build_dir}/c-blosc-pm-cpu-build -DBUILD_TESTS=OFF -DBUILD_BENCHMARKS=OFF -DDEACTIVATE_AVX2=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/c-blosc-1.21.1 -cmake --build ${build_dir}/c-blosc-pm-cpu-build --target install --parallel 16 +cmake --build ${build_dir}/c-blosc-pm-cpu-build --target install --parallel ${PARALLEL} rm -rf ${build_dir}/c-blosc-pm-cpu-build # ADIOS2 @@ -74,7 +94,7 @@ else fi rm -rf $HOME/src/adios2-pm-cpu-build cmake -S $HOME/src/adios2 -B ${build_dir}/adios2-pm-cpu-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_CUDA=OFF -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3 -cmake --build ${build_dir}/adios2-pm-cpu-build --target install -j 16 +cmake --build ${build_dir}/adios2-pm-cpu-build --target install -j ${PARALLEL} rm -rf ${build_dir}/adios2-pm-cpu-build # BLAS++ (for PSATD+RZ) @@ -89,7 +109,7 @@ else fi rm -rf $HOME/src/blaspp-pm-cpu-build CXX=$(which CC) cmake -S $HOME/src/blaspp -B ${build_dir}/blaspp-pm-cpu-build -Duse_openmp=ON -Dgpu_backend=OFF -DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=${SW_DIR}/blaspp-2024.05.31 -cmake --build 
${build_dir}/blaspp-pm-cpu-build --target install --parallel 16 +cmake --build ${build_dir}/blaspp-pm-cpu-build --target install --parallel ${PARALLEL} rm -rf ${build_dir}/blaspp-pm-cpu-build # LAPACK++ (for PSATD+RZ) @@ -104,7 +124,7 @@ else fi rm -rf $HOME/src/lapackpp-pm-cpu-build CXX=$(which CC) CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S $HOME/src/lapackpp -B ${build_dir}/lapackpp-pm-cpu-build -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=${SW_DIR}/lapackpp-2024.05.31 -cmake --build ${build_dir}/lapackpp-pm-cpu-build --target install --parallel 16 +cmake --build ${build_dir}/lapackpp-pm-cpu-build --target install --parallel ${PARALLEL} rm -rf ${build_dir}/lapackpp-pm-cpu-build # Python ###################################################################### diff --git a/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh b/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh index d08ca7457d4..ffa3d0f0714 100755 --- a/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh +++ b/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh @@ -31,7 +31,7 @@ fi # Remove old dependencies ##################################################### # -SW_DIR="${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu" +SW_DIR="${PSCRATCH}/storage/sw/warpx/perlmutter/gpu" rm -rf ${SW_DIR} mkdir -p ${SW_DIR} @@ -44,9 +44,29 @@ python3 -m pip uninstall -qqq -y mpi4py 2>/dev/null || true # General extra dependencies ################################################## # +# build parallelism +PARALLEL=16 + # tmpfs build directory: avoids issues often seen with $HOME and is faster build_dir=$(mktemp -d) +# CCache +curl -Lo ccache.tar.xz https://github.com/ccache/ccache/releases/download/v4.10.2/ccache-4.10.2-linux-x86_64.tar.xz +tar -xf ccache.tar.xz +mv ccache-4.10.2-linux-x86_64 ${SW_DIR}/ccache-4.10.2 +rm -rf ccache.tar.xz + +# Boost (QED tables) +rm -rf $HOME/src/boost-temp +mkdir -p $HOME/src/boost-temp +curl -Lo $HOME/src/boost-temp/boost.tar.gz https://archives.boost.io/release/1.82.0/source/boost_1_82_0.tar.gz +tar -xzf $HOME/src/boost-temp/boost.tar.gz -C $HOME/src/boost-temp +cd $HOME/src/boost-temp/boost_1_82_0 +./bootstrap.sh --with-libraries=math --prefix=${SW_DIR}/boost-1.82.0 +./b2 cxxflags="-std=c++17" install -j ${PARALLEL} +cd - +rm -rf $HOME/src/boost-temp + # c-blosc (I/O compression) if [ -d $HOME/src/c-blosc ] then @@ -59,7 +79,7 @@ else fi rm -rf $HOME/src/c-blosc-pm-gpu-build cmake -S $HOME/src/c-blosc -B ${build_dir}/c-blosc-pm-gpu-build -DBUILD_TESTS=OFF -DBUILD_BENCHMARKS=OFF -DDEACTIVATE_AVX2=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/c-blosc-1.21.1 -cmake --build ${build_dir}/c-blosc-pm-gpu-build --target install --parallel 16 +cmake --build ${build_dir}/c-blosc-pm-gpu-build --target install --parallel ${PARALLEL} rm -rf ${build_dir}/c-blosc-pm-gpu-build # ADIOS2 @@ -74,7 +94,7 @@ else fi rm -rf $HOME/src/adios2-pm-gpu-build cmake -S $HOME/src/adios2 -B ${build_dir}/adios2-pm-gpu-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3 -cmake --build ${build_dir}/adios2-pm-gpu-build --target install -j 16 +cmake --build ${build_dir}/adios2-pm-gpu-build --target install -j ${PARALLEL} rm -rf ${build_dir}/adios2-pm-gpu-build # BLAS++ (for PSATD+RZ) @@ -89,7 +109,7 @@ else fi rm -rf $HOME/src/blaspp-pm-gpu-build CXX=$(which CC) cmake -S $HOME/src/blaspp -B ${build_dir}/blaspp-pm-gpu-build -Duse_openmp=OFF -Dgpu_backend=cuda 
-DCMAKE_CXX_STANDARD=17 -DCMAKE_INSTALL_PREFIX=${SW_DIR}/blaspp-2024.05.31 -cmake --build ${build_dir}/blaspp-pm-gpu-build --target install --parallel 16 +cmake --build ${build_dir}/blaspp-pm-gpu-build --target install --parallel ${PARALLEL} rm -rf ${build_dir}/blaspp-pm-gpu-build # LAPACK++ (for PSATD+RZ) @@ -104,7 +124,7 @@ else fi rm -rf $HOME/src/lapackpp-pm-gpu-build CXX=$(which CC) CXXFLAGS="-DLAPACK_FORTRAN_ADD_" cmake -S $HOME/src/lapackpp -B ${build_dir}/lapackpp-pm-gpu-build -DCMAKE_CXX_STANDARD=17 -Dbuild_tests=OFF -DCMAKE_INSTALL_RPATH_USE_LINK_PATH=ON -DCMAKE_INSTALL_PREFIX=${SW_DIR}/lapackpp-2024.05.31 -cmake --build ${build_dir}/lapackpp-pm-gpu-build --target install --parallel 16 +cmake --build ${build_dir}/lapackpp-pm-gpu-build --target install --parallel ${PARALLEL} rm -rf ${build_dir}/lapackpp-pm-gpu-build # Python ###################################################################### diff --git a/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example b/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example index a7493ecd4bc..fe665e87130 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example +++ b/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example @@ -10,32 +10,36 @@ module load cpu module load cmake/3.30.2 module load cray-fftw/3.3.10.6 +# missing modules installed here +export SW_DIR=${PSCRATCH}/storage/sw/warpx/perlmutter/cpu + # optional: for QED support with detailed tables -export BOOST_ROOT=/global/common/software/spackecp/perlmutter/e4s-23.08/default/spack/opt/spack/linux-sles15-zen3/gcc-12.3.0/boost-1.83.0-nxqk3hnci5g3wqv75wvsmuke3w74mzxi +export CMAKE_PREFIX_PATH=${SW_DIR}/boost-1.82.0:${CMAKE_PREFIX_PATH} +export LD_LIBRARY_PATH=${SW_DIR}/boost-1.82.0/lib:${LD_LIBRARY_PATH} # optional: for openPMD and PSATD+RZ support module load cray-hdf5-parallel/1.12.2.9 -export CMAKE_PREFIX_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/c-blosc-1.21.1:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/adios2-2.8.3:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/blaspp-2024.05.31:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/lapackpp-2024.05.31:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.1:${CMAKE_PREFIX_PATH} +export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.8.3:${CMAKE_PREFIX_PATH} +export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.05.31:${CMAKE_PREFIX_PATH} +export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.05.31:${CMAKE_PREFIX_PATH} -export LD_LIBRARY_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/c-blosc-1.21.1/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/adios2-2.8.3/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=${SW_DIR}/c-blosc-1.21.1/lib64:${LD_LIBRARY_PATH} +export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.8.3/lib64:${LD_LIBRARY_PATH} +export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.05.31/lib64:${LD_LIBRARY_PATH} +export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.05.31/lib64:${LD_LIBRARY_PATH} -export PATH=${CFS}/${proj}/${USER}/sw/perlmutter/cpu/adios2-2.8.3/bin:${PATH} +export PATH=${SW_DIR}/adios2-2.8.3/bin:${PATH} # optional: CCache -export 
PATH=/global/common/software/spackecp/perlmutter/e4s-23.08/default/spack/opt/spack/linux-sles15-zen3/gcc-11.2.0/ccache-4.8.2-cvooxdw5wgvv2g3vjxjkrpv6dopginv6/bin:$PATH +export PATH=${SW_DIR}/ccache-4.10.2:$PATH # optional: for Python bindings or libEnsemble module load cray-python/3.11.5 -if [ -d "${CFS}/${proj}/${USER}/sw/perlmutter/cpu/venvs/warpx-cpu" ] +if [ -d "${SW_DIR}/venvs/warpx-cpu" ] then - source ${CFS}/${proj}/${USER}/sw/perlmutter/cpu/venvs/warpx-cpu/bin/activate + source ${SW_DIR}/venvs/warpx-cpu/bin/activate fi # an alias to request an interactive batch node for one hour diff --git a/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example b/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example index 5d413db71e1..dd78bc8ecf3 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example +++ b/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example @@ -14,32 +14,36 @@ module load craype-accel-nvidia80 module load cudatoolkit module load cmake/3.30.2 +# missing modules installed here +export SW_DIR=${PSCRATCH}/storage/sw/warpx/perlmutter/gpu + # optional: for QED support with detailed tables -export BOOST_ROOT=/global/common/software/spackecp/perlmutter/e4s-23.08/default/spack/opt/spack/linux-sles15-zen3/gcc-12.3.0/boost-1.83.0-nxqk3hnci5g3wqv75wvsmuke3w74mzxi +export CMAKE_PREFIX_PATH=${SW_DIR}/boost-1.82.0:${CMAKE_PREFIX_PATH} +export LD_LIBRARY_PATH=${SW_DIR}/boost-1.82.0/lib:${LD_LIBRARY_PATH} # optional: for openPMD and PSATD+RZ support module load cray-hdf5-parallel/1.12.2.9 -export CMAKE_PREFIX_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/c-blosc-1.21.1:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/adios2-2.8.3:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/blaspp-2024.05.31:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/lapackpp-2024.05.31:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.1:${CMAKE_PREFIX_PATH} +export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.8.3:${CMAKE_PREFIX_PATH} +export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.05.31:${CMAKE_PREFIX_PATH} +export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.05.31:${CMAKE_PREFIX_PATH} -export LD_LIBRARY_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/c-blosc-1.21.1/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/adios2-2.8.3/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=${SW_DIR}/c-blosc-1.21.1/lib64:${LD_LIBRARY_PATH} +export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.8.3/lib64:${LD_LIBRARY_PATH} +export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.05.31/lib64:${LD_LIBRARY_PATH} +export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.05.31/lib64:${LD_LIBRARY_PATH} -export PATH=${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/adios2-2.8.3/bin:${PATH} +export PATH=${SW_DIR}/adios2-2.8.3/bin:${PATH} # optional: CCache -export PATH=/global/common/software/spackecp/perlmutter/e4s-23.08/default/spack/opt/spack/linux-sles15-zen3/gcc-11.2.0/ccache-4.8.2-cvooxdw5wgvv2g3vjxjkrpv6dopginv6/bin:$PATH +export PATH=${SW_DIR}/ccache-4.10.2:$PATH # optional: for Python bindings or libEnsemble module load cray-python/3.11.5 -if [ -d "${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/venvs/warpx-gpu" ] +if [ -d 
"${SW_DIR}/venvs/warpx-gpu" ] then - source ${CFS}/${proj%_g}/${USER}/sw/perlmutter/gpu/venvs/warpx-gpu/bin/activate + source ${SW_DIR}/venvs/warpx-gpu/bin/activate fi # an alias to request an interactive batch node for one hour From e627b9cb66f4bd55017f431a2f9ab6500bfb5423 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Tue, 18 Feb 2025 16:42:43 -0800 Subject: [PATCH 239/278] mini-PR: Cleanup in Ohm solver for readability (#5675) As said in the title, this is just a small PR to make the `HybridPICSolveE` kernels more readable. --------- Signed-off-by: roelof-groenewald --- .../HybridPICSolveE.cpp | 342 +++++++++--------- 1 file changed, 175 insertions(+), 167 deletions(-) diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index b750a7e4f20..f46b2f73e41 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -616,44 +616,45 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Interpolate to get the appropriate charge density in space const Real rho_val = Interp(rho, nodal, Er_stag, coarsen, i, j, 0, 0); - Real rho_val_limited = rho_val; - - // Interpolate current to appropriate staggering to match E field - Real jtot_val = 0._rt; - if (solve_for_Faraday && resistivity_has_J_dependence) { - const Real jr_val = Interp(Jr, Jr_stag, Er_stag, coarsen, i, j, 0, 0); - const Real jt_val = Interp(Jt, Jt_stag, Er_stag, coarsen, i, j, 0, 0); - const Real jz_val = Interp(Jz, Jz_stag, Er_stag, coarsen, i, j, 0, 0); - jtot_val = std::sqrt(jr_val*jr_val + jt_val*jt_val + jz_val*jz_val); - } - - // safety condition since we divide by rho_val later - if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } - - // Get the gradient of the electron pressure if the longitudinal part of - // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 - Real grad_Pe = 0._rt; - if (!solve_for_Faraday) { grad_Pe = T_Algo::UpwardDr(Pe, coefs_r, n_coefs_r, i, j, 0, 0); } - - // interpolate the nodal neE values to the Yee grid - auto enE_r = Interp(enE, nodal, Er_stag, coarsen, i, j, 0, 0); if (rho_val < rho_floor && holmstrom_vacuum_region) { Er(i, j, 0) = 0._rt; } else { + // Get the gradient of the electron pressure if the longitudinal part of + // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 + const Real grad_Pe = (!solve_for_Faraday) ? 
+ T_Algo::UpwardDr(Pe, coefs_r, n_coefs_r, i, j, 0, 0) + : 0._rt; + + // interpolate the nodal neE values to the Yee grid + const auto enE_r = Interp(enE, nodal, Er_stag, coarsen, i, j, 0, 0); + + // safety condition since we divide by rho + const auto rho_val_limited = std::max(rho_val, rho_floor); + Er(i, j, 0) = (enE_r - grad_Pe) / rho_val_limited; } // Add resistivity only if E field value is used to update B - if (solve_for_Faraday) { Er(i, j, 0) += eta(rho_val, jtot_val) * Jr(i, j, 0); } - - if (include_hyper_resistivity_term) { - // r on cell-centered point (Jr is cell-centered in r) - const Real r = rmin + (i + 0.5_rt)*dr; - const Real jr_val = Interp(Jr, Jr_stag, Er_stag, coarsen, i, j, 0, 0); - auto nabla2Jr = T_Algo::Dr_rDr_over_r(Jr, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) - + T_Algo::Dzz(Jr, coefs_z, n_coefs_z, i, j, 0, 0) - jr_val/(r*r); - Er(i, j, 0) -= eta_h * nabla2Jr; + if (solve_for_Faraday) { + Real jtot_val = 0._rt; + if (resistivity_has_J_dependence) { + // Interpolate current to appropriate staggering to match E field + const Real jr_val = Jr(i, j, 0); + const Real jt_val = Interp(Jt, Jt_stag, Er_stag, coarsen, i, j, 0, 0); + const Real jz_val = Interp(Jz, Jz_stag, Er_stag, coarsen, i, j, 0, 0); + jtot_val = std::sqrt(jr_val*jr_val + jt_val*jt_val + jz_val*jz_val); + } + + Er(i, j, 0) += eta(rho_val, jtot_val) * Jr(i, j, 0); + + if (include_hyper_resistivity_term) { + // r on cell-centered point (Jr is cell-centered in r) + const Real r = rmin + (i + 0.5_rt)*dr; + auto nabla2Jr = T_Algo::Dr_rDr_over_r(Jr, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) + + T_Algo::Dzz(Jr, coefs_z, n_coefs_z, i, j, 0, 0) - Jr(i, j, 0)/(r*r); + Er(i, j, 0) -= eta_h * nabla2Jr; + } } if (include_external_fields && (rho_val >= rho_floor)) { @@ -677,41 +678,41 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Interpolate to get the appropriate charge density in space const Real rho_val = Interp(rho, nodal, Et_stag, coarsen, i, j, 0, 0); - Real rho_val_limited = rho_val; - - // Interpolate current to appropriate staggering to match E field - Real jtot_val = 0._rt; - if (solve_for_Faraday && resistivity_has_J_dependence) { - const Real jr_val = Interp(Jr, Jr_stag, Et_stag, coarsen, i, j, 0, 0); - const Real jt_val = Interp(Jt, Jt_stag, Et_stag, coarsen, i, j, 0, 0); - const Real jz_val = Interp(Jz, Jz_stag, Et_stag, coarsen, i, j, 0, 0); - jtot_val = std::sqrt(jr_val*jr_val + jt_val*jt_val + jz_val*jz_val); - } - - // safety condition since we divide by rho_val later - if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } - - // Get the gradient of the electron pressure - // -> d/dt = 0 for m = 0 - auto grad_Pe = 0.0_rt; - - // interpolate the nodal neE values to the Yee grid - auto enE_t = Interp(enE, nodal, Et_stag, coarsen, i, j, 0, 1); if (rho_val < rho_floor && holmstrom_vacuum_region) { Et(i, j, 0) = 0._rt; } else { + // Get the gradient of the electron pressure + // -> d/dt = 0 for m = 0 + const auto grad_Pe = 0.0_rt; + + // interpolate the nodal neE values to the Yee grid + const auto enE_t = Interp(enE, nodal, Et_stag, coarsen, i, j, 0, 1); + + // safety condition since we divide by rho + const auto rho_val_limited = std::max(rho_val, rho_floor); + Et(i, j, 0) = (enE_t - grad_Pe) / rho_val_limited; } // Add resistivity only if E field value is used to update B - if (solve_for_Faraday) { Et(i, j, 0) += eta(rho_val, jtot_val) * Jt(i, j, 0); } + if (solve_for_Faraday) { + Real jtot_val = 0._rt; + if(resistivity_has_J_dependence) { + // Interpolate current to 
appropriate staggering to match E field + const Real jr_val = Interp(Jr, Jr_stag, Et_stag, coarsen, i, j, 0, 0); + const Real jt_val = Jt(i, j, 0); + const Real jz_val = Interp(Jz, Jz_stag, Et_stag, coarsen, i, j, 0, 0); + jtot_val = std::sqrt(jr_val*jr_val + jt_val*jt_val + jz_val*jz_val); + } + + Et(i, j, 0) += eta(rho_val, jtot_val) * Jt(i, j, 0); - if (include_hyper_resistivity_term) { - const Real jt_val = Interp(Jt, Jt_stag, Et_stag, coarsen, i, j, 0, 0); - auto nabla2Jt = T_Algo::Dr_rDr_over_r(Jt, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) - + T_Algo::Dzz(Jt, coefs_z, n_coefs_z, i, j, 0, 0) - jt_val/(r*r); - Et(i, j, 0) -= eta_h * nabla2Jt; + if (include_hyper_resistivity_term) { + auto nabla2Jt = T_Algo::Dr_rDr_over_r(Jt, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) + + T_Algo::Dzz(Jt, coefs_z, n_coefs_z, i, j, 0, 0) - Jt(i, j, 0)/(r*r); + Et(i, j, 0) -= eta_h * nabla2Jt; + } } if (include_external_fields && (rho_val >= rho_floor)) { @@ -727,47 +728,48 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( // Interpolate to get the appropriate charge density in space const Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, 0, 0); - Real rho_val_limited = rho_val; - - // Interpolate current to appropriate staggering to match E field - Real jtot_val = 0._rt; - if (solve_for_Faraday && resistivity_has_J_dependence) { - const Real jr_val = Interp(Jr, Jr_stag, Ez_stag, coarsen, i, j, 0, 0); - const Real jt_val = Interp(Jt, Jt_stag, Ez_stag, coarsen, i, j, 0, 0); - const Real jz_val = Interp(Jz, Jz_stag, Ez_stag, coarsen, i, j, 0, 0); - jtot_val = std::sqrt(jr_val*jr_val + jt_val*jt_val + jz_val*jz_val); - } - - // safety condition since we divide by rho_val later - if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } - - // Get the gradient of the electron pressure if the longitudinal part of - // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 - Real grad_Pe = 0._rt; - if (!solve_for_Faraday) { grad_Pe = T_Algo::UpwardDz(Pe, coefs_z, n_coefs_z, i, j, 0, 0); } - - // interpolate the nodal neE values to the Yee grid - auto enE_z = Interp(enE, nodal, Ez_stag, coarsen, i, j, 0, 2); if (rho_val < rho_floor && holmstrom_vacuum_region) { Ez(i, j, 0) = 0._rt; } else { + // Get the gradient of the electron pressure if the longitudinal part of + // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 + const Real grad_Pe = (!solve_for_Faraday) ? 
+ T_Algo::UpwardDz(Pe, coefs_z, n_coefs_z, i, j, 0, 0) + : 0._rt; + + // interpolate the nodal neE values to the Yee grid + const auto enE_z = Interp(enE, nodal, Ez_stag, coarsen, i, j, 0, 2); + + // safety condition since we divide by rho + const auto rho_val_limited = std::max(rho_val, rho_floor); + Ez(i, j, 0) = (enE_z - grad_Pe) / rho_val_limited; } // Add resistivity only if E field value is used to update B - if (solve_for_Faraday) { Ez(i, j, 0) += eta(rho_val, jtot_val) * Jz(i, j, 0); } + if (solve_for_Faraday) { + Real jtot_val = 0._rt; + if (resistivity_has_J_dependence) { + // Interpolate current to appropriate staggering to match E field + const Real jr_val = Interp(Jr, Jr_stag, Ez_stag, coarsen, i, j, 0, 0); + const Real jt_val = Interp(Jt, Jt_stag, Ez_stag, coarsen, i, j, 0, 0); + const Real jz_val = Jz(i, j, 0); + jtot_val = std::sqrt(jr_val*jr_val + jt_val*jt_val + jz_val*jz_val); + } - if (include_hyper_resistivity_term) { - // r on nodal point (Jz is nodal in r) - Real const r = rmin + i*dr; + Ez(i, j, 0) += eta(rho_val, jtot_val) * Jz(i, j, 0); - auto nabla2Jz = T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, 0, 0); - if (r > 0.5_rt*dr) { - nabla2Jz += T_Algo::Dr_rDr_over_r(Jz, r, dr, coefs_r, n_coefs_r, i, j, 0, 0); - } + if (include_hyper_resistivity_term) { + // r on nodal point (Jz is nodal in r) + const Real r = rmin + i*dr; - Ez(i, j, 0) -= eta_h * nabla2Jz; + auto nabla2Jz = T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, 0, 0); + if (r > 0.5_rt*dr) { + nabla2Jz += T_Algo::Dr_rDr_over_r(Jz, r, dr, coefs_r, n_coefs_r, i, j, 0, 0); + } + Ez(i, j, 0) -= eta_h * nabla2Jz; + } } if (include_external_fields && (rho_val >= rho_floor)) { @@ -995,42 +997,44 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Interpolate to get the appropriate charge density in space const Real rho_val = Interp(rho, nodal, Ex_stag, coarsen, i, j, k, 0); - Real rho_val_limited = rho_val; - - // Interpolate current to appropriate staggering to match E field - Real jtot_val = 0._rt; - if (solve_for_Faraday && resistivity_has_J_dependence) { - const Real jx_val = Interp(Jx, Jx_stag, Ex_stag, coarsen, i, j, k, 0); - const Real jy_val = Interp(Jy, Jy_stag, Ex_stag, coarsen, i, j, k, 0); - const Real jz_val = Interp(Jz, Jz_stag, Ex_stag, coarsen, i, j, k, 0); - jtot_val = std::sqrt(jx_val*jx_val + jy_val*jy_val + jz_val*jz_val); - } - - // safety condition since we divide by rho_val later - if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } - - // Get the gradient of the electron pressure if the longitudinal part of - // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 - Real grad_Pe = 0._rt; - if (!solve_for_Faraday) { grad_Pe = T_Algo::UpwardDx(Pe, coefs_x, n_coefs_x, i, j, k); } - - // interpolate the nodal neE values to the Yee grid - auto enE_x = Interp(enE, nodal, Ex_stag, coarsen, i, j, k, 0); if (rho_val < rho_floor && holmstrom_vacuum_region) { Ex(i, j, k) = 0._rt; } else { + // Get the gradient of the electron pressure if the longitudinal part of + // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 + const Real grad_Pe = (!solve_for_Faraday) ? 
+ T_Algo::UpwardDx(Pe, coefs_x, n_coefs_x, i, j, k) + : 0._rt; + + // interpolate the nodal neE values to the Yee grid + const auto enE_x = Interp(enE, nodal, Ex_stag, coarsen, i, j, k, 0); + + // safety condition since we divide by rho + const auto rho_val_limited = std::max(rho_val, rho_floor); + Ex(i, j, k) = (enE_x - grad_Pe) / rho_val_limited; } // Add resistivity only if E field value is used to update B - if (solve_for_Faraday) { Ex(i, j, k) += eta(rho_val, jtot_val) * Jx(i, j, k); } + if (solve_for_Faraday) { + Real jtot_val = 0._rt; + if (resistivity_has_J_dependence) { + // Interpolate current to appropriate staggering to match E field + const Real jx_val = Jx(i, j, k); + const Real jy_val = Interp(Jy, Jy_stag, Ex_stag, coarsen, i, j, k, 0); + const Real jz_val = Interp(Jz, Jz_stag, Ex_stag, coarsen, i, j, k, 0); + jtot_val = std::sqrt(jx_val*jx_val + jy_val*jy_val + jz_val*jz_val); + } + + Ex(i, j, k) += eta(rho_val, jtot_val) * Jx(i, j, k); - if (include_hyper_resistivity_term) { - auto nabla2Jx = T_Algo::Dxx(Jx, coefs_x, n_coefs_x, i, j, k) - + T_Algo::Dyy(Jx, coefs_y, n_coefs_y, i, j, k) - + T_Algo::Dzz(Jx, coefs_z, n_coefs_z, i, j, k); - Ex(i, j, k) -= eta_h * nabla2Jx; + if (include_hyper_resistivity_term) { + auto nabla2Jx = T_Algo::Dxx(Jx, coefs_x, n_coefs_x, i, j, k) + + T_Algo::Dyy(Jx, coefs_y, n_coefs_y, i, j, k) + + T_Algo::Dzz(Jx, coefs_z, n_coefs_z, i, j, k); + Ex(i, j, k) -= eta_h * nabla2Jx; + } } if (include_external_fields && (rho_val >= rho_floor)) { @@ -1046,42 +1050,44 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Interpolate to get the appropriate charge density in space const Real rho_val = Interp(rho, nodal, Ey_stag, coarsen, i, j, k, 0); - Real rho_val_limited = rho_val; - - // Interpolate current to appropriate staggering to match E field - Real jtot_val = 0._rt; - if (solve_for_Faraday && resistivity_has_J_dependence) { - const Real jx_val = Interp(Jx, Jx_stag, Ey_stag, coarsen, i, j, k, 0); - const Real jy_val = Interp(Jy, Jy_stag, Ey_stag, coarsen, i, j, k, 0); - const Real jz_val = Interp(Jz, Jz_stag, Ey_stag, coarsen, i, j, k, 0); - jtot_val = std::sqrt(jx_val*jx_val + jy_val*jy_val + jz_val*jz_val); - } - - // safety condition since we divide by rho_val later - if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } - - // Get the gradient of the electron pressure if the longitudinal part of - // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 - Real grad_Pe = 0._rt; - if (!solve_for_Faraday) { grad_Pe = T_Algo::UpwardDy(Pe, coefs_y, n_coefs_y, i, j, k); } - - // interpolate the nodal neE values to the Yee grid - auto enE_y = Interp(enE, nodal, Ey_stag, coarsen, i, j, k, 1); if (rho_val < rho_floor && holmstrom_vacuum_region) { Ey(i, j, k) = 0._rt; } else { + // Get the gradient of the electron pressure if the longitudinal part of + // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 + const Real grad_Pe = (!solve_for_Faraday) ? 
+ T_Algo::UpwardDy(Pe, coefs_y, n_coefs_y, i, j, k) + : 0._rt; + + // interpolate the nodal neE values to the Yee grid + const auto enE_y = Interp(enE, nodal, Ey_stag, coarsen, i, j, k, 1); + + // safety condition since we divide by rho + const auto rho_val_limited = std::max(rho_val, rho_floor); + Ey(i, j, k) = (enE_y - grad_Pe) / rho_val_limited; } // Add resistivity only if E field value is used to update B - if (solve_for_Faraday) { Ey(i, j, k) += eta(rho_val, jtot_val) * Jy(i, j, k); } + if (solve_for_Faraday) { + Real jtot_val = 0._rt; + if (resistivity_has_J_dependence) { + // Interpolate current to appropriate staggering to match E field + const Real jx_val = Interp(Jx, Jx_stag, Ey_stag, coarsen, i, j, k, 0); + const Real jy_val = Jy(i, j, k); + const Real jz_val = Interp(Jz, Jz_stag, Ey_stag, coarsen, i, j, k, 0); + jtot_val = std::sqrt(jx_val*jx_val + jy_val*jy_val + jz_val*jz_val); + } + + Ey(i, j, k) += eta(rho_val, jtot_val) * Jy(i, j, k); - if (include_hyper_resistivity_term) { - auto nabla2Jy = T_Algo::Dxx(Jy, coefs_x, n_coefs_x, i, j, k) - + T_Algo::Dyy(Jy, coefs_y, n_coefs_y, i, j, k) - + T_Algo::Dzz(Jy, coefs_z, n_coefs_z, i, j, k); - Ey(i, j, k) -= eta_h * nabla2Jy; + if (include_hyper_resistivity_term) { + auto nabla2Jy = T_Algo::Dxx(Jy, coefs_x, n_coefs_x, i, j, k) + + T_Algo::Dyy(Jy, coefs_y, n_coefs_y, i, j, k) + + T_Algo::Dzz(Jy, coefs_z, n_coefs_z, i, j, k); + Ey(i, j, k) -= eta_h * nabla2Jy; + } } if (include_external_fields && (rho_val >= rho_floor)) { @@ -1097,42 +1103,44 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( // Interpolate to get the appropriate charge density in space const Real rho_val = Interp(rho, nodal, Ez_stag, coarsen, i, j, k, 0); - Real rho_val_limited = rho_val; - - // Interpolate current to appropriate staggering to match E field - Real jtot_val = 0._rt; - if (solve_for_Faraday && resistivity_has_J_dependence) { - const Real jx_val = Interp(Jx, Jx_stag, Ez_stag, coarsen, i, j, k, 0); - const Real jy_val = Interp(Jy, Jy_stag, Ez_stag, coarsen, i, j, k, 0); - const Real jz_val = Interp(Jz, Jz_stag, Ez_stag, coarsen, i, j, k, 0); - jtot_val = std::sqrt(jx_val*jx_val + jy_val*jy_val + jz_val*jz_val); - } - - // safety condition since we divide by rho_val later - if (rho_val_limited < rho_floor) { rho_val_limited = rho_floor; } - - // Get the gradient of the electron pressure if the longitudinal part of - // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 - Real grad_Pe = 0._rt; - if (!solve_for_Faraday) { grad_Pe = T_Algo::UpwardDz(Pe, coefs_z, n_coefs_z, i, j, k); } - - // interpolate the nodal neE values to the Yee grid - auto enE_z = Interp(enE, nodal, Ez_stag, coarsen, i, j, k, 2); if (rho_val < rho_floor && holmstrom_vacuum_region) { Ez(i, j, k) = 0._rt; } else { + // Get the gradient of the electron pressure if the longitudinal part of + // the E-field should be included, otherwise ignore it since curl x (grad Pe) = 0 + const Real grad_Pe = (!solve_for_Faraday) ? 
+ T_Algo::UpwardDz(Pe, coefs_z, n_coefs_z, i, j, k) + : 0._rt; + + // interpolate the nodal neE values to the Yee grid + const auto enE_z = Interp(enE, nodal, Ez_stag, coarsen, i, j, k, 2); + + // safety condition since we divide by rho + const auto rho_val_limited = std::max(rho_val, rho_floor); + Ez(i, j, k) = (enE_z - grad_Pe) / rho_val_limited; } // Add resistivity only if E field value is used to update B - if (solve_for_Faraday) { Ez(i, j, k) += eta(rho_val, jtot_val) * Jz(i, j, k); } + if (solve_for_Faraday) { + Real jtot_val = 0._rt; + if (resistivity_has_J_dependence) { + // Interpolate current to appropriate staggering to match E field + const Real jx_val = Interp(Jx, Jx_stag, Ez_stag, coarsen, i, j, k, 0); + const Real jy_val = Interp(Jy, Jy_stag, Ez_stag, coarsen, i, j, k, 0); + const Real jz_val = Jz(i, j, k); + jtot_val = std::sqrt(jx_val*jx_val + jy_val*jy_val + jz_val*jz_val); + } + + Ez(i, j, k) += eta(rho_val, jtot_val) * Jz(i, j, k); - if (include_hyper_resistivity_term) { - auto nabla2Jz = T_Algo::Dxx(Jz, coefs_x, n_coefs_x, i, j, k) - + T_Algo::Dyy(Jz, coefs_y, n_coefs_y, i, j, k) - + T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, k); - Ez(i, j, k) -= eta_h * nabla2Jz; + if (include_hyper_resistivity_term) { + auto nabla2Jz = T_Algo::Dxx(Jz, coefs_x, n_coefs_x, i, j, k) + + T_Algo::Dyy(Jz, coefs_y, n_coefs_y, i, j, k) + + T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, k); + Ez(i, j, k) -= eta_h * nabla2Jz; + } } if (include_external_fields && (rho_val >= rho_floor)) { From 8d285a8ada0e20d495a89c83927461773cd6a94c Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Wed, 19 Feb 2025 01:47:37 +0100 Subject: [PATCH 240/278] WarpX class: fuse together doFieldIonization with doFieldIonization(lev) and doQEDEvents with doQEDEvents(lev) (#5671) `doFieldIonization(lev) ` is called only once, inside `doFieldIonization` , which is simply a loop over the levels calling for each level `doFieldIonization(lev) `. The same happens for `doQEDEvents`. In order to simplify the interface of the WarpX class, I would like to propose to drop `doFieldIonization(lev) ` and `doQEDEvents(lev) `, and to integrate their code respectively in `doFieldIonization` and `doQEDEvents`. 
--- Source/Evolve/WarpXEvolve.cpp | 56 ++++++++++++++--------------------- Source/WarpX.H | 8 ----- 2 files changed, 22 insertions(+), 42 deletions(-) diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index a5ad9d4034e..5593642a944 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -1076,53 +1076,41 @@ WarpX::OneStep_sub1 (Real cur_time) void WarpX::doFieldIonization () -{ - for (int lev = 0; lev <= finest_level; ++lev) { - doFieldIonization(lev); - } -} - -void -WarpX::doFieldIonization (int lev) { using ablastr::fields::Direction; using warpx::fields::FieldType; - mypc->doFieldIonization( - lev, - *m_fields.get(FieldType::Efield_aux, Direction{0}, lev), - *m_fields.get(FieldType::Efield_aux, Direction{1}, lev), - *m_fields.get(FieldType::Efield_aux, Direction{2}, lev), - *m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), - *m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), - *m_fields.get(FieldType::Bfield_aux, Direction{2}, lev) - ); -} - -#ifdef WARPX_QED -void -WarpX::doQEDEvents () -{ for (int lev = 0; lev <= finest_level; ++lev) { - doQEDEvents(lev); + mypc->doFieldIonization( + lev, + *m_fields.get(FieldType::Efield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{2}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{2}, lev) + ); } } +#ifdef WARPX_QED void -WarpX::doQEDEvents (int lev) +WarpX::doQEDEvents () { using ablastr::fields::Direction; using warpx::fields::FieldType; - mypc->doQedEvents( - lev, - *m_fields.get(FieldType::Efield_aux, Direction{0}, lev), - *m_fields.get(FieldType::Efield_aux, Direction{1}, lev), - *m_fields.get(FieldType::Efield_aux, Direction{2}, lev), - *m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), - *m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), - *m_fields.get(FieldType::Bfield_aux, Direction{2}, lev) - ); + for (int lev = 0; lev <= finest_level; ++lev) { + mypc->doQedEvents( + lev, + *m_fields.get(FieldType::Efield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Efield_aux, Direction{2}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{0}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{1}, lev), + *m_fields.get(FieldType::Bfield_aux, Direction{2}, lev) + ); + } } #endif diff --git a/Source/WarpX.H b/Source/WarpX.H index 29439002a3a..f039a636498 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -663,18 +663,10 @@ public: /** Run the ionization module on all species */ void doFieldIonization (); - /** Run the ionization module on all species at level lev - * \param lev level - */ - void doFieldIonization (int lev); #ifdef WARPX_QED /** Run the QED module on all species */ void doQEDEvents (); - /** Run the QED module on all species at level lev - * \param lev level - */ - void doQEDEvents (int lev); #endif void PushParticlesandDeposit (int lev, amrex::Real cur_time, DtType a_dt_type=DtType::Full, bool skip_current=false, From bf4bd4a22d4669b94bb24c7dcc16c9a0d0fab244 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Wed, 19 Feb 2025 01:48:23 +0100 Subject: [PATCH 241/278] WarpX class: remove declaration of two unimplemented functions (#5670) `AverageAndPackFields` and `prepareFields` are not implemented. Therefore, this PR removes their declaration from the WarpX header. 
---
 Source/WarpX.H | 8 --------
 1 file changed, 8 deletions(-)

diff --git a/Source/WarpX.H b/Source/WarpX.H
index f039a636498..638b6403cae 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -756,14 +756,6 @@ public:
     [[nodiscard]] amrex::Real stopTime () const {return stop_time;}
     void updateStopTime (const amrex::Real new_stop_time) {stop_time = new_stop_time;}

-    void AverageAndPackFields( amrex::Vector<std::string>& varnames,
-        amrex::Vector<amrex::MultiFab>& mf_avg, amrex::IntVect ngrow) const;
-
-    void prepareFields( int step, amrex::Vector<std::string>& varnames,
-        amrex::Vector<amrex::MultiFab>& mf_avg,
-        amrex::Vector<const amrex::MultiFab*>& output_mf,
-        amrex::Vector<amrex::Geometry>& output_geom ) const;
-
     static std::array<amrex::Real,3> CellSize (int lev);
     static amrex::XDim3 InvCellSize (int lev);
     static amrex::RealBox getRealBox(const amrex::Box& bx, int lev);

From 804a27340adddcd1163f58fdedf1d00a998a4fc8 Mon Sep 17 00:00:00 2001
From: Andrew Myers
Date: Tue, 18 Feb 2025 16:52:56 -0800
Subject: [PATCH 242/278] Fix plot_distribution_mapping.py for 2D (#5660)

The box metadata used in this script follows the AMReX conventions. We want
"zyx" in 3D and "yx" in 2D.
---
 Tools/PostProcessing/plot_distribution_mapping.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Tools/PostProcessing/plot_distribution_mapping.py b/Tools/PostProcessing/plot_distribution_mapping.py
index 899ea4678c4..07a353cdc3d 100644
--- a/Tools/PostProcessing/plot_distribution_mapping.py
+++ b/Tools/PostProcessing/plot_distribution_mapping.py
@@ -119,9 +119,9 @@ def _get_costs_reduced_diagnostics(self, directory, prange):
         kcoords = k.astype(int) // k_blocking_factor

         # Fill in cost array
-        shape = (kmax + 1, jmax + 1, imax + 1)[: 2 + self.is_3D]
+        shape = (kmax + 1, jmax + 1, imax + 1)[1 - self.is_3D :]
         coords = [
-            coord[: 2 + self.is_3D] for coord in zip(kcoords, jcoords, icoords)
+            coord[1 - self.is_3D :] for coord in zip(kcoords, jcoords, icoords)
         ]

         cost_arr = np.full(shape, 0.0)

From 216847203740dae3a2a3cf2577165c1d4218fcc2 Mon Sep 17 00:00:00 2001
From: Luca Fedeli
Date: Wed, 19 Feb 2025 01:55:35 +0100
Subject: [PATCH 243/278] WarpX class: move PrintDtDxDyDz to an anonymous
 namespace in WarpXInitData.cpp (#5658)

`PrintDtDxDyDz` is used only twice in `WarpXInitData.cpp`.
Therefore, this PR turns it from a method of the WarpX class to a simple
function inside an anonymous namespace in `WarpXInitData.cpp`.
---
 Source/Evolve/WarpXComputeDt.cpp        | 19 -------------------
 Source/Initialization/WarpXInitData.cpp | 25 +++++++++++++++++++++++--
 Source/WarpX.H                          |  3 ---
 3 files changed, 23 insertions(+), 24 deletions(-)

diff --git a/Source/Evolve/WarpXComputeDt.cpp b/Source/Evolve/WarpXComputeDt.cpp
index 2b4db960ed6..f88b2044927 100644
--- a/Source/Evolve/WarpXComputeDt.cpp
+++ b/Source/Evolve/WarpXComputeDt.cpp
@@ -134,22 +134,3 @@ WarpX::UpdateDtFromParticleSpeeds ()
         dt[lev] = dt[lev+1] * refRatio(lev)[0];
     }
 }
-
-void
-WarpX::PrintDtDxDyDz ()
-{
-    for (int lev=0; lev <= max_level; lev++) {
-        const amrex::Real* dx_lev = geom[lev].CellSize();
-        amrex::Print() << "Level " << lev << ": dt = " << dt[lev]
-#if defined(WARPX_DIM_1D_Z)
-                       << " ; dz = " << dx_lev[0] << '\n';
-#elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
-                       << " ; dx = " << dx_lev[0]
-                       << " ; dz = " << dx_lev[1] << '\n';
-#elif defined(WARPX_DIM_3D)
-                       << " ; dx = " << dx_lev[0]
-                       << " ; dy = " << dx_lev[1]
-                       << " ; dz = " << dx_lev[2] << '\n';
-#endif
-    }
-}
diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp
index 90b8d613898..c70188f07bc 100644
--- a/Source/Initialization/WarpXInitData.cpp
+++ b/Source/Initialization/WarpXInitData.cpp
@@ -93,6 +93,27 @@ using namespace amrex;

 namespace
 {
+
+    /** Print dt and dx,dy,dz */
+    void PrintDtDxDyDz (
+        int max_level, const amrex::Vector<amrex::Geometry>& geom, const amrex::Vector<amrex::Real>& dt)
+    {
+        for (int lev=0; lev <= max_level; lev++) {
+            const amrex::Real* dx_lev = geom[lev].CellSize();
+            amrex::Print() << "Level " << lev << ": dt = " << dt[lev]
+    #if defined(WARPX_DIM_1D_Z)
+                           << " ; dz = " << dx_lev[0] << '\n';
+    #elif defined(WARPX_DIM_XZ) || defined(WARPX_DIM_RZ)
+                           << " ; dx = " << dx_lev[0]
+                           << " ; dz = " << dx_lev[1] << '\n';
+    #elif defined(WARPX_DIM_3D)
+                           << " ; dx = " << dx_lev[0]
+                           << " ; dy = " << dx_lev[1]
+                           << " ; dz = " << dx_lev[2] << '\n';
+    #endif
+        }
+    }
+
     /**
      * \brief Check that the number of guard cells is smaller than the number of valid cells,
      * for a given MultiFab, and abort otherwise.
@@ -539,14 +560,14 @@ WarpX::InitData ()
     if (restart_chkfile.empty())
     {
         ComputeDt();
-        WarpX::PrintDtDxDyDz();
+        ::PrintDtDxDyDz(max_level, geom, dt);
         InitFromScratch();
         InitDiagnostics();
     }
     else
     {
         InitFromCheckpoint();
-        WarpX::PrintDtDxDyDz();
+        ::PrintDtDxDyDz(max_level, geom, dt);
         PostRestart();
         reduced_diags->InitData();
     }
diff --git a/Source/WarpX.H b/Source/WarpX.H
index 638b6403cae..44423edb4bb 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -462,9 +462,6 @@ public:
     /** Write a file that records all inputs: inputs file + command line options */
     void WriteUsedInputsFile () const;

-    /** Print dt and dx,dy,dz */
-    void PrintDtDxDyDz ();
-
     /**
      * \brief
      * Compute the last time step of the simulation

From 04b0cb1177a9558abe451169406abc40bcd36c79 Mon Sep 17 00:00:00 2001
From: Axel Huebl
Date: Tue, 18 Feb 2025 18:20:05 -0800
Subject: [PATCH 244/278] AMReX/pyAMReX/PICSAR: Weekly Update (#5680)

Weekly update to latest AMReX.
Weekly update to latest pyAMReX (no changes).
Weekly update to latest PICSAR (no changes).
```console ./Tools/Release/updateAMReX.py ./Tools/Release/updatepyAMReX.py ./Tools/Release/updatePICSAR.py ``` This pulls in https://github.com/AMReX-Codes/amrex/pull/4337, fixing regressions from #5669 (GPU segfaults on particle redistribute) Signed-off-by: Axel Huebl --- .azure-pipelines.yml | 16 ++++++---------- .github/workflows/cuda.yml | 2 +- cmake/dependencies/AMReX.cmake | 2 +- 3 files changed, 8 insertions(+), 12 deletions(-) diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index 77cc75a0264..427cf21600b 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -66,16 +66,6 @@ jobs: cacheHitVar: CCACHE_CACHE_RESTORED displayName: Cache Ccache Objects - - task: Cache@2 - continueOnError: true - inputs: - key: 'Python3 | "$(System.JobName)" | .azure-pipelines.yml' - restoreKeys: | - Python3 | "$(System.JobName)" | .azure-pipelines.yml - path: /home/vsts/.local/lib/python3.8 - cacheHitVar: PYTHON38_CACHE_RESTORED - displayName: Cache Python Libraries - - bash: | set -o nounset errexit pipefail cat /proc/cpuinfo | grep "model name" | sort -u @@ -176,3 +166,9 @@ jobs: -exec cat {} \; displayName: 'Logs' condition: always() + + - bash: | + # clean out so the Post-job Cache "tar" command has more disk space available + rm -rf build + displayName: 'Clean Build Directory' + condition: always() diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 3b65f406728..029d1e4db89 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -127,7 +127,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 275f55f25fec350dfedb54f75a19200b52ced93f && cd - + cd ../amrex && git checkout --detach b364becad939a490bca4e7f8b23f7392c558a311 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 813734282c7..7a249cd6c5b 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -294,7 +294,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "275f55f25fec350dfedb54f75a19200b52ced93f" +set(WarpX_amrex_branch "b364becad939a490bca4e7f8b23f7392c558a311" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") From d38ebc75568234d1f603db23d61e4735d430cc47 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Wed, 19 Feb 2025 18:41:05 +0100 Subject: [PATCH 245/278] WarpX class: remove unused functions NodalSyncJ and NodalSyncRho (#5685) `NodalSyncJ` and `NodalSyncRho` are member functions of the WarpX class, but they are never used. Therefore, this PR removes them. 
---
 Source/Parallelization/WarpXComm.cpp | 48 ----------------------------
 Source/WarpX.H                       | 12 -------
 2 files changed, 60 deletions(-)

diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp
index d5c36084467..3adf4389a46 100644
--- a/Source/Parallelization/WarpXComm.cpp
+++ b/Source/Parallelization/WarpXComm.cpp
@@ -1667,51 +1667,3 @@ void WarpX::AddRhoFromFineLevelandSumBoundary (
         MultiFab::Add(*charge_fp[lev], mf, 0, icomp, ncomp, 0);
     }
 }
-
-void WarpX::NodalSyncJ (
-    const ablastr::fields::MultiLevelVectorField& J_fp,
-    const ablastr::fields::MultiLevelVectorField& J_cp,
-    const int lev,
-    PatchType patch_type)
-{
-    if (!override_sync_intervals.contains(istep[0])) { return; }
-
-    if (patch_type == PatchType::fine)
-    {
-        const amrex::Periodicity& period = Geom(lev).periodicity();
-        ablastr::utils::communication::OverrideSync(*J_fp[lev][0], WarpX::do_single_precision_comms, period);
-        ablastr::utils::communication::OverrideSync(*J_fp[lev][1], WarpX::do_single_precision_comms, period);
-        ablastr::utils::communication::OverrideSync(*J_fp[lev][2], WarpX::do_single_precision_comms, period);
-    }
-    else if (patch_type == PatchType::coarse)
-    {
-        const amrex::Periodicity& cperiod = Geom(lev-1).periodicity();
-        ablastr::utils::communication::OverrideSync(*J_cp[lev][0], WarpX::do_single_precision_comms, cperiod);
-        ablastr::utils::communication::OverrideSync(*J_cp[lev][1], WarpX::do_single_precision_comms, cperiod);
-        ablastr::utils::communication::OverrideSync(*J_cp[lev][2], WarpX::do_single_precision_comms, cperiod);
-    }
-}
-
-void WarpX::NodalSyncRho (
-    const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_fp,
-    const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_cp,
-    const int lev,
-    PatchType patch_type,
-    const int icomp,
-    const int ncomp)
-{
-    if (!override_sync_intervals.contains(istep[0])) { return; }
-
-    if (patch_type == PatchType::fine && charge_fp[lev])
-    {
-        const amrex::Periodicity& period = Geom(lev).periodicity();
-        MultiFab rhof(*charge_fp[lev], amrex::make_alias, icomp, ncomp);
-        ablastr::utils::communication::OverrideSync(rhof, WarpX::do_single_precision_comms, period);
-    }
-    else if (patch_type == PatchType::coarse && charge_cp[lev])
-    {
-        const amrex::Periodicity& cperiod = Geom(lev-1).periodicity();
-        MultiFab rhoc(*charge_cp[lev], amrex::make_alias, icomp, ncomp);
-        ablastr::utils::communication::OverrideSync(rhoc, WarpX::do_single_precision_comms, cperiod);
-    }
-}
diff --git a/Source/WarpX.H b/Source/WarpX.H
index 44423edb4bb..00ab9080751 100644
--- a/Source/WarpX.H
+++ b/Source/WarpX.H
@@ -1162,11 +1162,6 @@ private:
         const ablastr::fields::MultiLevelVectorField& current,
         int lev,
         const amrex::Periodicity& period);
-    void NodalSyncJ (
-        const ablastr::fields::MultiLevelVectorField& J_fp,
-        const ablastr::fields::MultiLevelVectorField& J_cp,
-        int lev,
-        PatchType patch_type);

     void RestrictRhoFromFineToCoarsePatch (int lev );
     void ApplyFilterandSumBoundaryRho (
@@ -1183,13 +1178,6 @@ private:
         int lev,
         int icomp,
         int ncomp);
-    void NodalSyncRho (
-        const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_fp,
-        const amrex::Vector<std::unique_ptr<amrex::MultiFab>>& charge_cp,
-        int lev,
-        PatchType patch_type,
-        int icomp,
-        int ncomp);

     void ReadParameters ();

From 686ef38c16f77c1bfec3a153cc598c663e2046df Mon Sep 17 00:00:00 2001
From: Arianna Formenti
Date: Wed, 19 Feb 2025 10:10:21 -0800
Subject: [PATCH 246/278] Small fix in Perlmutter GPU sbatch script (#5683)

Changes in Perlmutter GPU job script: from `#SBATCH --cpus-per-task=16` to
`#SBATCH --cpus-per-task=32`. This is to request (v)cores in consecutive
blocks.
GPU 3 is closest to CPU cores 0-15, 64-79, GPU 2 to CPU cores 16-31, 80-95, ...
If `--cpus-per-task=16`, MPI ranks 0 and 1 are mapped to cores 0 and 8.
If `--cpus-per-task=32`, MPI ranks 0 and 1 are mapped to cores 0 and 16.

Visual representation
![pm_gpu_vcores_mpi](https://github.com/user-attachments/assets/edf0721f-7321-49ab-bf37-4b55a7c422cc)

---------

Co-authored-by: Axel Huebl
---
 Tools/machines/perlmutter-nersc/perlmutter_gpu.sbatch | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Tools/machines/perlmutter-nersc/perlmutter_gpu.sbatch b/Tools/machines/perlmutter-nersc/perlmutter_gpu.sbatch
index 37bd5d60c54..bd47fa3bd2a 100644
--- a/Tools/machines/perlmutter-nersc/perlmutter_gpu.sbatch
+++ b/Tools/machines/perlmutter-nersc/perlmutter_gpu.sbatch
@@ -17,7 +17,7 @@
 # A100 80GB (256 nodes)
 #S BATCH -C gpu&hbm80g
 #SBATCH --exclusive
-#SBATCH --cpus-per-task=16
+#SBATCH --cpus-per-task=32
 # ideally single:1, but NERSC cgroups issue
 #SBATCH --gpu-bind=none
 #SBATCH --ntasks-per-node=4
@@ -34,7 +34,7 @@ export MPICH_OFI_NIC_POLICY=GPU

 # threads for OpenMP and threaded compressors per MPI rank
 # note: 16 avoids hyperthreading (32 virtual cores, 16 physical)
-export OMP_NUM_THREADS=${SLURM_CPUS_PER_TASK}
+export OMP_NUM_THREADS=16

 # GPU-aware MPI optimizations
 GPU_AWARE_MPI="amrex.use_gpu_aware_mpi=1"

From deef43533b9ccbee355327e9f023947dfd5ef909 Mon Sep 17 00:00:00 2001
From: Axel Huebl
Date: Wed, 19 Feb 2025 11:22:57 -0800
Subject: [PATCH 247/278] Doc: PoP on Ion-Acoustic Solitons (#5686)

New PoP by Ashwyn Sam et al.:
https://doi.org/10.1063/5.0249525
---
 Docs/source/highlights.rst | 5 +++++
 1 file changed, 5 insertions(+)

diff --git a/Docs/source/highlights.rst b/Docs/source/highlights.rst
index b40ed16e945..81cc53c3eab 100644
--- a/Docs/source/highlights.rst
+++ b/Docs/source/highlights.rst
@@ -159,6 +159,11 @@ High Energy Astrophysical Plasma Physics

 Scientific works in astrophysical plasma modeling.

+#. Sam A, Kumar P, Fletcher AC, Crabtree C, Lee N, Elschot S.
+   **Nonlinear evolution, propagation, electron-trapping, and damping effects of ion-acoustic solitons using fully kinetic PIC simulations**.
+   Phys. Plasmas **32** 022103, 2025
+   `DOI:10.1063/5.0249525 <https://doi.org/10.1063/5.0249525>`__
+
 #. Jambunathan R, Jones H, Corrales L, Klion H, Roward ME, Myers A, Zhang W, Vay J-L.
    **Application of mesh refinement to relativistic magnetic reconnection**.
    Physics of Plasmas **32** 1, 2025

From 346bebdb27928c1acad892c4ffee5251f3c9d5f5 Mon Sep 17 00:00:00 2001
From: Luca Fedeli
Date: Fri, 21 Feb 2025 00:57:50 +0100
Subject: [PATCH 248/278] WarpX class: remove unused methods
 GetMacroscopicProperties and GetHybridPICModel (#5640)

The methods `GetMacroscopicProperties` and `GetHybridPICModel` of the WarpX
class are currently unused. We may consider removing them.
--- Source/WarpX.H | 2 -- 1 file changed, 2 deletions(-) diff --git a/Source/WarpX.H b/Source/WarpX.H index 00ab9080751..4f6024d426d 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -157,9 +157,7 @@ public: MultiParticleContainer& GetPartContainer () { return *mypc; } MultiFluidContainer& GetFluidContainer () { return *myfl; } - MacroscopicProperties& GetMacroscopicProperties () { return *m_macroscopic_properties; } ElectrostaticSolver& GetElectrostaticSolver () {return *m_electrostatic_solver;} - HybridPICModel& GetHybridPICModel () { return *m_hybrid_pic_model; } [[nodiscard]] HybridPICModel * get_pointer_HybridPICModel () const { return m_hybrid_pic_model.get(); } MultiDiagnostics& GetMultiDiags () {return *multi_diags;} ParticleBoundaryBuffer& GetParticleBoundaryBuffer () { return *m_particle_boundary_buffer; } From 826aa1b99c4d2f53290194f336b779ce6e131c12 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 21 Feb 2025 14:58:26 -0800 Subject: [PATCH 249/278] Fix: `AddPlasmaFlux` w/ RT Components (#5694) Temporary particle container did not use the same component names as the created particles yet. Follow-up #5481 --- Source/Particles/PhysicalParticleContainer.cpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Source/Particles/PhysicalParticleContainer.cpp b/Source/Particles/PhysicalParticleContainer.cpp index 88c9a2273fd..335d7370e75 100644 --- a/Source/Particles/PhysicalParticleContainer.cpp +++ b/Source/Particles/PhysicalParticleContainer.cpp @@ -1365,8 +1365,8 @@ PhysicalParticleContainer::AddPlasmaFlux (PlasmaInjector const& plasma_injector, // we will then call Redistribute on this new container and finally // add the new particles to the original container. PhysicalParticleContainer tmp_pc(&WarpX::GetInstance()); - for (int ic = 0; ic < NumRuntimeRealComps(); ++ic) { tmp_pc.AddRealComp(false); } - for (int ic = 0; ic < NumRuntimeIntComps(); ++ic) { tmp_pc.AddIntComp(false); } + for (int ic = 0; ic < NumRuntimeRealComps(); ++ic) { tmp_pc.AddRealComp(GetRealSoANames()[ic + NArrayReal], false); } + for (int ic = 0; ic < NumRuntimeIntComps(); ++ic) { tmp_pc.AddIntComp(GetIntSoANames()[ic + NArrayInt], false); } tmp_pc.defineAllParticleTiles(); Box fine_injection_box; From 2e0f9ab6aa903cea5a8d6ed4d93f0742a7e71eee Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Fri, 21 Feb 2025 15:16:12 -0800 Subject: [PATCH 250/278] Doc: Helion Credits (#5688) Scientists from Helion have and continue to contribute major features and bug fixes to WarpX. Add them to institutional acknowledgements. --- Docs/source/acknowledge_us.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Docs/source/acknowledge_us.rst b/Docs/source/acknowledge_us.rst index 8c9b8dcf15c..f648e9c1bbd 100644 --- a/Docs/source/acknowledge_us.rst +++ b/Docs/source/acknowledge_us.rst @@ -23,7 +23,7 @@ Please add the following sentence to your publications, it helps contributors ke **Plain text:** - This research used the open-source particle-in-cell code WarpX https://github.com/ECP-WarpX/WarpX. Primary WarpX contributors are with LBNL, LLNL, CEA-LIDYL, SLAC, DESY, CERN, and TAE Technologies. We acknowledge all WarpX contributors. + This research used the open-source particle-in-cell code WarpX https://github.com/ECP-WarpX/WarpX. Primary WarpX contributors are with LBNL, LLNL, CEA-LIDYL, SLAC, DESY, CERN, Helion Energy, and TAE Technologies. We acknowledge all WarpX contributors. 
**LaTeX:** @@ -31,7 +31,7 @@ Please add the following sentence to your publications, it helps contributors ke \usepackage{hyperref} This research used the open-source particle-in-cell code WarpX \url{https://github.com/ECP-WarpX/WarpX}. - Primary WarpX contributors are with LBNL, LLNL, CEA-LIDYL, SLAC, DESY, CERN, and TAE Technologies. + Primary WarpX contributors are with LBNL, LLNL, CEA-LIDYL, SLAC, DESY, CERN, Helion Energy, and TAE Technologies. We acknowledge all WarpX contributors. .. _acknowledge_warpx_ref: From 7e41abd3a2be44e872f8789e27567301c6874d5c Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 24 Feb 2025 09:13:42 -0800 Subject: [PATCH 251/278] Doc: Frontier Update (Feb 18, 2025) (#5695) There was a major system upgrade to Frontier this week. This updates to the latest modules. - https://docs.olcf.ornl.gov/systems/frontier_user_guide.html#id17 - https://docs.olcf.ornl.gov/software/software-news.html#frontier-system-software-update-february-18-2025 Tests run: - [x] compile-time - [x] runtime --- .../machines/frontier-olcf/frontier_warpx.profile.example | 6 +++--- Tools/machines/frontier-olcf/install_dependencies.sh | 7 ++----- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/Tools/machines/frontier-olcf/frontier_warpx.profile.example b/Tools/machines/frontier-olcf/frontier_warpx.profile.example index b51946ce832..89461cc3e8b 100644 --- a/Tools/machines/frontier-olcf/frontier_warpx.profile.example +++ b/Tools/machines/frontier-olcf/frontier_warpx.profile.example @@ -8,9 +8,9 @@ if [ -z ${proj-} ]; then echo "WARNING: The 'proj' variable is not yet set in yo # required dependencies module load cmake/3.27.9 module load craype-accel-amd-gfx90a -module load rocm/5.7.1 -module load cray-mpich/8.1.28 -module load cce/17.0.0 # must be loaded after rocm +module load rocm/6.2.4 +module load cray-mpich/8.1.31 +module load cce/18.0.1 # must be loaded after rocm # https://docs.olcf.ornl.gov/systems/frontier_user_guide.html#compatible-compiler-rocm-toolchain-versions # Fix for OpenMP Runtime (OLCFHELP-21543) diff --git a/Tools/machines/frontier-olcf/install_dependencies.sh b/Tools/machines/frontier-olcf/install_dependencies.sh index 8e8565788bc..17b4955e7c4 100755 --- a/Tools/machines/frontier-olcf/install_dependencies.sh +++ b/Tools/machines/frontier-olcf/install_dependencies.sh @@ -87,9 +87,7 @@ python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel python3 -m pip install --upgrade setuptools -# cupy needs an older Cython -# https://github.com/cupy/cupy/issues/4610 -python3 -m pip install --upgrade "cython<3.0" +python3 -m pip install --upgrade "cython>=3.0" # cupy for ROCm # https://docs.cupy.dev/en/stable/install.html#building-cupy-for-rocm-from-source # https://github.com/cupy/cupy/issues/7830 @@ -97,8 +95,7 @@ CC=cc CXX=CC \ CUPY_INSTALL_USE_HIP=1 \ ROCM_HOME=${ROCM_PATH} \ HCC_AMDGPU_TARGET=${AMREX_AMD_ARCH} \ - python3 -m pip install -v cupy -python3 -m pip install --upgrade "cython>=3.0" # for latest mpi4py and everything else + python3 -m pip install -v git+https://github.com/cupy/cupy.git@e669b994f976565bf2da4b1f82de51e10b58fbe1 python3 -m pip install --upgrade numpy python3 -m pip install --upgrade h5py python3 -m pip install --upgrade pandas From 46ae85592be3a89e84fe5099f5a03b1b97f2db94 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Mon, 24 Feb 2025 10:50:39 -0800 Subject: [PATCH 252/278] AMReX/pyAMReX/PICSAR: Weekly Update (#5699) Weekly update to latest AMReX. 
Weekly update to latest pyAMReX (no changes).
Weekly update to latest PICSAR (no changes).

```console
./Tools/Release/updateAMReX.py
./Tools/Release/updatepyAMReX.py
./Tools/Release/updatePICSAR.py
```

Signed-off-by: Axel Huebl
---
 .github/workflows/cuda.yml     | 2 +-
 cmake/dependencies/AMReX.cmake | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml
index 029d1e4db89..30e4adf1b51 100644
--- a/.github/workflows/cuda.yml
+++ b/.github/workflows/cuda.yml
@@ -127,7 +127,7 @@ jobs:
           which nvcc || echo "nvcc not in PATH!"

           git clone https://github.com/AMReX-Codes/amrex.git ../amrex
-          cd ../amrex && git checkout --detach b364becad939a490bca4e7f8b23f7392c558a311 && cd -
+          cd ../amrex && git checkout --detach 044d52f7d309e340939d7cae449fd83209da317f && cd -
           make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4
           ccache -s

diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake
index 7a249cd6c5b..3c389a0a01b 100644
--- a/cmake/dependencies/AMReX.cmake
+++ b/cmake/dependencies/AMReX.cmake
@@ -294,7 +294,7 @@ set(WarpX_amrex_src ""
 set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git"
     CACHE STRING
     "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)")
-set(WarpX_amrex_branch "b364becad939a490bca4e7f8b23f7392c558a311"
+set(WarpX_amrex_branch "044d52f7d309e340939d7cae449fd83209da317f"
     CACHE STRING
     "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)")

From 263fd5841c513c3bfe4cbd16cdd06e86ec2fd744 Mon Sep 17 00:00:00 2001
From: Yifan Wu
Date: Tue, 25 Feb 2025 09:45:36 +0800
Subject: [PATCH 253/278] PICMI Diagnostics settings: added handling method for
 ParticleHistogram2D in Reduced Diagnostics (#5689)

This PR implements the ParticleHistogram2D diagnostic in the PICMI interface. As mentioned in #5664, ParticleHistogram2D is available from the input-file settings, but not yet from PICMI Python input. This PR adds the _handle_particle_histogram2d function, which is similar to the existing _handle_particle_histogram function, to the ReducedDiagnostic class. This makes the ParticleHistogram2D diagnostic functional from PICMI input. To create a ParticleHistogram2D diagnostic in a PICMI input file, construct hist2d = picmi.ReducedDiagnostic(diag_type='ParticleHistogram2D', ...), with species, histogram_function_abs, histogram_function_ord, bin_min_abs, bin_max_abs, bin_min_ord, bin_max_ord, bin_number_abs, bin_number_ord, value_function (optional, "1" by default) and filter_function (optional, default None) specified in the kwargs. Finally, call add_diagnostic(hist2d) on the simulation object, and a ParticleHistogram2D diagnostic will be recorded during the simulation, as sketched below.
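For reference, a minimal sketch of such a setup. This is illustrative only: the simulation object `sim` and species instance `electrons` are assumed to be defined earlier in the script, and the `name`/`period` keywords, axis expressions, and bin ranges are placeholder assumptions, not values taken from this patch.

```python
from pywarpx import picmi

# Assumed to exist from earlier in the script:
#   sim       - a picmi.Simulation instance
#   electrons - a picmi.Species instance
# The expressions below may use the symbols (t, x, y, z, ux, uy, uz, w).
hist2d = picmi.ReducedDiagnostic(
    diag_type="ParticleHistogram2D",
    name="e_z_uz",     # assumed output name
    period=100,        # assumed output interval in steps
    species=electrons,
    histogram_function_abs="z",   # abscissa: longitudinal position
    histogram_function_ord="uz",  # ordinate: normalized momentum
    bin_min_abs=0.0,
    bin_max_abs=1.0e-3,
    bin_number_abs=128,
    bin_min_ord=-1.0,
    bin_max_ord=1.0,
    bin_number_ord=128,
    value_function="w",           # optional: weight used for the histogram
    filter_function="uz > 0",     # optional: particle filter
)
sim.add_diagnostic(hist2d)
```

Internally, each expression string is stored under its input-file form, e.g. histogram_function_abs(t,x,y,z,ux,uy,uz,w), so the PICMI kwargs map one-to-one onto the existing reduced-diagnostics input parameters.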
--------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- Python/pywarpx/picmi.py | 72 +++++++++++++++++++++++++++++++++++++++-- 1 file changed, 70 insertions(+), 2 deletions(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index f660570ca7c..0d7ffd91b5d 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -4061,7 +4061,7 @@ class ReducedDiagnostic(picmistandard.base._ClassWithInit, WarpXDiagnosticBase): species: species instance The name of the species for which to calculate the diagnostic, required for - diagnostic types 'BeamRelevant', 'ParticleHistogram', and 'ParticleExtrema' + diagnostic types 'BeamRelevant', 'ParticleHistogram', 'ParticleHistogram2D', and 'ParticleExtrema' bin_number: integer For diagnostic type 'ParticleHistogram', the number of bins used for the histogram @@ -4079,7 +4079,34 @@ class ReducedDiagnostic(picmistandard.base._ClassWithInit, WarpXDiagnosticBase): For diagnostic type 'ParticleHistogram', the function evaluated to produce the histogram data filter_function: string, optional - For diagnostic type 'ParticleHistogram', the function to filter whether particles are included in the histogram + For diagnostic types 'ParticleHistogram' and 'ParticleHistogram2D', the function to filter whether particles are included in the histogram + + bin_max_abs: float + For diagnostic type 'ParticleHistogram2D', the maximum value of the bins for the abscissa axis. + + bin_max_ord: float + For diagnostic type 'ParticleHistogram2D', the maximum value of the bins for the ordinate axis. + + bin_min_abs: float + For diagnostic type 'ParticleHistogram2D', the minimum value of the bins for the abscissa axis. + + bin_min_ord: float + For diagnostic type 'ParticleHistogram2D', the minimum value of the bins for the ordinate axis. + + bin_number_abs: integer + For diagnostic type 'ParticleHistogram2D', the number of bins used for the histogram for the abscissa axis. + + bin_number_ord: integer + For diagnostic type 'ParticleHistogram2D', the number of bins used for the histogram for the ordinate axis. + + histogram_function_abs: string + For diagnostic type 'ParticleHistogram2D', the histogram function for the abscissa axis. + + histogram_function_ord: string + For diagnostic type 'ParticleHistogram2D', the histogram function for the ordinate axis. + + value_function: string, optional + For diagnostic type 'ParticleHistogram2D', the expression for the weight used to calculate the histogram. 
reduced_function: string For diagnostic type 'FieldReduction', the function of the fields to evaluate @@ -4162,6 +4189,7 @@ def __init__( self._species_reduced_diagnostics = [ "BeamRelevant", "ParticleHistogram", + "ParticleHistogram2D", "ParticleExtrema", ] @@ -4172,6 +4200,8 @@ def __init__( self.species = species.name if self.type == "ParticleHistogram": kw = self._handle_particle_histogram(**kw) + elif self.type == "ParticleHistogram2D": + kw = self._handle_particle_histogram2d(**kw) elif self.type == "FieldProbe": kw = self._handle_field_probe(**kw) elif self.type == "FieldReduction": @@ -4249,6 +4279,44 @@ def _handle_particle_histogram(self, **kw): return kw + def _handle_particle_histogram2d(self, **kw): + self.bin_number_abs = kw.pop("bin_number_abs") + self.bin_number_ord = kw.pop("bin_number_ord") + self.bin_min_abs = kw.pop("bin_min_abs") + self.bin_max_abs = kw.pop("bin_max_abs") + self.bin_min_ord = kw.pop("bin_min_ord") + self.bin_max_ord = kw.pop("bin_max_ord") + histogram_function_abs = kw.pop("histogram_function_abs") + histogram_function_ord = kw.pop("histogram_function_ord") + self.__setattr__( + "histogram_function_abs(t,x,y,z,ux,uy,uz,w)", histogram_function_abs + ) + self.__setattr__( + "histogram_function_ord(t,x,y,z,ux,uy,uz,w)", histogram_function_ord + ) + + filter_function = kw.pop("filter_function", None) + value_function = kw.pop("value_function", None) + + self.__setattr__("filter_function(t,x,y,z,ux,uy,uz,w)", filter_function) + self.__setattr__("value_function(t,x,y,z,ux,uy,uz,w)", value_function) + + # Check the function expressions for constants + for k in list(kw.keys()): + if any( + re.search(r"\b%s\b" % k, expr) + for expr in [ + histogram_function_abs, + histogram_function_ord, + filter_function, + value_function, + ] + if expr is not None + ): + self.user_defined_kw[k] = kw.pop(k) + + return kw + def _handle_field_reduction(self, **kw): self.reduction_type = kw.pop("reduction_type") reduced_function = kw.pop("reduced_function") From 608b0597773063fffd1c9ab9cf228cd09578521b Mon Sep 17 00:00:00 2001 From: "S. Eric Clark" <25495882+clarkse@users.noreply.github.com> Date: Mon, 24 Feb 2025 19:50:06 -0800 Subject: [PATCH 254/278] =?UTF-8?q?Fixing=20bug=20that=20only=20applied=20?= =?UTF-8?q?last=20field=20in=20the=20list=20of=20external=20field=E2=80=A6?= =?UTF-8?q?=20(#5690)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit …s. Changed to add all fields. This bug cropped up during code review and refactoring. I have re-implemented the field accumulations for multiply defined external fields. This was not caught by CI since the cylinder compression test only has a single uniform compression field. --------- Signed-off-by: S. 
Eric Clark <25495882+clarkse@users.noreply.github.com> --- ...d_ohm_solver_cylinder_compression_picmi.py | 14 +++++++--- ...z_ohm_solver_cylinder_compression_picmi.py | 14 +++++++--- ...ohm_solver_cylinder_compression_picmi.json | 28 +++++++++---------- ...ohm_solver_cylinder_compression_picmi.json | 26 ++++++++--------- .../HybridPICModel/ExternalVectorPotential.H | 2 +- .../ExternalVectorPotential.cpp | 24 ++++++++++------ 6 files changed, 64 insertions(+), 44 deletions(-) diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py index 4f05fd15d83..a871bc97108 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_3d_ohm_solver_cylinder_compression_picmi.py @@ -95,8 +95,8 @@ def __init__(self, test, verbose): RM = np.sqrt(XM**2 + YM**2) - Ax_data = -0.5 * YM * self.dB - Ay_data = 0.5 * XM * self.dB + Ax_data = -0.25 * YM * self.dB + Ay_data = 0.25 * XM * self.dB Az_data = np.zeros_like(RM) # Write vector potential to file to exercise field loading via OpenPMD @@ -261,11 +261,17 @@ def setup_run(self): ####################################################################### # External Field definition. Sigmoid starting around 2.5 us A_ext = { - "uniform": { + "uniform_file": { "read_from_file": True, "path": "Afield.h5", "A_time_external_function": "1/(1+exp(5*(1-(t-t0_ramp)*sqrt(2)/tau_ramp)))", - } + }, + "uniform_analytical": { + "Ax_external_function": f"-0.25*y*{self.dB}", + "Ay_external_function": f"0.25*x*{self.dB}", + "Az_external_function": "0", + "A_time_external_function": "1/(1+exp(5*(1-(t-t0_ramp)*sqrt(2)/tau_ramp)))", + }, } self.solver = picmi.HybridPICSolver( diff --git a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py index 8c65f88ae79..17fc6d68144 100644 --- a/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py +++ b/Examples/Tests/ohm_solver_cylinder_compression/inputs_test_rz_ohm_solver_cylinder_compression_picmi.py @@ -93,8 +93,8 @@ def __init__(self, test, verbose): Ar_data = np.zeros_like(RM) Az_data = np.zeros_like(RM) - # Zero padded outside of domain - At_data = 0.5 * RM * self.dB + # Only include half of the compression field here + At_data = 0.25 * RM * self.dB # Write vector potential to file to exercise field loading via series = io.Series("Afield.h5", io.Access.create) @@ -255,11 +255,17 @@ def setup_run(self): ####################################################################### # External Field definition. 
Sigmoid starting around 2.5 us A_ext = { - "uniform": { + "uniform_file": { "read_from_file": True, "path": "Afield.h5", "A_time_external_function": "1/(1+exp(5*(1-(t-t0_ramp)*sqrt(2)/tau_ramp)))", - } + }, + "uniform_analytical": { + "Ax_external_function": f"-0.25*y*{self.dB}", + "Ay_external_function": f"0.25*x*{self.dB}", + "Az_external_function": "0", + "A_time_external_function": "1/(1+exp(5*(1-(t-t0_ramp)*sqrt(2)/tau_ramp)))", + }, } self.solver = picmi.HybridPICSolver( diff --git a/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json b/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json index 6cde3a9450e..c61354de5a3 100644 --- a/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_3d_ohm_solver_cylinder_compression_picmi.json @@ -1,20 +1,20 @@ { "lev=0": { - "Bx": 0.5334253070691776, - "By": 0.5318560243634998, - "Bz": 2252.108905639938, - "Ex": 10509838.331420777, - "Ey": 10512676.798857061, - "Ez": 8848.113963901804, - "rho": 384112.2912140536 + "Bx": 0.5334251406746063, + "By": 0.5318559382056761, + "Bz": 2252.108858363565, + "Ex": 10509826.248254433, + "Ey": 10512665.210439455, + "Ez": 8848.110632517377, + "rho": 384112.2912140535 }, "ions": { - "particle_momentum_x": 2.161294367543349e-16, - "particle_momentum_y": 2.161870747294985e-16, - "particle_momentum_z": 2.0513400435256855e-16, - "particle_position_x": 769864.202585846, - "particle_position_y": 769908.6569812088, - "particle_position_z": 620721.1900338201, + "particle_momentum_x": 2.1612944051250247e-16, + "particle_momentum_y": 2.1618707843260381e-16, + "particle_momentum_z": 2.0513400435259629e-16, + "particle_position_x": 769864.2025705199, + "particle_position_y": 769908.6569665589, + "particle_position_z": 620721.1900338195, "particle_weight": 1.008292384042714e+19 } -} \ No newline at end of file +} diff --git a/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json index 6fd2ca04fce..ae86b3d4168 100644 --- a/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_rz_ohm_solver_cylinder_compression_picmi.json @@ -1,20 +1,20 @@ { "lev=0": { - "Br": 0.01190012639573578, - "Bt": 0.011313481779415917, - "Bz": 11.684908684984164, - "Er": 154581.58512851578, - "Et": 4798.276941148807, - "Ez": 193.22344271401872, - "rho": 7968.182346905438 + "Br": 0.011900125915334049, + "Bt": 0.011313482081775999, + "Bz": 11.684907956225278, + "Er": 154581.64325434464, + "Et": 4797.794963571249, + "Ez": 193.22336541793413, + "rho": 7968.182346878508 }, "ions": { - "particle_momentum_x": 3.1125151786241107e-18, - "particle_momentum_y": 3.119385993047207e-18, - "particle_momentum_z": 3.0289560038617916e-18, - "particle_position_x": 13628.662686419664, - "particle_position_y": 2285.6952310457755, - "particle_theta": 115055.48935725243, + "particle_momentum_x": 3.112515238421571e-18, + "particle_momentum_y": 3.1193860531312334e-18, + "particle_momentum_z": 3.0289560038609835e-18, + "particle_position_x": 13628.662686094893, + "particle_position_y": 2285.6952310456554, + "particle_theta": 115055.48935714104, "particle_weight": 2.525423582445981e+18 } } \ No newline at end of file diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H 
b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H index 632ff2bd785..a5088e31b9d 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.H @@ -86,7 +86,7 @@ public: void CalculateExternalCurlA (std::string& coil_name); AMREX_FORCE_INLINE - void PopulateExternalFieldFromVectorPotential ( + void AddExternalFieldFromVectorPotential ( ablastr::fields::VectorField const& dstField, amrex::Real scale_factor, ablastr::fields::VectorField const& srcField, diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp index 50a62335b57..22b5a7e2c3f 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/ExternalVectorPotential.cpp @@ -46,7 +46,7 @@ ExternalVectorPotential::ReadParameters () m_A_external.resize(m_nFields); m_A_ext_time_function.resize(m_nFields); - for (std::string & field_time : m_A_ext_time_function) {field_time = "1.0"; } + for (std::string & field_time : m_A_ext_time_function) { field_time = "1.0"; } m_A_external_time_parser.resize(m_nFields); m_A_time_scale.resize(m_nFields); @@ -272,7 +272,7 @@ ExternalVectorPotential::CalculateExternalCurlA (std::string& coil_name) AMREX_FORCE_INLINE void -ExternalVectorPotential::PopulateExternalFieldFromVectorPotential ( +ExternalVectorPotential::AddExternalFieldFromVectorPotential ( ablastr::fields::VectorField const& dstField, amrex::Real scale_factor, ablastr::fields::VectorField const& srcField, @@ -313,21 +313,21 @@ ExternalVectorPotential::PopulateExternalFieldFromVectorPotential ( // Skip field update in the embedded boundaries if (update_Fx_arr && update_Fx_arr(i, j, k) == 0) { return; } - Fx(i,j,k) = scale_factor * Sx(i,j,k); + Fx(i,j,k) += scale_factor * Sx(i,j,k); }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ // Skip field update in the embedded boundaries if (update_Fy_arr && update_Fy_arr(i, j, k) == 0) { return; } - Fy(i,j,k) = scale_factor * Sy(i,j,k); + Fy(i,j,k) += scale_factor * Sy(i,j,k); }, [=] AMREX_GPU_DEVICE (int i, int j, int k){ // Skip field update in the embedded boundaries if (update_Fz_arr && update_Fz_arr(i, j, k) == 0) { return; } - Fz(i,j,k) = scale_factor * Sz(i,j,k); + Fz(i,j,k) += scale_factor * Sz(i,j,k); } ); } @@ -339,12 +339,20 @@ ExternalVectorPotential::UpdateHybridExternalFields (const amrex::Real t, const using ablastr::fields::Direction; auto& warpx = WarpX::GetInstance(); - ablastr::fields::MultiLevelVectorField B_ext = warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_B_fp_external, warpx.finestLevel()); ablastr::fields::MultiLevelVectorField E_ext = warpx.m_fields.get_mr_levels_alldirs(FieldType::hybrid_E_fp_external, warpx.finestLevel()); + // Zero E and B external fields prior to accumulating external fields + for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { + for (int idir = 0; idir < 3; ++idir) { + B_ext[lev][Direction{idir}]->setVal(0.0_rt); + E_ext[lev][Direction{idir}]->setVal(0.0_rt); + } + } + + // Iterate over external fields and add together with individual time functions. 
for (int i = 0; i < m_nFields; ++i) { const std::string Aext_field = m_field_names[i] + std::string{"_Aext"}; const std::string curlAext_field = m_field_names[i] + std::string{"_curlAext"}; @@ -363,8 +371,8 @@ ExternalVectorPotential::UpdateHybridExternalFields (const amrex::Real t, const warpx.m_fields.get_mr_levels_alldirs(curlAext_field, warpx.finestLevel()); for (int lev = 0; lev <= warpx.finestLevel(); ++lev) { - PopulateExternalFieldFromVectorPotential(E_ext[lev], scale_factor_E, A_ext[lev], warpx.GetEBUpdateEFlag()[lev]); - PopulateExternalFieldFromVectorPotential(B_ext[lev], scale_factor_B, curlA_ext[lev], warpx.GetEBUpdateBFlag()[lev]); + AddExternalFieldFromVectorPotential(E_ext[lev], scale_factor_E, A_ext[lev], warpx.GetEBUpdateEFlag()[lev]); + AddExternalFieldFromVectorPotential(B_ext[lev], scale_factor_B, curlA_ext[lev], warpx.GetEBUpdateBFlag()[lev]); for (int idir = 0; idir < 3; ++idir) { E_ext[lev][Direction{idir}]->FillBoundary(warpx.Geom(lev).periodicity()); From 85482ffac9caa3e4f91e45b586d754b7423f9203 Mon Sep 17 00:00:00 2001 From: mattobin <114018255+mattobin@users.noreply.github.com> Date: Tue, 25 Feb 2025 01:32:57 -0500 Subject: [PATCH 255/278] Adding hyper-resistivity expression, function of rho, B (#5692) Adding capability to define the hyper-resistivity as a function of the charge density and magnetic field strength. --------- Co-authored-by: Matt Tobin --- Python/pywarpx/picmi.py | 7 +- .../HybridPICModel/HybridPICModel.H | 6 +- .../HybridPICModel/HybridPICModel.cpp | 11 ++- .../HybridPICSolveE.cpp | 92 +++++++++++++++++-- 4 files changed, 102 insertions(+), 14 deletions(-) diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 0d7ffd91b5d..9606fc70136 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -1951,7 +1951,12 @@ def solver_initialize_inputs(self): self.plasma_resistivity, self.mangle_dict ), ) - pywarpx.hybridpicmodel.plasma_hyper_resistivity = self.plasma_hyper_resistivity + pywarpx.hybridpicmodel.__setattr__( + "plasma_hyper_resistivity(rho,B)", + pywarpx.my_constants.mangle_expression( + self.plasma_hyper_resistivity, self.mangle_dict + ), + ) pywarpx.hybridpicmodel.substeps = self.substeps pywarpx.hybridpicmodel.holmstrom_vacuum_region = self.holmstrom_vacuum_region pywarpx.hybridpicmodel.__setattr__( diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H index 2a489e1c806..fabba056733 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.H @@ -194,7 +194,11 @@ public: bool m_resistivity_has_J_dependence = false; /** Plasma hyper-resisitivity */ - amrex::Real m_eta_h = 0.0; + std::string m_eta_h_expression = "0.0"; + std::unique_ptr m_hyper_resistivity_parser; + amrex::ParserExecutor<2> m_eta_h; + bool m_include_hyper_resistivity_term = false; + bool m_hyper_resistivity_has_B_dependence = false; /** External current */ std::string m_Jx_ext_grid_function = "0.0"; diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp index 3e5c04e9794..87b5be3c0b7 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICModel/HybridPICModel.cpp @@ -48,9 +48,9 @@ void HybridPICModel::ReadParameters () } 
pp_hybrid.query("plasma_resistivity(rho,J)", m_eta_expression); - utils::parser::queryWithParser(pp_hybrid, "n_floor", m_n_floor); + pp_hybrid.query("plasma_hyper_resistivity(rho,B)", m_eta_h_expression); - utils::parser::queryWithParser(pp_hybrid, "plasma_hyper_resistivity", m_eta_h); + utils::parser::queryWithParser(pp_hybrid, "n_floor", m_n_floor); // convert electron temperature from eV to J m_elec_temp *= PhysConst::q_e; @@ -160,6 +160,13 @@ void HybridPICModel::InitData () const std::set resistivity_symbols = m_resistivity_parser->symbols(); m_resistivity_has_J_dependence += resistivity_symbols.count("J"); + m_include_hyper_resistivity_term = (m_eta_h_expression != "0.0"); + m_hyper_resistivity_parser = std::make_unique( + utils::parser::makeParser(m_eta_h_expression, {"rho","B"})); + m_eta_h = m_hyper_resistivity_parser->compile<2>(); + const std::set hyper_resistivity_symbols = m_hyper_resistivity_parser->symbols(); + m_hyper_resistivity_has_B_dependence += hyper_resistivity_symbols.count("B"); + m_J_external_parser[0] = std::make_unique( utils::parser::makeParser(m_Jx_ext_grid_function,{"x","y","z","t"})); m_J_external_parser[1] = std::make_unique( diff --git a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp index f46b2f73e41..e3f46c686ea 100644 --- a/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp +++ b/Source/FieldSolver/FiniteDifferenceSolver/HybridPICSolveE.cpp @@ -428,8 +428,8 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( const auto eta_h = hybrid_model->m_eta_h; const auto rho_floor = hybrid_model->m_n_floor * PhysConst::q_e; const auto resistivity_has_J_dependence = hybrid_model->m_resistivity_has_J_dependence; - - const bool include_hyper_resistivity_term = (eta_h > 0.0) && solve_for_Faraday; + const auto hyper_resistivity_has_B_dependence = hybrid_model->m_hyper_resistivity_has_B_dependence; + const bool include_hyper_resistivity_term = hybrid_model->m_include_hyper_resistivity_term; const bool include_external_fields = hybrid_model->m_add_external_fields; @@ -574,6 +574,9 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Array4 const& enE = enE_nodal_mf.const_array(mfi); Array4 const& rho = rhofield.const_array(mfi); Array4 const& Pe = Pefield.const_array(mfi); + Array4 const& Br = Bfield[0]->array(mfi); + Array4 const& Bt = Bfield[1]->array(mfi); + Array4 const& Bz = Bfield[2]->array(mfi); // Extract structures indicating where the fields // should be updated, given the position of the embedded boundaries @@ -649,11 +652,22 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Er(i, j, 0) += eta(rho_val, jtot_val) * Jr(i, j, 0); if (include_hyper_resistivity_term) { + + // Interpolate B field to appropriate staggering to match E field + Real btot_val = 0._rt; + if (hyper_resistivity_has_B_dependence) { + const Real br_val = Interp(Br, Br_stag, Er_stag, coarsen, i, j, 0, 0); + const Real bt_val = Interp(Bt, Bt_stag, Er_stag, coarsen, i, j, 0, 0); + const Real bz_val = Interp(Bz, Bz_stag, Er_stag, coarsen, i, j, 0, 0); + btot_val = std::sqrt(br_val*br_val + bt_val*bt_val + bz_val*bz_val); + } + // r on cell-centered point (Jr is cell-centered in r) const Real r = rmin + (i + 0.5_rt)*dr; auto nabla2Jr = T_Algo::Dr_rDr_over_r(Jr, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) + T_Algo::Dzz(Jr, coefs_z, n_coefs_z, i, j, 0, 0) - Jr(i, j, 0)/(r*r); - Er(i, j, 0) -= eta_h * nabla2Jr; + + Er(i, j, 0) -= eta_h(rho_val, btot_val) * nabla2Jr; } } @@ -709,9 +723,20 @@ 
void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Et(i, j, 0) += eta(rho_val, jtot_val) * Jt(i, j, 0); if (include_hyper_resistivity_term) { + + // Interpolate B field to appropriate staggering to match E field + Real btot_val = 0._rt; + if (hyper_resistivity_has_B_dependence) { + const Real br_val = Interp(Br, Br_stag, Et_stag, coarsen, i, j, 0, 0); + const Real bt_val = Interp(Bt, Bt_stag, Et_stag, coarsen, i, j, 0, 0); + const Real bz_val = Interp(Bz, Bz_stag, Et_stag, coarsen, i, j, 0, 0); + btot_val = std::sqrt(br_val*br_val + bt_val*bt_val + bz_val*bz_val); + } + auto nabla2Jt = T_Algo::Dr_rDr_over_r(Jt, r, dr, coefs_r, n_coefs_r, i, j, 0, 0) + T_Algo::Dzz(Jt, coefs_z, n_coefs_z, i, j, 0, 0) - Jt(i, j, 0)/(r*r); - Et(i, j, 0) -= eta_h * nabla2Jt; + + Et(i, j, 0) -= eta_h(rho_val, btot_val) * nabla2Jt; } } @@ -761,6 +786,16 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( Ez(i, j, 0) += eta(rho_val, jtot_val) * Jz(i, j, 0); if (include_hyper_resistivity_term) { + + // Interpolate B field to appropriate staggering to match E field + Real btot_val = 0._rt; + if (hyper_resistivity_has_B_dependence) { + const Real br_val = Interp(Br, Br_stag, Ez_stag, coarsen, i, j, 0, 0); + const Real bt_val = Interp(Bt, Bt_stag, Ez_stag, coarsen, i, j, 0, 0); + const Real bz_val = Interp(Bz, Bz_stag, Ez_stag, coarsen, i, j, 0, 0); + btot_val = std::sqrt(br_val*br_val + bt_val*bt_val + bz_val*bz_val); + } + // r on nodal point (Jz is nodal in r) const Real r = rmin + i*dr; @@ -768,7 +803,8 @@ void FiniteDifferenceSolver::HybridPICSolveECylindrical ( if (r > 0.5_rt*dr) { nabla2Jz += T_Algo::Dr_rDr_over_r(Jz, r, dr, coefs_r, n_coefs_r, i, j, 0, 0); } - Ez(i, j, 0) -= eta_h * nabla2Jz; + + Ez(i, j, 0) -= eta_h(rho_val, btot_val) * nabla2Jz; } } @@ -811,8 +847,8 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( const auto eta_h = hybrid_model->m_eta_h; const auto rho_floor = hybrid_model->m_n_floor * PhysConst::q_e; const auto resistivity_has_J_dependence = hybrid_model->m_resistivity_has_J_dependence; - - const bool include_hyper_resistivity_term = (eta_h > 0.) 
&& solve_for_Faraday; + const auto hyper_resistivity_has_B_dependence = hybrid_model->m_hyper_resistivity_has_B_dependence; + const bool include_hyper_resistivity_term = hybrid_model->m_include_hyper_resistivity_term; const bool include_external_fields = hybrid_model->m_add_external_fields; @@ -957,6 +993,9 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Array4 const& enE = enE_nodal_mf.const_array(mfi); Array4 const& rho = rhofield.const_array(mfi); Array4 const& Pe = Pefield.array(mfi); + Array4 const& Bx = Bfield[0]->array(mfi); + Array4 const& By = Bfield[1]->array(mfi); + Array4 const& Bz = Bfield[2]->array(mfi); // Extract structures indicating where the fields // should be updated, given the position of the embedded boundaries @@ -1030,10 +1069,21 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Ex(i, j, k) += eta(rho_val, jtot_val) * Jx(i, j, k); if (include_hyper_resistivity_term) { + + // Interpolate B field to appropriate staggering to match E field + Real btot_val = 0._rt; + if (hyper_resistivity_has_B_dependence) { + const Real bx_val = Interp(Bx, Bx_stag, Ex_stag, coarsen, i, j, k, 0); + const Real by_val = Interp(By, By_stag, Ex_stag, coarsen, i, j, k, 0); + const Real bz_val = Interp(Bz, Bz_stag, Ex_stag, coarsen, i, j, k, 0); + btot_val = std::sqrt(bx_val*bx_val + by_val*by_val + bz_val*bz_val); + } + auto nabla2Jx = T_Algo::Dxx(Jx, coefs_x, n_coefs_x, i, j, k) + T_Algo::Dyy(Jx, coefs_y, n_coefs_y, i, j, k) + T_Algo::Dzz(Jx, coefs_z, n_coefs_z, i, j, k); - Ex(i, j, k) -= eta_h * nabla2Jx; + + Ex(i, j, k) -= eta_h(rho_val, btot_val) * nabla2Jx; } } @@ -1083,10 +1133,21 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Ey(i, j, k) += eta(rho_val, jtot_val) * Jy(i, j, k); if (include_hyper_resistivity_term) { + + // Interpolate B field to appropriate staggering to match E field + Real btot_val = 0._rt; + if (hyper_resistivity_has_B_dependence) { + const Real bx_val = Interp(Bx, Bx_stag, Ey_stag, coarsen, i, j, k, 0); + const Real by_val = Interp(By, By_stag, Ey_stag, coarsen, i, j, k, 0); + const Real bz_val = Interp(Bz, Bz_stag, Ey_stag, coarsen, i, j, k, 0); + btot_val = std::sqrt(bx_val*bx_val + by_val*by_val + bz_val*bz_val); + } + auto nabla2Jy = T_Algo::Dxx(Jy, coefs_x, n_coefs_x, i, j, k) + T_Algo::Dyy(Jy, coefs_y, n_coefs_y, i, j, k) + T_Algo::Dzz(Jy, coefs_z, n_coefs_z, i, j, k); - Ey(i, j, k) -= eta_h * nabla2Jy; + + Ey(i, j, k) -= eta_h(rho_val, btot_val) * nabla2Jy; } } @@ -1136,10 +1197,21 @@ void FiniteDifferenceSolver::HybridPICSolveECartesian ( Ez(i, j, k) += eta(rho_val, jtot_val) * Jz(i, j, k); if (include_hyper_resistivity_term) { + + // Interpolate B field to appropriate staggering to match E field + Real btot_val = 0._rt; + if (hyper_resistivity_has_B_dependence) { + const Real bx_val = Interp(Bx, Bx_stag, Ez_stag, coarsen, i, j, k, 0); + const Real by_val = Interp(By, By_stag, Ez_stag, coarsen, i, j, k, 0); + const Real bz_val = Interp(Bz, Bz_stag, Ez_stag, coarsen, i, j, k, 0); + btot_val = std::sqrt(bx_val*bx_val + by_val*by_val + bz_val*bz_val); + } + auto nabla2Jz = T_Algo::Dxx(Jz, coefs_x, n_coefs_x, i, j, k) + T_Algo::Dyy(Jz, coefs_y, n_coefs_y, i, j, k) + T_Algo::Dzz(Jz, coefs_z, n_coefs_z, i, j, k); - Ez(i, j, k) -= eta_h * nabla2Jz; + + Ez(i, j, k) -= eta_h(rho_val, btot_val) * nabla2Jz; } } From 6969114e964ddd2ad7b8b2c9c512c474cd340aee Mon Sep 17 00:00:00 2001 From: David Grote Date: Tue, 25 Feb 2025 06:12:09 -0800 Subject: [PATCH 256/278] [WIP]Add diagnostic file output for the implicit solvers (#5464) This PR 
adds the option to write out information about the implicit solvers to a file during the simulation. Note that this PR uses `std::filesystem` directly. This does not work easily for older gcc compilers, from before version 9.1. As a side effect, this PR removes builds using the older gcc and bumps up the comment in the dependencies documentation. This should not be an issue since all of the installs on HPC systems use newer versions. --- Docs/source/install/dependencies.rst | 2 +- .../inputs_test_2d_theta_implicit_jfnk_vandb | 2 + .../ImplicitSolvers/SemiImplicitEM.cpp | 2 +- .../StrangImplicitSpectralEM.cpp | 2 +- .../ImplicitSolvers/ThetaImplicitEM.cpp | 2 +- Source/NonlinearSolvers/NewtonSolver.H | 59 ++++++++++++++++++- Source/NonlinearSolvers/NonlinearSolver.H | 4 +- Source/NonlinearSolvers/PicardSolver.H | 51 +++++++++++++++- Tools/machines/desktop/spack-ubuntu-cuda.yaml | 13 ---- .../machines/desktop/spack-ubuntu-openmp.yaml | 13 ---- Tools/machines/desktop/spack-ubuntu-rocm.yaml | 13 ---- 11 files changed, 114 insertions(+), 49 deletions(-) diff --git a/Docs/source/install/dependencies.rst b/Docs/source/install/dependencies.rst index facaa3a5614..a06ea10a86d 100644 --- a/Docs/source/install/dependencies.rst +++ b/Docs/source/install/dependencies.rst @@ -6,7 +6,7 @@ Dependencies WarpX depends on the following popular third party software. Please see installation instructions below. -- a mature `C++17 `__ compiler, e.g., GCC 8.4+, Clang 7, NVCC 11.0, MSVC 19.15 or newer +- a mature `C++17 `__ compiler, e.g., GCC 9.1+, Clang 7, NVCC 11.0, MSVC 19.15 or newer - `CMake 3.24.0+ `__ - `Git 2.18+ `__ - `AMReX `__: we automatically download and compile a copy of AMReX diff --git a/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb b/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb index bab9a03878c..c0cd729f50c 100644 --- a/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb +++ b/Examples/Tests/implicit/inputs_test_2d_theta_implicit_jfnk_vandb @@ -52,6 +52,7 @@ implicit_evolve.particle_tolerance = 1.0e-12 #picard.relative_tolerance = 0.0 #1.0e-12 #picard.absolute_tolerance = 0.0 #1.0e-24 #picard.require_convergence = false +#picard.diagnostic_file = "diags/picard_solver.txt" implicit_evolve.nonlinear_solver = "newton" newton.verbose = true @@ -59,6 +60,7 @@ newton.max_iterations = 20 newton.relative_tolerance = 1.0e-12 newton.absolute_tolerance = 0.0 newton.require_convergence = false +newton.diagnostic_file = "diags/newton_solver.txt" gmres.verbose_int = 2 gmres.max_iterations = 1000 diff --git a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp index bf8441e1992..aae10b978ee 100644 --- a/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/SemiImplicitEM.cpp @@ -80,7 +80,7 @@ void SemiImplicitEM::OneStep ( amrex::Real start_time, // Solve nonlinear system for Eg at t_{n+1/2} // Particles will be advanced to t_{n+1/2} m_E.Copy(m_Eold); // initial guess for Eg^{n+1/2} - m_nlsolver->Solve( m_E, m_Eold, half_time, 0.5_rt*m_dt ); + m_nlsolver->Solve( m_E, m_Eold, half_time, 0.5_rt*m_dt, a_step ); // Update WarpX owned Efield_fp to t_{n+1/2} m_WarpX->SetElectricFieldAndApplyBCs( m_E, half_time ); diff --git a/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp index b8be6b93c63..debbff1b35e 100644 --- a/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp +++ 
b/Source/FieldSolver/ImplicitSolvers/StrangImplicitSpectralEM.cpp @@ -81,7 +81,7 @@ void StrangImplicitSpectralEM::OneStep ( amrex::Real start_time, // Solve nonlinear system for E at t_{n+1/2} // Particles will be advanced to t_{n+1/2} - m_nlsolver->Solve( m_E, m_Eold, half_time, 0.5_rt*m_dt ); + m_nlsolver->Solve( m_E, m_Eold, half_time, 0.5_rt*m_dt, a_step ); // Update WarpX owned Efield_fp and Bfield_fp to t_{n+1/2} UpdateWarpXFields( m_E, half_time ); diff --git a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp index 1e6596f5eaa..54644d357d2 100644 --- a/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp +++ b/Source/FieldSolver/ImplicitSolvers/ThetaImplicitEM.cpp @@ -106,7 +106,7 @@ void ThetaImplicitEM::OneStep ( const amrex::Real start_time, // Solve nonlinear system for Eg at t_{n+theta} // Particles will be advanced to t_{n+1/2} m_E.Copy(m_Eold); // initial guess for Eg^{n+theta} - m_nlsolver->Solve( m_E, m_Eold, start_time, m_theta*m_dt ); + m_nlsolver->Solve( m_E, m_Eold, start_time, m_theta*m_dt, a_step ); // Update WarpX owned Efield_fp and Bfield_fp to t_{n+theta} UpdateWarpXFields( m_E, start_time ); diff --git a/Source/NonlinearSolvers/NewtonSolver.H b/Source/NonlinearSolvers/NewtonSolver.H index f92687d6b34..9a1a7d5034a 100644 --- a/Source/NonlinearSolvers/NewtonSolver.H +++ b/Source/NonlinearSolvers/NewtonSolver.H @@ -16,6 +16,8 @@ #include #include +#include +#include /** * \brief Newton method to solve nonlinear equation of form: @@ -45,7 +47,8 @@ public: void Solve ( Vec& a_U, const Vec& a_b, amrex::Real a_time, - amrex::Real a_dt ) const override; + amrex::Real a_dt, + int a_step) const override; void GetSolverParams ( amrex::Real& a_rtol, amrex::Real& a_atol, @@ -200,6 +203,34 @@ void NewtonSolver::Define ( const Vec& a_U, this->m_is_defined = true; + // Create diagnostic file and write header + if (!this->m_diagnostic_file.empty() && amrex::ParallelDescriptor::IOProcessor()) { + + std::filesystem::path const diagnostic_path(this->m_diagnostic_file); + std::filesystem::path const diagnostic_dir = diagnostic_path.parent_path(); + if (!diagnostic_dir.empty()) { + std::filesystem::create_directories(diagnostic_dir); + } + + std::ofstream diagnostic_file{this->m_diagnostic_file, std::ofstream::out | std::ofstream::trunc}; + int c = 0; + diagnostic_file << "#"; + diagnostic_file << "[" << c++ << "]step()"; + diagnostic_file << " "; + diagnostic_file << "[" << c++ << "]time(s)"; + diagnostic_file << " "; + diagnostic_file << "[" << c++ << "]iters"; + diagnostic_file << " "; + diagnostic_file << "[" << c++ << "]norm_abs"; + diagnostic_file << " "; + diagnostic_file << "[" << c++ << "]norm_rel"; + diagnostic_file << " "; + diagnostic_file << "[" << c++ << "]gmres_iters"; + diagnostic_file << " "; + diagnostic_file << "[" << c++ << "]gmres_last_res"; + diagnostic_file << "\n"; + diagnostic_file.close(); + } } template @@ -211,6 +242,7 @@ void NewtonSolver::ParseParameters () pp_newton.query("relative_tolerance", m_rtol); pp_newton.query("max_iterations", m_maxits); pp_newton.query("require_convergence", m_require_convergence); + pp_newton.query("diagnostic_file", this->m_diagnostic_file); const amrex::ParmParse pp_gmres("gmres"); pp_gmres.query("verbose_int", m_gmres_verbose_int); @@ -227,7 +259,8 @@ template void NewtonSolver::Solve ( Vec& a_U, const Vec& a_b, amrex::Real a_time, - amrex::Real a_dt ) const + amrex::Real a_dt, + int a_step) const { BL_PROFILE("NewtonSolver::Solve()"); 
WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -248,6 +281,7 @@ void NewtonSolver::Solve ( Vec& a_U, amrex::Real norm_rel = 0.; int iter; + int linear_solver_iters = 0; for (iter = 0; iter < m_maxits;) { // Compute residual: F(U) = U - b - R(U) @@ -293,6 +327,7 @@ void NewtonSolver::Solve ( Vec& a_U, // Solve linear system for Newton step [Jac]*dU = F m_dU.zero(); m_linear_solver->solve( m_dU, m_F, m_gmres_rtol, m_gmres_atol ); + linear_solver_iters += m_linear_solver->getNumIters(); // Update solution a_U -= m_dU; @@ -321,6 +356,26 @@ void NewtonSolver::Solve ( Vec& a_U, } } + if (!this->m_diagnostic_file.empty() && amrex::ParallelDescriptor::IOProcessor()) { + std::ofstream diagnostic_file{this->m_diagnostic_file, std::ofstream::out | std::ofstream::app}; + diagnostic_file << std::setprecision(14); + diagnostic_file << a_step; + diagnostic_file << " ";; + diagnostic_file << a_time; + diagnostic_file << " ";; + diagnostic_file << iter; + diagnostic_file << " ";; + diagnostic_file << norm_abs; + diagnostic_file << " ";; + diagnostic_file << norm_rel; + diagnostic_file << " ";; + diagnostic_file << linear_solver_iters; + diagnostic_file << " ";; + diagnostic_file << m_linear_solver->getResidualNorm(); + diagnostic_file << "\n"; + diagnostic_file.close(); + } + } template diff --git a/Source/NonlinearSolvers/NonlinearSolver.H b/Source/NonlinearSolvers/NonlinearSolver.H index 9daa3489f11..d7175d8ea56 100644 --- a/Source/NonlinearSolvers/NonlinearSolver.H +++ b/Source/NonlinearSolvers/NonlinearSolver.H @@ -59,7 +59,8 @@ public: virtual void Solve ( Vec&, const Vec&, amrex::Real, - amrex::Real ) const = 0; + amrex::Real, + int) const = 0; /** * \brief Print parameters used by the nonlinear solver. @@ -81,6 +82,7 @@ protected: bool m_is_defined = false; mutable bool m_verbose = true; + std::string m_diagnostic_file; }; diff --git a/Source/NonlinearSolvers/PicardSolver.H b/Source/NonlinearSolvers/PicardSolver.H index 62323b64a23..448fea7e75d 100644 --- a/Source/NonlinearSolvers/PicardSolver.H +++ b/Source/NonlinearSolvers/PicardSolver.H @@ -13,6 +13,8 @@ #include "Utils/TextMsg.H" #include +#include +#include /** * \brief Picard fixed-point iteration method to solve nonlinear @@ -42,7 +44,8 @@ public: void Solve ( Vec& a_U, const Vec& a_b, amrex::Real a_time, - amrex::Real a_dt ) const override; + amrex::Real a_dt, + int a_step) const override; void GetSolverParams ( amrex::Real& a_rtol, amrex::Real& a_atol, @@ -114,6 +117,31 @@ void PicardSolver::Define ( const Vec& a_U, this->m_is_defined = true; + // Create diagnostic file and write header + if (!this->m_diagnostic_file.empty() && amrex::ParallelDescriptor::IOProcessor()) { + + std::filesystem::path const diagnostic_path(this->m_diagnostic_file); + std::filesystem::path const diagnostic_dir = diagnostic_path.parent_path(); + if (!diagnostic_dir.empty()) { + std::filesystem::create_directories(diagnostic_dir); + } + + std::ofstream diagnostic_file{this->m_diagnostic_file, std::ofstream::out | std::ofstream::trunc}; + int c = 0; + diagnostic_file << "#"; + diagnostic_file << "[" << c++ << "]step()"; + diagnostic_file << " "; + diagnostic_file << "[" << c++ << "]time(s)"; + diagnostic_file << " "; + diagnostic_file << "[" << c++ << "]iters"; + diagnostic_file << " "; + diagnostic_file << "[" << c++ << "]norm_abs"; + diagnostic_file << " "; + diagnostic_file << "[" << c++ << "]norm_rel"; + diagnostic_file << "\n"; + diagnostic_file.close(); + } + } template @@ -125,14 +153,15 @@ void PicardSolver::ParseParameters () pp_picard.query("relative_tolerance", 
m_rtol); pp_picard.query("max_iterations", m_maxits); pp_picard.query("require_convergence", m_require_convergence); - + pp_picard.query("diagnostic_file", this->m_diagnostic_file); } template void PicardSolver::Solve ( Vec& a_U, const Vec& a_b, amrex::Real a_time, - amrex::Real a_dt ) const + amrex::Real a_dt, + int a_step) const { BL_PROFILE("PicardSolver::Solve()"); WARPX_ALWAYS_ASSERT_WITH_MESSAGE( @@ -213,6 +242,22 @@ void PicardSolver::Solve ( Vec& a_U, } } + if (!this->m_diagnostic_file.empty() && amrex::ParallelDescriptor::IOProcessor()) { + std::ofstream diagnostic_file{this->m_diagnostic_file, std::ofstream::out | std::ofstream::app}; + diagnostic_file << std::setprecision(14); + diagnostic_file << a_step; + diagnostic_file << " "; + diagnostic_file << a_time; + diagnostic_file << " "; + diagnostic_file << iter; + diagnostic_file << " "; + diagnostic_file << norm_abs; + diagnostic_file << " "; + diagnostic_file << norm_rel; + diagnostic_file << "\n"; + diagnostic_file.close(); + } + } #endif diff --git a/Tools/machines/desktop/spack-ubuntu-cuda.yaml b/Tools/machines/desktop/spack-ubuntu-cuda.yaml index 08d0c95ee4b..460f271a24c 100644 --- a/Tools/machines/desktop/spack-ubuntu-cuda.yaml +++ b/Tools/machines/desktop/spack-ubuntu-cuda.yaml @@ -120,19 +120,6 @@ spack: modules: [] environment: {} extra_rpaths: [] - - compiler: - spec: gcc@8.3.0 - paths: - cc: /usr/bin/gcc - cxx: /usr/bin/g++ - f77: /usr/bin/gfortran - fc: /usr/bin/gfortran - flags: {} - operating_system: debian10 - target: x86_64 - modules: [] - environment: {} - extra_rpaths: [] # binary caches mirrors: diff --git a/Tools/machines/desktop/spack-ubuntu-openmp.yaml b/Tools/machines/desktop/spack-ubuntu-openmp.yaml index b658f1e009d..90f8a048686 100644 --- a/Tools/machines/desktop/spack-ubuntu-openmp.yaml +++ b/Tools/machines/desktop/spack-ubuntu-openmp.yaml @@ -110,19 +110,6 @@ spack: modules: [] environment: {} extra_rpaths: [] - - compiler: - spec: gcc@8.3.0 - paths: - cc: /usr/bin/gcc - cxx: /usr/bin/g++ - f77: /usr/bin/gfortran - fc: /usr/bin/gfortran - flags: {} - operating_system: debian10 - target: x86_64 - modules: [] - environment: {} - extra_rpaths: [] # binary caches mirrors: diff --git a/Tools/machines/desktop/spack-ubuntu-rocm.yaml b/Tools/machines/desktop/spack-ubuntu-rocm.yaml index 45c9b0f776e..851ed04c0f9 100644 --- a/Tools/machines/desktop/spack-ubuntu-rocm.yaml +++ b/Tools/machines/desktop/spack-ubuntu-rocm.yaml @@ -114,19 +114,6 @@ spack: modules: [] environment: {} extra_rpaths: [] - - compiler: - spec: gcc@8.3.0 - paths: - cc: /usr/bin/gcc - cxx: /usr/bin/g++ - f77: /usr/bin/gfortran - fc: /usr/bin/gfortran - flags: {} - operating_system: debian10 - target: x86_64 - modules: [] - environment: {} - extra_rpaths: [] # binary caches mirrors: From f95e0a794f2fc35d6248c74ca06d7c642eb2a496 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Wed, 26 Feb 2025 15:18:29 +0100 Subject: [PATCH 257/278] Documentation: update profile for Adastra supercomputer (CINES, France) (#5709) This PR updates the profile to compile WarpX on the Adastra supercomputer (CINES, France) --- .../machines/adastra-cines/adastra_warpx.profile.example | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/Tools/machines/adastra-cines/adastra_warpx.profile.example b/Tools/machines/adastra-cines/adastra_warpx.profile.example index 8aaff6e4450..b5cfffbf727 100644 --- a/Tools/machines/adastra-cines/adastra_warpx.profile.example +++ b/Tools/machines/adastra-cines/adastra_warpx.profile.example @@ -4,12 +4,12 @@ # required 
dependencies module purge -module load cpe/23.12 +module load cpe/24.07 module load craype-accel-amd-gfx90a craype-x86-trento module load PrgEnv-cray -module load CCE-GPU-3.0.0 -module load amd-mixed/5.7.1 module load develop +module load CCE-GPU-4.0.0 +module load amd-mixed/6.2.1 module load cmake/3.27.9 # optional: for PSATD in RZ geometry support @@ -19,7 +19,7 @@ export LD_LIBRARY_PATH=${SHAREDHOMEDIR}/sw/adastra/gpu/blaspp-2024.05.31/lib64:$ export LD_LIBRARY_PATH=${SHAREDHOMEDIR}/sw/adastra/gpu/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH # optional: for QED lookup table generation support -module load boost/1.83.0-mpi-python3 +module load boost/1.86.0-mpi # optional: for openPMD support module load cray-hdf5-parallel From a888b60ee6b6ef1f76c6e39cf8cf4f4999aed10b Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 26 Feb 2025 06:22:09 -0800 Subject: [PATCH 258/278] Doc: ADIOS2 v2.10.2 (#5706) Use the latest ADIOS2 release (v2.10.2) on all HPC machines. --- .../adastra-cines/adastra_warpx.profile.example | 4 ++-- Tools/machines/adastra-cines/install_dependencies.sh | 12 +++++++----- Tools/machines/dane-llnl/dane_warpx.profile.example | 4 ++-- Tools/machines/dane-llnl/install_dependencies.sh | 6 +++--- .../frontier-olcf/frontier_warpx.profile.example | 2 +- .../fugaku-riken/fugaku_warpx.profile.example | 2 +- Tools/machines/fugaku-riken/install_dependencies.sh | 6 +++--- .../greatlakes_v100_warpx.profile.example | 6 +++--- .../greatlakes-umich/install_v100_dependencies.sh | 6 +++--- .../machines/hpc3-uci/hpc3_gpu_warpx.profile.example | 6 +++--- Tools/machines/hpc3-uci/install_gpu_dependencies.sh | 6 +++--- .../lassen-llnl/install_v100_dependencies_toss3.sh | 6 +++--- .../lassen_v100_warpx_toss3.profile.example | 6 +++--- .../lawrencium-lbnl/lawrencium_warpx.profile.example | 4 ++-- .../leonardo-cineca/install_gpu_dependencies.sh | 9 ++++----- .../leonardo_gpu_warpx.profile.example | 6 +++--- .../lonestar6-tacc/install_a100_dependencies.sh | 6 +++--- .../lonestar6_warpx_a100.profile.example | 6 +++--- Tools/machines/lumi-csc/install_dependencies.sh | 6 +++--- Tools/machines/lumi-csc/lumi_warpx.profile.example | 4 ++-- .../perlmutter-nersc/install_cpu_dependencies.sh | 6 +++--- .../perlmutter-nersc/install_gpu_dependencies.sh | 6 +++--- .../perlmutter_cpu_warpx.profile.example | 6 +++--- .../perlmutter_gpu_warpx.profile.example | 6 +++--- .../machines/pitzer-osc/install_cpu_dependencies.sh | 6 +++--- .../machines/pitzer-osc/install_v100_dependencies.sh | 6 +++--- .../pitzer-osc/pitzer_cpu_warpx.profile.example | 6 +++--- .../pitzer-osc/pitzer_v100_warpx.profile.example | 6 +++--- .../polaris-alcf/install_gpu_dependencies.sh | 6 +++--- .../polaris-alcf/polaris_gpu_warpx.profile.example | 6 +++--- .../tioga-llnl/install_mi300a_dependencies.sh | 6 +++--- .../tioga-llnl/tioga_mi300a_warpx.profile.example | 6 +++--- 32 files changed, 93 insertions(+), 92 deletions(-) diff --git a/Tools/machines/adastra-cines/adastra_warpx.profile.example b/Tools/machines/adastra-cines/adastra_warpx.profile.example index b5cfffbf727..de7a325139a 100644 --- a/Tools/machines/adastra-cines/adastra_warpx.profile.example +++ b/Tools/machines/adastra-cines/adastra_warpx.profile.example @@ -24,9 +24,9 @@ module load boost/1.86.0-mpi # optional: for openPMD support module load cray-hdf5-parallel export CMAKE_PREFIX_PATH=${SHAREDHOMEDIR}/sw/adastra/gpu/c-blosc-1.21.1:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${SHAREDHOMEDIR}/sw/adastra/gpu/adios2-2.8.3:$CMAKE_PREFIX_PATH +export 
CMAKE_PREFIX_PATH=${SHAREDHOMEDIR}/sw/adastra/gpu/adios2-2.10.2:$CMAKE_PREFIX_PATH -export PATH=${HOME}/sw/adastra/gpu/adios2-2.8.3/bin:${PATH} +export PATH=${SHAREDHOMEDIR}/sw/adastra/gpu/adios2-2.10.2/bin:${PATH} # optional: for Python bindings or libEnsemble module load cray-python/3.11.5 diff --git a/Tools/machines/adastra-cines/install_dependencies.sh b/Tools/machines/adastra-cines/install_dependencies.sh index 242f5fc664c..896b775db3b 100755 --- a/Tools/machines/adastra-cines/install_dependencies.sh +++ b/Tools/machines/adastra-cines/install_dependencies.sh @@ -76,16 +76,18 @@ cmake -S $SHAREDHOMEDIR/src/c-blosc -B $SHAREDHOMEDIR/src/c-blosc-ad-build -DBUI cmake --build $SHAREDHOMEDIR/src/c-blosc-ad-build --target install --parallel 16 rm -rf $SHAREDHOMEDIR/src/c-blosc-ad-build -# ADIOS2 v. 2.8.3 (for OpenPMD) +# ADIOS2 v. 2.10.2 (for OpenPMD) if [ -d $SHAREDHOMEDIR/src/adios2 ] then - # git repository is already there - : + cd $SHAREDHOMEDIR/src/adios2 + git fetch --prune + git checkout v2.10.2 + cd - else - git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git $SHAREDHOMEDIR/src/adios2 + git clone -b v2.10.2 https://github.com/ornladios/ADIOS2.git $SHAREDHOMEDIR/src/adios2 fi rm -rf $SHAREDHOMEDIR/src/adios2-ad-build -cmake -S $SHAREDHOMEDIR/src/adios2 -B $SHAREDHOMEDIR/src/adios2-ad-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3 +cmake -S $SHAREDHOMEDIR/src/adios2 -B $SHAREDHOMEDIR/src/adios2-ad-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.2 cmake --build $SHAREDHOMEDIR/src/adios2-ad-build --target install -j 16 rm -rf $SHAREDHOMEDIR/src/adios2-ad-build diff --git a/Tools/machines/dane-llnl/dane_warpx.profile.example b/Tools/machines/dane-llnl/dane_warpx.profile.example index dcb895509cc..8992871f0e5 100644 --- a/Tools/machines/dane-llnl/dane_warpx.profile.example +++ b/Tools/machines/dane-llnl/dane_warpx.profile.example @@ -17,8 +17,8 @@ module load hdf5-parallel/1.14.0 SW_DIR="/usr/workspace/${USER}/dane" export CMAKE_PREFIX_PATH=${SW_DIR}/install/c-blosc-1.21.6:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${SW_DIR}/install/adios2-2.8.3:$CMAKE_PREFIX_PATH -export PATH=${SW_DIR}/install/adios2-2.8.3/bin:${PATH} +export CMAKE_PREFIX_PATH=${SW_DIR}/install/adios2-2.10.2:$CMAKE_PREFIX_PATH +export PATH=${SW_DIR}/install/adios2-2.10.2/bin:${PATH} # optional: for PSATD in RZ geometry support export CMAKE_PREFIX_PATH=${SW_DIR}/install/blaspp-2024.10.26:$CMAKE_PREFIX_PATH diff --git a/Tools/machines/dane-llnl/install_dependencies.sh b/Tools/machines/dane-llnl/install_dependencies.sh index 06bee0cead8..25e8e965777 100755 --- a/Tools/machines/dane-llnl/install_dependencies.sh +++ b/Tools/machines/dane-llnl/install_dependencies.sh @@ -54,12 +54,12 @@ if [ -d ${SW_DIR}/src/adios2 ] then cd ${SW_DIR}/src/adios2 git fetch --prune - git checkout v2.8.3 + git checkout v2.10.2 cd - else - git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git ${SW_DIR}/src/adios2 + git clone -b v2.10.2 https://github.com/ornladios/ADIOS2.git ${SW_DIR}/src/adios2 fi -cmake -S ${SW_DIR}/src/adios2 -B ${build_dir}/adios2-dane-build -DBUILD_TESTING=OFF -DADIOS2_BUILD_EXAMPLES=OFF -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_SST=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/install/adios2-2.8.3 +cmake -S ${SW_DIR}/src/adios2 -B 
${build_dir}/adios2-dane-build -DBUILD_TESTING=OFF -DADIOS2_BUILD_EXAMPLES=OFF -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_SST=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/install/adios2-2.10.2 cmake --build ${build_dir}/adios2-dane-build --target install -j 6 # BLAS++ (for PSATD+RZ) diff --git a/Tools/machines/frontier-olcf/frontier_warpx.profile.example b/Tools/machines/frontier-olcf/frontier_warpx.profile.example index 89461cc3e8b..16f28a0fca2 100644 --- a/Tools/machines/frontier-olcf/frontier_warpx.profile.example +++ b/Tools/machines/frontier-olcf/frontier_warpx.profile.example @@ -33,7 +33,7 @@ export LD_LIBRARY_PATH=${HOME}/sw/frontier/gpu/lapackpp-2024.05.31/lib64:$LD_LIB module load boost/1.85.0 # optional: for openPMD support -module load adios2/2.10.0-mpi +module load adios2/2.10.2-mpi module load hdf5/1.14.3-mpi # optional: for Python bindings or libEnsemble diff --git a/Tools/machines/fugaku-riken/fugaku_warpx.profile.example b/Tools/machines/fugaku-riken/fugaku_warpx.profile.example index caf85983b58..7041846ebbc 100644 --- a/Tools/machines/fugaku-riken/fugaku_warpx.profile.example +++ b/Tools/machines/fugaku-riken/fugaku_warpx.profile.example @@ -18,7 +18,7 @@ spack load boost@1.80.0%fj@4.8.1/zc5pwgc # optional: for openPMD support spack load hdf5@1.12.2%fj@4.8.1/im6lxev export CMAKE_PREFIX_PATH=${HOME}/sw/fugaku/a64fx/c-blosc-1.21.1-install:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${HOME}/sw/fugaku/a64fx/adios2-2.8.3-install:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=${HOME}/sw/fugaku/a64fx/adios2-2.10.2-install:$CMAKE_PREFIX_PATH # compiler environment hints export CC=$(which mpifcc) diff --git a/Tools/machines/fugaku-riken/install_dependencies.sh b/Tools/machines/fugaku-riken/install_dependencies.sh index 3ceb45e4558..eb5098fb43f 100755 --- a/Tools/machines/fugaku-riken/install_dependencies.sh +++ b/Tools/machines/fugaku-riken/install_dependencies.sh @@ -42,12 +42,12 @@ if [ -d ${SRC_DIR}/c-blosc ] then cd ${SRC_DIR}/adios2 git fetch --prune - git checkout v2.8.3 + git checkout v2.10.2 cd - else - git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git ${SRC_DIR}/adios2 + git clone -b v2.10.2 https://github.com/ornladios/ADIOS2.git ${SRC_DIR}/adios2 fi rm -rf ${SRC_DIR}/adios2-fugaku-build -cmake -S ${SRC_DIR}/adios2 -B ${SRC_DIR}/adios2-fugaku-build -DBUILD_SHARED_LIBS=OFF -DADIOS2_USE_Blosc=ON -DBUILD_TESTING=OFF -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3-install +cmake -S ${SRC_DIR}/adios2 -B ${SRC_DIR}/adios2-fugaku-build -DBUILD_SHARED_LIBS=OFF -DADIOS2_USE_Blosc=ON -DBUILD_TESTING=OFF -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.2-install cmake --build ${SRC_DIR}/adios2-fugaku-build --target install -j 48 rm -rf ${SRC_DIR}/adios2-fugaku-build diff --git a/Tools/machines/greatlakes-umich/greatlakes_v100_warpx.profile.example b/Tools/machines/greatlakes-umich/greatlakes_v100_warpx.profile.example index c08255e7962..140593deb55 100644 --- a/Tools/machines/greatlakes-umich/greatlakes_v100_warpx.profile.example +++ b/Tools/machines/greatlakes-umich/greatlakes_v100_warpx.profile.example @@ -21,16 +21,16 @@ module load phdf5/1.12.1 SW_DIR="${HOME}/sw/greatlakes/v100" export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc2-2.14.4:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.10.0:$CMAKE_PREFIX_PATH +export 
CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.10.2:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.05.31:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.05.31:$CMAKE_PREFIX_PATH export LD_LIBRARY_PATH=${SW_DIR}/c-blosc2-2.14.4/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.10.0/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.10.2/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH -export PATH=${SW_DIR}/adios2-2.10.0/bin:${PATH} +export PATH=${SW_DIR}/adios2-2.10.2/bin:${PATH} # optional: for Python bindings or libEnsemble module load python/3.12.1 diff --git a/Tools/machines/greatlakes-umich/install_v100_dependencies.sh b/Tools/machines/greatlakes-umich/install_v100_dependencies.sh index 30faec52421..c6925442d9f 100755 --- a/Tools/machines/greatlakes-umich/install_v100_dependencies.sh +++ b/Tools/machines/greatlakes-umich/install_v100_dependencies.sh @@ -57,10 +57,10 @@ if [ -d $HOME/src/adios2 ] then cd $HOME/src/adios2 git fetch --prune - git checkout v2.10.0 + git checkout v2.10.2 cd - else - git clone -b v2.10.0 https://github.com/ornladios/ADIOS2.git $HOME/src/adios2 + git clone -b v2.10.2 https://github.com/ornladios/ADIOS2.git $HOME/src/adios2 fi rm -rf $HOME/src/adios2-v100-build cmake \ @@ -71,7 +71,7 @@ cmake \ -DADIOS2_USE_Fortran=OFF \ -DADIOS2_USE_Python=OFF \ -DADIOS2_USE_ZeroMQ=OFF \ - -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.0 + -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.2 cmake --build ${build_dir}/adios2-v100-build --target install -j 8 rm -rf ${build_dir}/adios2-v100-build diff --git a/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example b/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example index 970dc980347..ddb70fbf255 100644 --- a/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example +++ b/Tools/machines/hpc3-uci/hpc3_gpu_warpx.profile.example @@ -18,16 +18,16 @@ module load boost/1.78.0/gcc.11.2.0 module load OpenBLAS/0.3.21 module load hdf5/1.13.1/gcc.11.2.0-openmpi.4.1.2 export CMAKE_PREFIX_PATH=${HOME}/sw/hpc3/gpu/c-blosc-1.21.1:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${HOME}/sw/hpc3/gpu/adios2-2.8.3:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=${HOME}/sw/hpc3/gpu/adios2-2.10.2:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${HOME}/sw/hpc3/gpu/blaspp-2024.05.31:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${HOME}/sw/hpc3/gpu/lapackpp-2024.05.31:$CMAKE_PREFIX_PATH export LD_LIBRARY_PATH=${HOME}/sw/hpc3/gpu/c-blosc-1.21.1/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${HOME}/sw/hpc3/gpu/adios2-2.8.3/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=${HOME}/sw/hpc3/gpu/adios2-2.10.2/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${HOME}/sw/hpc3/gpu/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${HOME}/sw/hpc3/gpu/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH -export PATH=${HOME}/sw/hpc3/gpu/adios2-2.8.3/bin:${PATH} +export PATH=${HOME}/sw/hpc3/gpu/adios2-2.10.2/bin:${PATH} # optional: CCache #module load ccache # missing diff --git a/Tools/machines/hpc3-uci/install_gpu_dependencies.sh b/Tools/machines/hpc3-uci/install_gpu_dependencies.sh index c4c31dd4066..e4b9f4caa5a 100755 --- a/Tools/machines/hpc3-uci/install_gpu_dependencies.sh +++ b/Tools/machines/hpc3-uci/install_gpu_dependencies.sh @@ -64,13 +64,13 @@ if [ -d $HOME/src/adios2 ] then cd $HOME/src/adios2 git fetch --prune - git checkout v2.8.3 + git checkout v2.10.2 cd - else - git clone -b v2.8.3 
https://github.com/ornladios/ADIOS2.git $HOME/src/adios2 + git clone -b v2.10.2 https://github.com/ornladios/ADIOS2.git $HOME/src/adios2 fi rm -rf $HOME/src/adios2-pm-gpu-build -cmake -S $HOME/src/adios2 -B $HOME/src/adios2-pm-gpu-build -DBUILD_TESTING=OFF -DADIOS2_BUILD_EXAMPLES=OFF -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_HDF5=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3 +cmake -S $HOME/src/adios2 -B $HOME/src/adios2-pm-gpu-build -DBUILD_TESTING=OFF -DADIOS2_BUILD_EXAMPLES=OFF -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_HDF5=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.2 cmake --build $HOME/src/adios2-pm-gpu-build --target install --parallel 8 rm -rf $HOME/src/adios2-pm-gpu-build diff --git a/Tools/machines/lassen-llnl/install_v100_dependencies_toss3.sh b/Tools/machines/lassen-llnl/install_v100_dependencies_toss3.sh index 86f330060f6..b4e80f2ddea 100644 --- a/Tools/machines/lassen-llnl/install_v100_dependencies_toss3.sh +++ b/Tools/machines/lassen-llnl/install_v100_dependencies_toss3.sh @@ -69,12 +69,12 @@ if [ -d ${SRC_DIR}/adios2 ] then cd ${SRC_DIR}/adios2 git fetch --prune - git checkout v2.8.3 + git checkout v2.10.2 cd - else - git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git ${SRC_DIR}/adios2 + git clone -b v2.10.2 https://github.com/ornladios/ADIOS2.git ${SRC_DIR}/adios2 fi -cmake -S ${SRC_DIR}/adios2 -B ${build_dir}/adios2-lassen-build -DBUILD_TESTING=OFF -DADIOS2_BUILD_EXAMPLES=OFF -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_SST=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3 +cmake -S ${SRC_DIR}/adios2 -B ${build_dir}/adios2-lassen-build -DBUILD_TESTING=OFF -DADIOS2_BUILD_EXAMPLES=OFF -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_SST=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.2 cmake --build ${build_dir}/adios2-lassen-build --target install -j 10 # BLAS++ (for PSATD+RZ) diff --git a/Tools/machines/lassen-llnl/lassen_v100_warpx_toss3.profile.example b/Tools/machines/lassen-llnl/lassen_v100_warpx_toss3.profile.example index 99e61a2fbf6..a2b9356104b 100644 --- a/Tools/machines/lassen-llnl/lassen_v100_warpx_toss3.profile.example +++ b/Tools/machines/lassen-llnl/lassen_v100_warpx_toss3.profile.example @@ -14,12 +14,12 @@ SRC_DIR="/usr/workspace/${USER}/lassen/src" SW_DIR="/usr/workspace/${USER}/lassen-toss3/gpu" export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.1:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${SW_DIR}/hdf5-1.14.1.2:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.8.3:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.10.2:$CMAKE_PREFIX_PATH export LD_LIBRARY_PATH=${SW_DIR}/c-blosc-1.21.1/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${SW_DIR}/hdf5-1.14.1.2/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.8.3/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.10.2/lib64:$LD_LIBRARY_PATH export PATH=${SW_DIR}/hdf5-1.14.1.2/bin:${PATH} -export PATH=${SW_DIR}/adios2-2.8.3/bin:${PATH} +export PATH=${SW_DIR}/adios2-2.10.2/bin:${PATH} # optional: for PSATD in RZ geometry support export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.05.31:$CMAKE_PREFIX_PATH diff --git a/Tools/machines/lawrencium-lbnl/lawrencium_warpx.profile.example b/Tools/machines/lawrencium-lbnl/lawrencium_warpx.profile.example index 62f80433233..fbbf0267e1e 100644 --- 
a/Tools/machines/lawrencium-lbnl/lawrencium_warpx.profile.example +++ b/Tools/machines/lawrencium-lbnl/lawrencium_warpx.profile.example @@ -14,11 +14,11 @@ module load boost/1.83.0 module load hdf5/1.14.3 export CMAKE_PREFIX_PATH=$HOME/sw/v100/c-blosc-1.21.1:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=$HOME/sw/v100/adios2-2.8.3:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=$HOME/sw/v100/adios2-2.10.2:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=$HOME/sw/v100/blaspp-2024.05.31:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=$HOME/sw/v100/lapackpp-2024.05.31:$CMAKE_PREFIX_PATH -export PATH=$HOME/sw/v100/adios2-2.8.3/bin:$PATH +export PATH=$HOME/sw/v100/adios2-2.10.2/bin:$PATH # optional: CCache #module load ccache # missing diff --git a/Tools/machines/leonardo-cineca/install_gpu_dependencies.sh b/Tools/machines/leonardo-cineca/install_gpu_dependencies.sh index 4d89e30cd29..2df123ba0ee 100644 --- a/Tools/machines/leonardo-cineca/install_gpu_dependencies.sh +++ b/Tools/machines/leonardo-cineca/install_gpu_dependencies.sh @@ -30,15 +30,14 @@ mkdir -p ${SW_DIR} if [ -d $HOME/src/adios2 ] then cd $HOME/src/adios2 - git fetch - git checkout master - git pull + git fetch --prune + git checkout v2.10.2 cd - else - git clone https://github.com/ornladios/ADIOS2.git $HOME/src/adios2 + git clone -b v2.10.2 https://github.com/ornladios/ADIOS2.git $HOME/src/adios2 fi rm -rf $HOME/src/adios2-gpu-build -cmake -S $HOME/src/adios2 -B $HOME/src/adios2-gpu-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-master +cmake -S $HOME/src/adios2 -B $HOME/src/adios2-gpu-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.2 cmake --build $HOME/src/adios2-gpu-build --target install -j 16 rm -rf $HOME/src/adios2-gpu-build diff --git a/Tools/machines/leonardo-cineca/leonardo_gpu_warpx.profile.example b/Tools/machines/leonardo-cineca/leonardo_gpu_warpx.profile.example index dd8e79ffb37..763bb07ad17 100644 --- a/Tools/machines/leonardo-cineca/leonardo_gpu_warpx.profile.example +++ b/Tools/machines/leonardo-cineca/leonardo_gpu_warpx.profile.example @@ -15,16 +15,16 @@ module load boost/1.80.0--openmpi--4.1.4--gcc--11.3.0 # optional: for openPMD and PSATD+RZ support module load openblas/0.3.21--gcc--11.3.0 export CMAKE_PREFIX_PATH=/leonardo/prod/spack/03/install/0.19/linux-rhel8-icelake/gcc-11.3.0/c-blosc-1.21.1-aifmix6v5lwxgt7rigwoebalrgbcnv26:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=$HOME/sw/adios2-master:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=$HOME/sw/adios2-2.10.2:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=$HOME/sw/blaspp-2024.05.31:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=$HOME/sw/lapackpp-2024.05.31:$CMAKE_PREFIX_PATH export LD_LIBRARY_PATH=/leonardo/prod/spack/03/install/0.19/linux-rhel8-icelake/gcc-11.3.0/c-blosc-1.21.1-aifmix6v5lwxgt7rigwoebalrgbcnv26/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=$HOME/sw/adios2-master/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=$HOME/sw/adios2-2.10.2/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=$HOME/sw/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=$HOME/sw/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH -export PATH=$HOME/sw/adios2-master/bin:$PATH +export PATH=$HOME/sw/adios2-2.10.2/bin:$PATH # optional: for Python bindings or libEnsemble module load python/3.10.8--gcc--11.3.0 diff --git a/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh 
b/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh index fd3a2d3f756..1ade3fe77d4 100755 --- a/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh +++ b/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh @@ -56,13 +56,13 @@ if [ -d $HOME/src/adios2 ] then cd $HOME/src/adios2 git fetch --prune - git checkout v2.8.3 + git checkout v2.10.2 cd - else - git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git $HOME/src/adios2 + git clone -b v2.10.2 https://github.com/ornladios/ADIOS2.git $HOME/src/adios2 fi rm -rf $HOME/src/adios2-a100-build -cmake -S $HOME/src/adios2 -B ${build_dir}/adios2-a100-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3 +cmake -S $HOME/src/adios2 -B ${build_dir}/adios2-a100-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.2 cmake --build ${build_dir}/adios2-a100-build --target install -j 16 rm -rf ${build_dir}/adios2-a100-build diff --git a/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example b/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example index 57c98da9b4a..5b6c91b6a4d 100644 --- a/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example +++ b/Tools/machines/lonestar6-tacc/lonestar6_warpx_a100.profile.example @@ -17,16 +17,16 @@ module load phdf5/1.10.4 SW_DIR="${WORK}/sw/lonestar6/sw/lonestar6/a100" export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.1:${CMAKE_PREFIX_PATH} -export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.8.3:${CMAKE_PREFIX_PATH} +export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.10.2:${CMAKE_PREFIX_PATH} export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.05.31:${CMAKE_PREFIX_PATH} export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.05.31:${CMAKE_PREFIX_PATH} export LD_LIBRARY_PATH=${SW_DIR}/c-blosc-1.21.1/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.8.3/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.10.2/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH -export PATH=${SW_DIR}/adios2-2.8.3/bin:${PATH} +export PATH=${SW_DIR}/adios2-2.10.2/bin:${PATH} # optional: CCache #module load ccache # TODO: request from support diff --git a/Tools/machines/lumi-csc/install_dependencies.sh b/Tools/machines/lumi-csc/install_dependencies.sh index 2fd31b79bce..e149b8abd43 100755 --- a/Tools/machines/lumi-csc/install_dependencies.sh +++ b/Tools/machines/lumi-csc/install_dependencies.sh @@ -124,10 +124,10 @@ if [ -d ${SRC_DIR}/adios2 ] then cd ${SRC_DIR}/adios2 git fetch --prune - git checkout v2.8.3 + git checkout v2.10.2 cd - else - git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git ${SRC_DIR}/adios2 + git clone -b v2.10.2 https://github.com/ornladios/ADIOS2.git ${SRC_DIR}/adios2 fi rm -rf ${build_dir}/adios2-lu-build cmake -S ${SRC_DIR}/adios2 \ @@ -137,7 +137,7 @@ cmake -S ${SRC_DIR}/adios2 \ -DADIOS2_USE_HDF5=OFF \ -DADIOS2_USE_Python=OFF \ -DADIOS2_USE_ZeroMQ=OFF \ - -DCMAKE_INSTALL_PREFIX=${HOME}/sw/lumi/gpu/adios2-2.8.3 + -DCMAKE_INSTALL_PREFIX=${HOME}/sw/lumi/gpu/adios2-2.10.2 cmake --build ${build_dir}/adios2-lu-build --target install -j 16 rm -rf ${build_dir}/adios2-lu-build diff --git a/Tools/machines/lumi-csc/lumi_warpx.profile.example b/Tools/machines/lumi-csc/lumi_warpx.profile.example index 915f976f4ab..967638e8461 100644 --- 
a/Tools/machines/lumi-csc/lumi_warpx.profile.example +++ b/Tools/machines/lumi-csc/lumi_warpx.profile.example @@ -22,9 +22,9 @@ module load Boost/1.82.0-cpeCray-23.09 # optional: for openPMD support export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.1:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${SW_DIR}/hdf5-1.14.1.2:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.8.3:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.10.2:$CMAKE_PREFIX_PATH export PATH=${SW_DIR}/hdf5-1.14.1.2/bin:${PATH} -export PATH=${SW_DIR}/adios2-2.8.3/bin:${PATH} +export PATH=${SW_DIR}/adios2-2.10.2/bin:${PATH} # optional: for Python bindings or libEnsemble module load cray-python/3.11.7 diff --git a/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh b/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh index 0ef14844493..5be1ef7b9aa 100755 --- a/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh +++ b/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh @@ -87,13 +87,13 @@ if [ -d $HOME/src/adios2 ] then cd $HOME/src/adios2 git fetch --prune - git checkout v2.8.3 + git checkout v2.10.2 cd - else - git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git $HOME/src/adios2 + git clone -b v2.10.2 https://github.com/ornladios/ADIOS2.git $HOME/src/adios2 fi rm -rf $HOME/src/adios2-pm-cpu-build -cmake -S $HOME/src/adios2 -B ${build_dir}/adios2-pm-cpu-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_CUDA=OFF -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3 +cmake -S $HOME/src/adios2 -B ${build_dir}/adios2-pm-cpu-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_CUDA=OFF -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.2 cmake --build ${build_dir}/adios2-pm-cpu-build --target install -j ${PARALLEL} rm -rf ${build_dir}/adios2-pm-cpu-build diff --git a/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh b/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh index ffa3d0f0714..a029b428e8a 100755 --- a/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh +++ b/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh @@ -87,13 +87,13 @@ if [ -d $HOME/src/adios2 ] then cd $HOME/src/adios2 git fetch --prune - git checkout v2.8.3 + git checkout v2.10.2 cd - else - git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git $HOME/src/adios2 + git clone -b v2.10.2 https://github.com/ornladios/ADIOS2.git $HOME/src/adios2 fi rm -rf $HOME/src/adios2-pm-gpu-build -cmake -S $HOME/src/adios2 -B ${build_dir}/adios2-pm-gpu-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3 +cmake -S $HOME/src/adios2 -B ${build_dir}/adios2-pm-gpu-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.2 cmake --build ${build_dir}/adios2-pm-gpu-build --target install -j ${PARALLEL} rm -rf ${build_dir}/adios2-pm-gpu-build diff --git a/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example b/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example index fe665e87130..860c5a9915f 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example +++ b/Tools/machines/perlmutter-nersc/perlmutter_cpu_warpx.profile.example @@ -20,16 +20,16 @@ export LD_LIBRARY_PATH=${SW_DIR}/boost-1.82.0/lib:${LD_LIBRARY_PATH} # optional: for openPMD and 
PSATD+RZ support module load cray-hdf5-parallel/1.12.2.9 export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.1:${CMAKE_PREFIX_PATH} -export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.8.3:${CMAKE_PREFIX_PATH} +export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.10.2:${CMAKE_PREFIX_PATH} export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.05.31:${CMAKE_PREFIX_PATH} export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.05.31:${CMAKE_PREFIX_PATH} export LD_LIBRARY_PATH=${SW_DIR}/c-blosc-1.21.1/lib64:${LD_LIBRARY_PATH} -export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.8.3/lib64:${LD_LIBRARY_PATH} +export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.10.2/lib64:${LD_LIBRARY_PATH} export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.05.31/lib64:${LD_LIBRARY_PATH} export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.05.31/lib64:${LD_LIBRARY_PATH} -export PATH=${SW_DIR}/adios2-2.8.3/bin:${PATH} +export PATH=${SW_DIR}/adios2-2.10.2/bin:${PATH} # optional: CCache export PATH=${SW_DIR}/ccache-4.10.2:$PATH diff --git a/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example b/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example index dd78bc8ecf3..5119c44d327 100644 --- a/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example +++ b/Tools/machines/perlmutter-nersc/perlmutter_gpu_warpx.profile.example @@ -24,16 +24,16 @@ export LD_LIBRARY_PATH=${SW_DIR}/boost-1.82.0/lib:${LD_LIBRARY_PATH} # optional: for openPMD and PSATD+RZ support module load cray-hdf5-parallel/1.12.2.9 export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.1:${CMAKE_PREFIX_PATH} -export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.8.3:${CMAKE_PREFIX_PATH} +export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.10.2:${CMAKE_PREFIX_PATH} export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.05.31:${CMAKE_PREFIX_PATH} export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.05.31:${CMAKE_PREFIX_PATH} export LD_LIBRARY_PATH=${SW_DIR}/c-blosc-1.21.1/lib64:${LD_LIBRARY_PATH} -export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.8.3/lib64:${LD_LIBRARY_PATH} +export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.10.2/lib64:${LD_LIBRARY_PATH} export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.05.31/lib64:${LD_LIBRARY_PATH} export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.05.31/lib64:${LD_LIBRARY_PATH} -export PATH=${SW_DIR}/adios2-2.8.3/bin:${PATH} +export PATH=${SW_DIR}/adios2-2.10.2/bin:${PATH} # optional: CCache export PATH=${SW_DIR}/ccache-4.10.2:$PATH diff --git a/Tools/machines/pitzer-osc/install_cpu_dependencies.sh b/Tools/machines/pitzer-osc/install_cpu_dependencies.sh index f0b6ce4b950..cc74adb1a52 100644 --- a/Tools/machines/pitzer-osc/install_cpu_dependencies.sh +++ b/Tools/machines/pitzer-osc/install_cpu_dependencies.sh @@ -107,10 +107,10 @@ rm -rf ${build_dir}/c-blosc-pitzer-build if [ -d ${SRC_DIR}/adios2 ]; then cd ${SRC_DIR}/adios2 git fetch --prune - git checkout v2.10.1 + git checkout v2.10.2 cd - else - git clone -b v2.10.1 https://github.com/ornladios/ADIOS2.git ${SRC_DIR}/adios2 + git clone -b v2.10.2 https://github.com/ornladios/ADIOS2.git ${SRC_DIR}/adios2 fi rm -rf ${build_dir}/adios2-pitzer-build cmake -S ${SRC_DIR}/adios2 \ @@ -122,7 +122,7 @@ cmake -S ${SRC_DIR}/adios2 \ -DADIOS2_USE_Python=OFF \ -DADIOS2_USE_SST=OFF \ -DADIOS2_USE_ZeroMQ=OFF \ - -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.1 + -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.2 cmake --build ${build_dir}/adios2-pitzer-build --target install -j 16 rm -rf ${build_dir}/adios2-pitzer-build diff --git a/Tools/machines/pitzer-osc/install_v100_dependencies.sh b/Tools/machines/pitzer-osc/install_v100_dependencies.sh index 5601b4d76c9..eae1a01e39e 
100644 --- a/Tools/machines/pitzer-osc/install_v100_dependencies.sh +++ b/Tools/machines/pitzer-osc/install_v100_dependencies.sh @@ -107,10 +107,10 @@ rm -rf ${build_dir}/c-blosc-pitzer-build if [ -d ${SRC_DIR}/adios2 ]; then cd ${SRC_DIR}/adios2 git fetch --prune - git checkout v2.10.1 + git checkout v2.10.2 cd - else - git clone -b v2.10.1 https://github.com/ornladios/ADIOS2.git ${SRC_DIR}/adios2 + git clone -b v2.10.2 https://github.com/ornladios/ADIOS2.git ${SRC_DIR}/adios2 fi rm -rf ${build_dir}/adios2-pitzer-build cmake -S ${SRC_DIR}/adios2 \ @@ -122,7 +122,7 @@ cmake -S ${SRC_DIR}/adios2 \ -DADIOS2_USE_Python=OFF \ -DADIOS2_USE_SST=OFF \ -DADIOS2_USE_ZeroMQ=OFF \ - -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.1 + -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.2 cmake --build ${build_dir}/adios2-pitzer-build --target install -j 16 rm -rf ${build_dir}/adios2-pitzer-build diff --git a/Tools/machines/pitzer-osc/pitzer_cpu_warpx.profile.example b/Tools/machines/pitzer-osc/pitzer_cpu_warpx.profile.example index d99c16b6cb6..233bbde63d6 100644 --- a/Tools/machines/pitzer-osc/pitzer_cpu_warpx.profile.example +++ b/Tools/machines/pitzer-osc/pitzer_cpu_warpx.profile.example @@ -43,10 +43,10 @@ export LD_LIBRARY_PATH=${SW_DIR}/boost-1.82.0/lib:$LD_LIBRARY_PATH # optional: for openPMD support (hdf5 and adios2) module load hdf5/1.12.2 export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.6:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.10.1:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.10.2:$CMAKE_PREFIX_PATH export LD_LIBRARY_PATH=${SW_DIR}/c-blosc-1.21.6/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.10.1/lib64:$LD_LIBRARY_PATH -export PATH=${SW_DIR}/adios2-2.10.1/bin:${PATH} +export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.10.2/lib64:$LD_LIBRARY_PATH +export PATH=${SW_DIR}/adios2-2.10.2/bin:${PATH} # compiler environment hints export CC=$(which gcc) diff --git a/Tools/machines/pitzer-osc/pitzer_v100_warpx.profile.example b/Tools/machines/pitzer-osc/pitzer_v100_warpx.profile.example index 061794f5f68..06c117160bf 100644 --- a/Tools/machines/pitzer-osc/pitzer_v100_warpx.profile.example +++ b/Tools/machines/pitzer-osc/pitzer_v100_warpx.profile.example @@ -47,10 +47,10 @@ export LD_LIBRARY_PATH=${SW_DIR}/boost-1.82.0/lib:$LD_LIBRARY_PATH module load hdf5/1.12.0 export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-1.21.6:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.10.1:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.10.2:$CMAKE_PREFIX_PATH export LD_LIBRARY_PATH=${SW_DIR}/c-blosc-1.21.6/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.10.1/lib64:$LD_LIBRARY_PATH -export PATH=${SW_DIR}/adios2-2.10.1/bin:${PATH} +export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.10.2/lib64:$LD_LIBRARY_PATH +export PATH=${SW_DIR}/adios2-2.10.2/bin:${PATH} # avoid relocation truncation error which result from large executable size export CUDAFLAGS="--host-linker-script=use-lcs" # https://github.com/ECP-WarpX/WarpX/pull/3673 diff --git a/Tools/machines/polaris-alcf/install_gpu_dependencies.sh b/Tools/machines/polaris-alcf/install_gpu_dependencies.sh index 18f94e6fd3b..48744741a21 100755 --- a/Tools/machines/polaris-alcf/install_gpu_dependencies.sh +++ b/Tools/machines/polaris-alcf/install_gpu_dependencies.sh @@ -50,13 +50,13 @@ if [ -d $HOME/src/adios2 ] then cd $HOME/src/adios2 git fetch --prune - git checkout v2.8.3 + git checkout v2.10.2 cd - else - git clone -b v2.8.3 https://github.com/ornladios/ADIOS2.git $HOME/src/adios2 + git clone -b 
v2.10.2 https://github.com/ornladios/ADIOS2.git $HOME/src/adios2 fi rm -rf $HOME/src/adios2-pm-gpu-build -cmake -S $HOME/src/adios2 -B $HOME/src/adios2-pm-gpu-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.8.3 +cmake -S $HOME/src/adios2 -B $HOME/src/adios2-pm-gpu-build -DADIOS2_USE_Blosc=ON -DADIOS2_USE_Fortran=OFF -DADIOS2_USE_Python=OFF -DADIOS2_USE_ZeroMQ=OFF -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.2 cmake --build $HOME/src/adios2-pm-gpu-build --target install -j 16 rm -rf $HOME/src/adios2-pm-gpu-build diff --git a/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example b/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example index e1bd4e0fdd3..5cdda701bc7 100644 --- a/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example +++ b/Tools/machines/polaris-alcf/polaris_gpu_warpx.profile.example @@ -22,16 +22,16 @@ module load boost # optional: for openPMD and PSATD+RZ support module load hdf5/1.14.3 export CMAKE_PREFIX_PATH=/home/${USER}/sw/polaris/gpu/c-blosc-1.21.1:$CMAKE_PREFIX_PATH -export CMAKE_PREFIX_PATH=/home/${USER}/sw/polaris/gpu/adios2-2.8.3:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=/home/${USER}/sw/polaris/gpu/adios2-2.10.2:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=/home/${USER}/sw/polaris/gpu/blaspp-2024.05.31:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=/home/${USER}/sw/polaris/gpu/lapackpp-2024.05.31:$CMAKE_PREFIX_PATH export LD_LIBRARY_PATH=/home/${USER}/sw/polaris/gpu/c-blosc-1.21.1/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=/home/${USER}/sw/polaris/gpu/adios2-2.8.3/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=/home/${USER}/sw/polaris/gpu/adios2-2.10.2/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=/home/${USER}/sw/polaris/gpu/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=/home/${USER}/sw/polaris/gpu/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH -export PATH=/home/${USER}/sw/polaris/gpu/adios2-2.8.3/bin:${PATH} +export PATH=/home/${USER}/sw/polaris/gpu/adios2-2.10.2/bin:${PATH} # optional: for Python bindings or libEnsemble module load python/3.10.9 diff --git a/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh b/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh index 95633549698..d3ade42fca9 100644 --- a/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh +++ b/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh @@ -73,10 +73,10 @@ if [ -d ${SRC_DIR}/adios2 ] then cd ${SRC_DIR}/adios2 git fetch --prune - git checkout v2.10.1 + git checkout v2.10.2 cd - else - git clone -b v2.10.1 https://github.com/ornladios/ADIOS2.git ${SRC_DIR}/adios2 + git clone -b v2.10.2 https://github.com/ornladios/ADIOS2.git ${SRC_DIR}/adios2 fi cmake \ --fresh \ @@ -87,7 +87,7 @@ cmake \ -DADIOS2_USE_Fortran=OFF \ -DADIOS2_USE_Python=OFF \ -DADIOS2_USE_ZeroMQ=OFF \ - -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.1 + -DCMAKE_INSTALL_PREFIX=${SW_DIR}/adios2-2.10.2 cmake \ --build ${build_dir}/adios2-build \ --target install \ diff --git a/Tools/machines/tioga-llnl/tioga_mi300a_warpx.profile.example b/Tools/machines/tioga-llnl/tioga_mi300a_warpx.profile.example index 53fe21844c1..db6aa2e5b8c 100644 --- a/Tools/machines/tioga-llnl/tioga_mi300a_warpx.profile.example +++ b/Tools/machines/tioga-llnl/tioga_mi300a_warpx.profile.example @@ -28,16 +28,16 @@ module load ninja/1.10.2 SW_DIR="/p/lustre1/${USER}/tioga/warpx/mi300a" module load cray-hdf5-parallel/1.12.2.11 export CMAKE_PREFIX_PATH=${SW_DIR}/c-blosc-2.15.1:$CMAKE_PREFIX_PATH -export 
CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.10.1:$CMAKE_PREFIX_PATH +export CMAKE_PREFIX_PATH=${SW_DIR}/adios2-2.10.2:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${SW_DIR}/blaspp-2024.05.31:$CMAKE_PREFIX_PATH export CMAKE_PREFIX_PATH=${SW_DIR}/lapackpp-2024.05.31:$CMAKE_PREFIX_PATH export LD_LIBRARY_PATH=${SW_DIR}/c-blosc-2.15.1/lib64:$LD_LIBRARY_PATH -export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.10.1/lib64:$LD_LIBRARY_PATH +export LD_LIBRARY_PATH=${SW_DIR}/adios2-2.10.2/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${SW_DIR}/blaspp-2024.05.31/lib64:$LD_LIBRARY_PATH export LD_LIBRARY_PATH=${SW_DIR}/lapackpp-2024.05.31/lib64:$LD_LIBRARY_PATH -export PATH=${SW_DIR}/adios2-2.10.1/bin:${PATH} +export PATH=${SW_DIR}/adios2-2.10.2/bin:${PATH} # python module load cray-python/3.11.7 From 8bdbd69fd5d79c43e00ad86d1222c6a0cc291730 Mon Sep 17 00:00:00 2001 From: "G. RD" <48356331+grobertdautun@users.noreply.github.com> Date: Wed, 26 Feb 2025 23:42:19 +0100 Subject: [PATCH 259/278] Ionization.H : remove duplicate call to `m_get_externalEB` (#5710) While working on another PR I uncovered what I think to be a bug where `m_get_externalEB` is called twice. This PR fixes this bug. --------- Co-authored-by: Remi Lehe --- Source/Particles/ElementaryProcess/Ionization.H | 1 - 1 file changed, 1 deletion(-) diff --git a/Source/Particles/ElementaryProcess/Ionization.H b/Source/Particles/ElementaryProcess/Ionization.H index 6f98c18959a..92039bdcd61 100644 --- a/Source/Particles/ElementaryProcess/Ionization.H +++ b/Source/Particles/ElementaryProcess/Ionization.H @@ -119,7 +119,6 @@ struct IonizationFilterFunc m_ex_type, m_ey_type, m_ez_type, m_bx_type, m_by_type, m_bz_type, m_dinv, m_xyzmin, m_lo, m_n_rz_azimuthal_modes, m_nox, m_galerkin_interpolation); - m_get_externalEB(i, ex, ey, ez, bx, by, bz); // Compute electric field amplitude in the particle's frame of // reference (particularly important when in boosted frame). From cfd9d1d7e04fe291161106910d6077d9a32222b8 Mon Sep 17 00:00:00 2001 From: Remi Lehe Date: Wed, 26 Feb 2025 19:00:57 -0800 Subject: [PATCH 260/278] Add Python function to extract particles scraped during the last step. (#5711) This adds a function that extracts the particles that were scraped at the current timestep. This is useful in callback functions, where we often want to re-inject particles that hit the boundary, and therefore need to select the ones that were scraped at the current timestep.
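A minimal usage sketch of the new function inside a callback (the wrapper instantiations below mirror the examples updated in this PR; the actual re-injection physics is omitted):

```python
import numpy as np

from pywarpx import callbacks, particle_containers

buffer = particle_containers.ParticleBoundaryBufferWrapper()
elect_pc = particle_containers.ParticleContainerWrapper("electrons")


def reinject_scraped():
    lev = 0  # level 0 (no mesh refinement here)
    # each call returns a list of arrays (one per box), restricted to the
    # particles scraped at the current timestep
    z = np.concatenate(buffer.get_particle_scraped_this_step("electrons", "eb", "z", lev))
    w = np.concatenate(buffer.get_particle_scraped_this_step("electrons", "eb", "w", lev))
    # ... compute new positions/momenta from z, w, etc., then re-inject
    # via elect_pc.add_particles(...)


callbacks.installafterstep(reinject_scraped)
```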
This also avoids calling `clear_buffer`, which potentially interferes with the `BoundaryScrapingDiagnostic` --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- ..._rz_particle_boundary_interaction_picmi.py | 28 ++++++------ ...ts_test_rz_secondary_ion_emission_picmi.py | 30 +++++++------ Python/pywarpx/particle_containers.py | 45 +++++++++++++++++++ 3 files changed, 76 insertions(+), 27 deletions(-) diff --git a/Examples/Tests/particle_boundary_interaction/inputs_test_rz_particle_boundary_interaction_picmi.py b/Examples/Tests/particle_boundary_interaction/inputs_test_rz_particle_boundary_interaction_picmi.py index 4b491ac6873..44eef4df0b9 100644 --- a/Examples/Tests/particle_boundary_interaction/inputs_test_rz_particle_boundary_interaction_picmi.py +++ b/Examples/Tests/particle_boundary_interaction/inputs_test_rz_particle_boundary_interaction_picmi.py @@ -128,20 +128,24 @@ def mirror_reflection(): # STEP 1: extract the different parameters of the boundary buffer (normal, time, position) lev = 0 # level 0 (no mesh refinement here) delta_t = concat( - buffer.get_particle_boundary_buffer("electrons", "eb", "deltaTimeScraped", lev) + buffer.get_particle_scraped_this_step( + "electrons", "eb", "deltaTimeScraped", lev + ) ) - r = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "x", lev)) - theta = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "theta", lev)) - z = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "z", lev)) + r = concat(buffer.get_particle_scraped_this_step("electrons", "eb", "x", lev)) + theta = concat( + buffer.get_particle_scraped_this_step("electrons", "eb", "theta", lev) + ) + z = concat(buffer.get_particle_scraped_this_step("electrons", "eb", "z", lev)) x = r * np.cos(theta) # from RZ coordinates to 3D coordinates y = r * np.sin(theta) - ux = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "ux", lev)) - uy = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "uy", lev)) - uz = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "uz", lev)) - w = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "w", lev)) - nx = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "nx", lev)) - ny = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "ny", lev)) - nz = concat(buffer.get_particle_boundary_buffer("electrons", "eb", "nz", lev)) + ux = concat(buffer.get_particle_scraped_this_step("electrons", "eb", "ux", lev)) + uy = concat(buffer.get_particle_scraped_this_step("electrons", "eb", "uy", lev)) + uz = concat(buffer.get_particle_scraped_this_step("electrons", "eb", "uz", lev)) + w = concat(buffer.get_particle_scraped_this_step("electrons", "eb", "w", lev)) + nx = concat(buffer.get_particle_scraped_this_step("electrons", "eb", "nx", lev)) + ny = concat(buffer.get_particle_scraped_this_step("electrons", "eb", "ny", lev)) + nz = concat(buffer.get_particle_scraped_this_step("electrons", "eb", "nz", lev)) # STEP 2: use these parameters to inject particle from the same position in the plasma elect_pc = particle_containers.ParticleContainerWrapper( @@ -164,8 +168,6 @@ def mirror_reflection(): ) # adds the particle in the general particle container at the next step #### Can be modified depending on the model of interaction. 
- buffer.clear_buffer() # reinitialise the boundary buffer - callbacks.installafterstep( mirror_reflection diff --git a/Examples/Tests/secondary_ion_emission/inputs_test_rz_secondary_ion_emission_picmi.py b/Examples/Tests/secondary_ion_emission/inputs_test_rz_secondary_ion_emission_picmi.py index 5b6248da33c..97ad6e09628 100644 --- a/Examples/Tests/secondary_ion_emission/inputs_test_rz_secondary_ion_emission_picmi.py +++ b/Examples/Tests/secondary_ion_emission/inputs_test_rz_secondary_ion_emission_picmi.py @@ -188,26 +188,29 @@ def secondary_emission(): elect_pc = particle_containers.ParticleContainerWrapper("electrons") if n != 0: - r = concat(buffer.get_particle_boundary_buffer("ions", "eb", "x", lev)) - theta = concat(buffer.get_particle_boundary_buffer("ions", "eb", "theta", lev)) - z = concat(buffer.get_particle_boundary_buffer("ions", "eb", "z", lev)) + r = concat(buffer.get_particle_scraped_this_step("ions", "eb", "x", lev)) + theta = concat( + buffer.get_particle_scraped_this_step("ions", "eb", "theta", lev) + ) + z = concat(buffer.get_particle_scraped_this_step("ions", "eb", "z", lev)) x = r * np.cos(theta) # from RZ coordinates to 3D coordinates y = r * np.sin(theta) - ux = concat(buffer.get_particle_boundary_buffer("ions", "eb", "ux", lev)) - uy = concat(buffer.get_particle_boundary_buffer("ions", "eb", "uy", lev)) - uz = concat(buffer.get_particle_boundary_buffer("ions", "eb", "uz", lev)) - w = concat(buffer.get_particle_boundary_buffer("ions", "eb", "w", lev)) - nx = concat(buffer.get_particle_boundary_buffer("ions", "eb", "nx", lev)) - ny = concat(buffer.get_particle_boundary_buffer("ions", "eb", "ny", lev)) - nz = concat(buffer.get_particle_boundary_buffer("ions", "eb", "nz", lev)) + ux = concat(buffer.get_particle_scraped_this_step("ions", "eb", "ux", lev)) + uy = concat(buffer.get_particle_scraped_this_step("ions", "eb", "uy", lev)) + uz = concat(buffer.get_particle_scraped_this_step("ions", "eb", "uz", lev)) + w = concat(buffer.get_particle_scraped_this_step("ions", "eb", "w", lev)) + nx = concat(buffer.get_particle_scraped_this_step("ions", "eb", "nx", lev)) + ny = concat(buffer.get_particle_scraped_this_step("ions", "eb", "ny", lev)) + nz = concat(buffer.get_particle_scraped_this_step("ions", "eb", "nz", lev)) delta_t = concat( - buffer.get_particle_boundary_buffer("ions", "eb", "deltaTimeScraped", lev) + buffer.get_particle_scraped_this_step("ions", "eb", "deltaTimeScraped", lev) ) + energy_ions = 0.5 * proton_mass * w * (ux**2 + uy**2 + uz**2) energy_ions_in_kEv = energy_ions / (e * 1000) sigma_nascap_ions = sigma_nascap(energy_ions_in_kEv, delta_H, E_HMax) - # Loop over all ions in the EB buffer - for i in range(0, n): + # Loop over all ions that have been scraped in the last timestep + for i in range(0, len(w)): sigma = sigma_nascap_ions[i] # Ne_sec is number of the secondary electrons to be emitted Ne_sec = int(sigma + np.random.uniform()) @@ -258,7 +261,6 @@ def secondary_emission(): uz=uze, w=we, ) - buffer.clear_buffer() # reinitialise the boundary buffer # using the new particle container modified at the last step diff --git a/Python/pywarpx/particle_containers.py b/Python/pywarpx/particle_containers.py index a66fd131aed..0488d262fc8 100644 --- a/Python/pywarpx/particle_containers.py +++ b/Python/pywarpx/particle_containers.py @@ -863,6 +863,51 @@ def get_particle_boundary_buffer(self, species_name, boundary, comp_name, level) raise RuntimeError("Name %s not found" % comp_name) return data_array + def get_particle_scraped_this_step(self, species_name, boundary, 
comp_name, level): + """ + This returns a list of numpy or cupy arrays containing the particle array data + for particles that have been scraped at the current timestep, + for a specific species and simulation boundary. + + The data for the arrays is a view of the underlying boundary buffer in WarpX; + writing to these arrays will therefore also modify the underlying boundary buffer. + + Parameters + ---------- + + species_name : str + The species name that the data will be returned for. + + boundary : str + The boundary from which to get the scraped particle data in the + form x/y/z_hi/lo or eb. + + comp_name : str + The component of the array data that will be returned. + "x", "y", "z", "ux", "uy", "uz", "w" + "stepScraped", "deltaTimeScraped", + if boundary='eb': "nx", "ny", "nz" + + level : int + Which AMR level to retrieve scraped particle data from. + """ + # Extract the integer number of the current timestep + current_step = libwarpx.libwarpx_so.get_instance().getistep(level) + + # Extract the data requested by the user + data_array = self.get_particle_boundary_buffer( + species_name, boundary, comp_name, level + ) + step_scraped_array = self.get_particle_boundary_buffer( + species_name, boundary, "stepScraped", level + ) + + # Select only the particles scraped at the current step + data_array_this_step = [] + for data, step in zip(data_array, step_scraped_array): + data_array_this_step.append(data[step == current_step]) + return data_array_this_step + def clear_buffer(self): """ From 2386c18dc14aff56ab17a49d05692a2eb954e659 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 28 Feb 2025 16:40:23 +0100 Subject: [PATCH 261/278] Move linear interpolation functions to ablastr (#5714) Pure functions like `linear_interp`, `bilinear_interp`, and `trilinear_interp` are very general. Therefore, we can consider moving them to `ablastr::math`.
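For illustration, a call site after the move reduces to something like the following sketch (the header path follows the new file location in this diff; the numeric values are arbitrary):

```cpp
#include <ablastr/math/LinearInterpolation.H>

// interpolate between (x0, f0) = (0, 1) and (x1, f1) = (2, 3) at x = 1
const auto f = ablastr::math::linear_interp(0.0, 2.0, 1.0, 3.0, 1.0);
// f == 2.0, i.e. f0 + (f1 - f0) * (x - x0) / (x1 - x0)
```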
Besides, I will need some of these functions to move `picsar_qed` inside `ablastr` (https://github.com/ECP-WarpX/WarpX/pull/5677) --- Source/Initialization/WarpXInitData.cpp | 6 +- .../LaserProfileFromFile.cpp | 58 ++++++++++--------- .../math}/LinearInterpolation.H | 19 +++--- 3 files changed, 44 insertions(+), 39 deletions(-) rename Source/{Utils/Algorithms => ablastr/math}/LinearInterpolation.H (79%) diff --git a/Source/Initialization/WarpXInitData.cpp b/Source/Initialization/WarpXInitData.cpp index c70188f07bc..3089fd7304c 100644 --- a/Source/Initialization/WarpXInitData.cpp +++ b/Source/Initialization/WarpXInitData.cpp @@ -29,7 +29,6 @@ #include "Initialization/ExternalField.H" #include "Initialization/DivCleaner/ProjectionDivCleaner.H" #include "Particles/MultiParticleContainer.H" -#include "Utils/Algorithms/LinearInterpolation.H" #include "Utils/Logo/GetLogo.H" #include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" @@ -40,6 +39,7 @@ #include "Python/callbacks.H" #include +#include #include #include #include @@ -1698,7 +1698,7 @@ WarpX::ReadExternalFieldFromFile ( f01 = fc_array(0, iz , ir+1), f10 = fc_array(0, iz+1, ir ), f11 = fc_array(0, iz+1, ir+1); - mffab(i,j,k) = static_cast(utils::algorithms::bilinear_interp + mffab(i,j,k) = static_cast(ablastr::math::bilinear_interp (xx0, xx0+file_dr, xx1, xx1+file_dz, f00, f01, f10, f11, x0, x1)); @@ -1713,7 +1713,7 @@ WarpX::ReadExternalFieldFromFile ( f101 = fc_array(iz+1, iy , ix+1), f110 = fc_array(iz , iy+1, ix+1), f111 = fc_array(iz+1, iy+1, ix+1); - mffab(i,j,k) = static_cast(utils::algorithms::trilinear_interp + mffab(i,j,k) = static_cast(ablastr::math::trilinear_interp (xx0, xx0+file_dx, xx1, xx1+file_dy, xx2, xx2+file_dz, f000, f001, f010, f011, f100, f101, f110, f111, x0, x1, x2)); diff --git a/Source/Laser/LaserProfilesImpl/LaserProfileFromFile.cpp b/Source/Laser/LaserProfilesImpl/LaserProfileFromFile.cpp index 934a537be5a..0ce5e71337f 100644 --- a/Source/Laser/LaserProfilesImpl/LaserProfileFromFile.cpp +++ b/Source/Laser/LaserProfilesImpl/LaserProfileFromFile.cpp @@ -6,12 +6,12 @@ */ #include "Laser/LaserProfiles.H" -#include "Utils/Algorithms/LinearInterpolation.H" #include "Utils/Parser/ParserUtils.H" #include "Utils/TextMsg.H" #include "Utils/WarpX_Complex.H" #include "Utils/WarpXConst.H" +#include #include #include @@ -489,7 +489,7 @@ WarpXLaserProfiles::FromFileLaserProfile::internal_fill_amplitude_uniform_cartes (i_interp-tmp_idx_first_time)*tmp_nx*tmp_ny+ j_interp*tmp_nx + k_interp; }; - const Complex val = utils::algorithms::trilinear_interp( + const Complex val = ablastr::math::trilinear_interp( t_left, t_right, x_0, x_1, y_0, y_1, @@ -574,33 +574,35 @@ WarpXLaserProfiles::FromFileLaserProfile::internal_fill_amplitude_uniform_cylind Complex fact = Complex{costheta, sintheta}; // azimuthal mode 0 - val += utils::algorithms::bilinear_interp( - t_left, t_right, - r_0, r_1, - p_E_lasy_data[idx(0, idx_t_left, idx_r_left)], - p_E_lasy_data[idx(0, idx_t_left, idx_r_right)], - p_E_lasy_data[idx(0, idx_t_right, idx_r_left)], - p_E_lasy_data[idx(0, idx_t_right, idx_r_right)], - t, Rp_i); + val += + ablastr::math::bilinear_interp( + t_left, t_right, + r_0, r_1, + p_E_lasy_data[idx(0, idx_t_left, idx_r_left)], + p_E_lasy_data[idx(0, idx_t_left, idx_r_right)], + p_E_lasy_data[idx(0, idx_t_right, idx_r_left)], + p_E_lasy_data[idx(0, idx_t_right, idx_r_right)], + t, Rp_i); // higher modes for (int m=1 ; m <= tmp_n_rz_azimuthal_components/2; m++) { - val += utils::algorithms::bilinear_interp( - t_left, t_right, - r_0, r_1, - 
p_E_lasy_data[idx(2*m-1, idx_t_left, idx_r_left)], - p_E_lasy_data[idx(2*m-1, idx_t_left, idx_r_right)], - p_E_lasy_data[idx(2*m-1, idx_t_right, idx_r_left)], - p_E_lasy_data[idx(2*m-1, idx_t_right, idx_r_right)], - t, Rp_i)*(fact.real()) + - utils::algorithms::bilinear_interp( - t_left, t_right, - r_0, r_1, - p_E_lasy_data[idx(2*m, idx_t_left, idx_r_left)], - p_E_lasy_data[idx(2*m, idx_t_left, idx_r_right)], - p_E_lasy_data[idx(2*m, idx_t_right, idx_r_left)], - p_E_lasy_data[idx(2*m, idx_t_right, idx_r_right)], - t, Rp_i)*(fact.imag()) ; + val += + ablastr::math::bilinear_interp( + t_left, t_right, + r_0, r_1, + p_E_lasy_data[idx(2*m-1, idx_t_left, idx_r_left)], + p_E_lasy_data[idx(2*m-1, idx_t_left, idx_r_right)], + p_E_lasy_data[idx(2*m-1, idx_t_right, idx_r_left)], + p_E_lasy_data[idx(2*m-1, idx_t_right, idx_r_right)], + t, Rp_i)*(fact.real()) + + ablastr::math::bilinear_interp( + t_left, t_right, + r_0, r_1, + p_E_lasy_data[idx(2*m, idx_t_left, idx_r_left)], + p_E_lasy_data[idx(2*m, idx_t_left, idx_r_right)], + p_E_lasy_data[idx(2*m, idx_t_right, idx_r_left)], + p_E_lasy_data[idx(2*m, idx_t_right, idx_r_right)], + t, Rp_i)*(fact.imag()) ; fact = fact*Complex{costheta, sintheta}; } amplitude[i] = (val*exp_omega_t).real(); @@ -683,7 +685,7 @@ WarpXLaserProfiles::FromFileLaserProfile::internal_fill_amplitude_uniform_binary (i_interp-tmp_idx_first_time)*tmp_nx*tmp_ny+ j_interp*tmp_ny + k_interp; }; - amplitude[i] = utils::algorithms::trilinear_interp( + amplitude[i] = ablastr::math::trilinear_interp( t_left, t_right, x_0, x_1, y_0, y_1, @@ -702,7 +704,7 @@ WarpXLaserProfiles::FromFileLaserProfile::internal_fill_amplitude_uniform_binary const auto idx = [=](int i_interp, int j_interp){ return (i_interp-tmp_idx_first_time) * tmp_nx + j_interp; }; - amplitude[i] = utils::algorithms::bilinear_interp( + amplitude[i] = ablastr::math::bilinear_interp( t_left, t_right, x_0, x_1, p_E_binary_data[idx(idx_t_left, idx_x_left)], diff --git a/Source/Utils/Algorithms/LinearInterpolation.H b/Source/ablastr/math/LinearInterpolation.H similarity index 79% rename from Source/Utils/Algorithms/LinearInterpolation.H rename to Source/ablastr/math/LinearInterpolation.H index 716c2d829b6..502ef68c080 100644 --- a/Source/Utils/Algorithms/LinearInterpolation.H +++ b/Source/ablastr/math/LinearInterpolation.H @@ -1,24 +1,25 @@ -/* Copyright 2022 Luca Fedeli +/* Copyright 2022-2025 Luca Fedeli * * This file is part of WarpX. * * License: BSD-3-Clause-LBNL */ -#ifndef WARPX_UTILS_ALGORITHMS_LINEAR_INTERPOLATION_H_ -#define WARPX_UTILS_ALGORITHMS_LINEAR_INTERPOLATION_H_ +#ifndef ABLASTR_MATH_LINEAR_INTERPOLATION_H_ +#define ABLASTR_MATH_LINEAR_INTERPOLATION_H_ #include #include -namespace utils::algorithms +namespace ablastr::math { /** \brief Performs a linear interpolation * * Performs a linear interpolation at x given the 2 points * (x0, f0) and (x1, f1) */ - template AMREX_GPU_DEVICE AMREX_FORCE_INLINE + template + AMREX_GPU_DEVICE AMREX_FORCE_INLINE constexpr auto linear_interp( TCoord x0, TCoord x1, TVal f0, TVal f1, @@ -32,7 +33,8 @@ namespace utils::algorithms * Performs a bilinear interpolation at (x,y) given the 4 points * (x0, y0, f00), (x0, y1, f01), (x1, y0, f10), (x1, y1, f11). 
*/ - template AMREX_GPU_DEVICE AMREX_FORCE_INLINE + template + AMREX_GPU_DEVICE AMREX_FORCE_INLINE constexpr auto bilinear_interp( TCoord x0, TCoord x1, TCoord y0, TCoord y1, TVal f00, TVal f01, TVal f10, TVal f11, @@ -49,7 +51,8 @@ namespace utils::algorithms * (x0, y0, z0, f000), (x0, y0, z1, f001), (x0, y1, z0, f010), (x0, y1, z1, f011), * (x1, y0, z0, f100), (x1, y0, z1, f101), (x1, y1, z0, f110), (x1, y1, z1, f111) */ - template AMREX_GPU_DEVICE AMREX_FORCE_INLINE + template + AMREX_GPU_DEVICE AMREX_FORCE_INLINE constexpr auto trilinear_interp( TCoord x0, TCoord x1, TCoord y0, TCoord y1, TCoord z0, TCoord z1, TVal f000, TVal f001, TVal f010, TVal f011, TVal f100, TVal f101, TVal f110, TVal f111, @@ -63,4 +66,4 @@ namespace utils::algorithms } } -#endif //WARPX_UTILS_ALGORITHMS_LINEAR_INTERPOLATION_H_ +#endif //ABLASTR_MATH_LINEAR_INTERPOLATION_H_ From 716d7cbc4e9d6ddcb0cea247e40aecacf861b00b Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Fri, 28 Feb 2025 14:19:40 -0800 Subject: [PATCH 262/278] [pre-commit.ci] pre-commit autoupdate (#5701) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.6 → v0.9.7](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.6...v0.9.7) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index e113fa4c8e5..c3d87264820 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.6 + rev: v0.9.7 hooks: # Run the linter - id: ruff From ba2c3c78d13a11e5ff487e5a0a9da41e4d60e484 Mon Sep 17 00:00:00 2001 From: Andrew Myers Date: Fri, 28 Feb 2025 16:04:44 -0800 Subject: [PATCH 263/278] CI: Upgrade HIP Workflows, Fix New Bug (#5707) - [x] Update HIP CI workflow to run on Ubuntu 24.04 - [x] Add HIP version to CLI, default 6.3.2 - [x] Fix bug caused by using `std::optional` on device --------- Co-authored-by: Edoardo Zoni Co-authored-by: David Grote --- .github/workflows/dependencies/hip.sh | 25 ++++++++++--------- .github/workflows/hip.yml | 18 +++---------- .../AcceleratorLattice/LatticeElementFinder.H | 3 +++ .../LatticeElementFinder.cpp | 2 ++ Source/Particles/Gather/GetExternalFields.H | 13 ++++------ 5 files changed, 27 insertions(+), 34 deletions(-) diff --git a/.github/workflows/dependencies/hip.sh b/.github/workflows/dependencies/hip.sh index bf15c2f7101..e07349ce63f 100755 --- a/.github/workflows/dependencies/hip.sh +++ b/.github/workflows/dependencies/hip.sh @@ -28,7 +28,9 @@ sudo apt-key add rocm.gpg.key source /etc/os-release # set UBUNTU_CODENAME: focal or jammy or ... 
-echo "deb [arch=amd64] https://repo.radeon.com/rocm/apt/${1-latest} ${UBUNTU_CODENAME} main" \ +VERSION=${1-6.3.2} + +echo "deb [arch=amd64] https://repo.radeon.com/rocm/apt/${VERSION} ${UBUNTU_CODENAME} main" \ | sudo tee /etc/apt/sources.list.d/rocm.list echo 'export PATH=/opt/rocm/llvm/bin:/opt/rocm/bin:/opt/rocm/profiler/bin:/opt/rocm/opencl/bin:$PATH' \ | sudo tee -a /etc/profile.d/rocm.sh @@ -50,12 +52,16 @@ sudo apt-get install -y --no-install-recommends \ libzstd-dev \ ninja-build \ openmpi-bin \ - rocm-dev \ - rocfft-dev \ - rocprim-dev \ - rocsparse-dev \ - rocrand-dev \ - hiprand-dev + rocm-dev${VERSION} \ + roctracer-dev${VERSION} \ + rocprofiler-dev${VERSION} \ + rocrand-dev${VERSION} \ + rocfft-dev${VERSION} \ + rocprim-dev${VERSION} \ + rocsparse-dev${VERSION} + +# hiprand-dev is a new package that does not exist in old versions +sudo apt-get install -y --no-install-recommends hiprand-dev${VERSION} || true # ccache $(dirname "$0")/ccache.sh @@ -69,11 +75,6 @@ which clang++ export CXX=$(which clang++) export CC=$(which clang) -# "mpic++ --showme" forgets open-pal in Ubuntu 20.04 + OpenMPI 4.0.3 -# https://bugs.launchpad.net/ubuntu/+source/openmpi/+bug/1941786 -# https://github.com/open-mpi/ompi/issues/9317 -export LDFLAGS="-lopen-pal" - # cmake-easyinstall # sudo curl -L -o /usr/local/bin/cmake-easyinstall https://raw.githubusercontent.com/ax3l/cmake-easyinstall/main/cmake-easyinstall diff --git a/.github/workflows/hip.yml b/.github/workflows/hip.yml index f61c8fe1313..cf679f67ca7 100644 --- a/.github/workflows/hip.yml +++ b/.github/workflows/hip.yml @@ -16,7 +16,7 @@ concurrency: jobs: build_hip_3d_sp: name: HIP 3D SP - runs-on: ubuntu-20.04 + runs-on: ubuntu-24.04 env: CXXFLAGS: "-Werror -Wno-deprecated-declarations -Wno-error=pass-failed" CMAKE_GENERATOR: Ninja @@ -25,7 +25,7 @@ jobs: - uses: actions/checkout@v4 - name: install dependencies shell: bash - run: .github/workflows/dependencies/hip.sh + run: .github/workflows/dependencies/hip.sh 6.3.2 - name: CCache Cache uses: actions/cache@v4 with: @@ -48,11 +48,6 @@ jobs: export CXX=$(which clang++) export CC=$(which clang) - # "mpic++ --showme" forgets open-pal in Ubuntu 20.04 + OpenMPI 4.0.3 - # https://bugs.launchpad.net/ubuntu/+source/openmpi/+bug/1941786 - # https://github.com/open-mpi/ompi/issues/9317 - export LDFLAGS="-lopen-pal" - cmake -S . -B build_sp \ -DCMAKE_VERBOSE_MAKEFILE=ON \ -DAMReX_AMD_ARCH=gfx900 \ @@ -75,7 +70,7 @@ jobs: build_hip_2d_dp: name: HIP 2D DP - runs-on: ubuntu-20.04 + runs-on: ubuntu-24.04 env: CXXFLAGS: "-Werror -Wno-deprecated-declarations -Wno-error=pass-failed" CMAKE_GENERATOR: Ninja @@ -84,7 +79,7 @@ jobs: - uses: actions/checkout@v4 - name: install dependencies shell: bash - run: .github/workflows/dependencies/hip.sh + run: .github/workflows/dependencies/hip.sh 6.3.2 - name: CCache Cache uses: actions/cache@v4 with: @@ -107,11 +102,6 @@ jobs: export CXX=$(which clang++) export CC=$(which clang) - # "mpic++ --showme" forgets open-pal in Ubuntu 20.04 + OpenMPI 4.0.3 - # https://bugs.launchpad.net/ubuntu/+source/openmpi/+bug/1941786 - # https://github.com/open-mpi/ompi/issues/9317 - export LDFLAGS="-lopen-pal" - cmake -S . 
-B build_2d \ -DCMAKE_VERBOSE_MAKEFILE=ON \ -DAMReX_AMD_ARCH=gfx900 \ diff --git a/Source/AcceleratorLattice/LatticeElementFinder.H b/Source/AcceleratorLattice/LatticeElementFinder.H index f7eb5c66531..68a90906dbc 100644 --- a/Source/AcceleratorLattice/LatticeElementFinder.H +++ b/Source/AcceleratorLattice/LatticeElementFinder.H @@ -114,6 +114,9 @@ struct LatticeElementFinderDevice AcceleratorLattice const& accelerator_lattice, LatticeElementFinder const & h_finder); + /* Whether the class has been initialized */ + bool m_initialized = false; + /* Size and location of the index lookup table */ amrex::Real m_zmin; amrex::Real m_dz; diff --git a/Source/AcceleratorLattice/LatticeElementFinder.cpp b/Source/AcceleratorLattice/LatticeElementFinder.cpp index 64e593aee30..93aa70e4242 100644 --- a/Source/AcceleratorLattice/LatticeElementFinder.cpp +++ b/Source/AcceleratorLattice/LatticeElementFinder.cpp @@ -92,6 +92,8 @@ LatticeElementFinderDevice::InitLatticeElementFinderDevice (WarpXParIter const& LatticeElementFinder const & h_finder) { + m_initialized = true; + auto& warpx = WarpX::GetInstance(); int const lev = a_pti.GetLevel(); diff --git a/Source/Particles/Gather/GetExternalFields.H b/Source/Particles/Gather/GetExternalFields.H index 90a61bd25db..c3c8f992b07 100644 --- a/Source/Particles/Gather/GetExternalFields.H +++ b/Source/Particles/Gather/GetExternalFields.H @@ -15,8 +15,6 @@ #include #include -#include - /** \brief Functor class that assigns external * field values (E and B) to particles. @@ -56,10 +54,10 @@ struct GetExternalEBField const amrex::ParticleReal* AMREX_RESTRICT m_uy = nullptr; const amrex::ParticleReal* AMREX_RESTRICT m_uz = nullptr; - std::optional<LatticeElementFinderDevice> d_lattice_element_finder; + LatticeElementFinderDevice d_lattice_element_finder; [[nodiscard]] AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE - bool isNoOp () const { return (m_Etype == None && m_Btype == None && !d_lattice_element_finder.has_value()); } + bool isNoOp () const { return (m_Etype == None && m_Btype == None && !d_lattice_element_finder.m_initialized); } AMREX_GPU_HOST_DEVICE AMREX_FORCE_INLINE void operator () (long i, @@ -72,10 +70,9 @@ struct GetExternalEBField { using namespace amrex::literals; - if (d_lattice_element_finder) { - // Note that the "*" is needed since d_lattice_element_finder is optional - (*d_lattice_element_finder)(i, field_Ex, field_Ey, field_Ez, - field_Bx, field_By, field_Bz); + if (d_lattice_element_finder.m_initialized) { + d_lattice_element_finder(i, field_Ex, field_Ey, field_Ez, + field_Bx, field_By, field_Bz); } if (m_Etype == None && m_Btype == None) { return; } From 66fe1154a952089b28af693c10bce92694bf8dfb Mon Sep 17 00:00:00 2001 From: Junmin Gu Date: Mon, 3 Mar 2025 13:11:20 -0800 Subject: [PATCH 264/278] Added support to use adios2's flatten_step (#5634) To enable it, put it in the openPMD options through the input file, e.g.: ``` diag1.openpmd_backend = bp5 diag1.adios2_engine.parameters.FlattenSteps = on ``` This feature is useful for the BTD use case: data can be flushed after each buffered write of a snapshot. To check whether this feature is in use, try "bpls -V your_bp_file_name". Also adds a fix as in https://github.com/openPMD/openPMD-api/issues/1655 for BP5 with file-based encoding, i.e., when some ranks have no particles.
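For context, a back-transformed diagnostic that exercises this option could be configured as in the following sketch (the diagnostic name is illustrative; `BackTransformed` is the standard WarpX diagnostic type):

```
diag1.diag_type = BackTransformed
diag1.format = openpmd
diag1.openpmd_backend = bp5
diag1.adios2_engine.parameters.FlattenSteps = on
```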
--------- Co-authored-by: Junmin Gu Co-authored-by: Luca Fedeli Co-authored-by: Junmin Gu Co-authored-by: Axel Huebl --- Docs/source/usage/parameters.rst | 25 +++++++++++------- Source/Diagnostics/WarpXOpenPMD.H | 13 ++++++++++ Source/Diagnostics/WarpXOpenPMD.cpp | 40 +++++++++++++++++++++++++---- 3 files changed, 64 insertions(+), 14 deletions(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 77f99044448..3d24afdfa4d 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2796,7 +2796,7 @@ In-situ capabilities can be used by turning on Sensei or Ascent (provided they a Only read if ``.format = sensei``. When 1 lower left corner of the mesh is pinned to 0.,0.,0. -* ``.openpmd_backend`` (``bp``, ``h5`` or ``json``) optional, only used if ``.format = openpmd`` +* ``.openpmd_backend`` (``bp5``, ``bp4``, ``h5`` or ``json``) optional, only used if ``.format = openpmd`` `I/O backend `_ for `openPMD `_ data dumps. ``bp`` is the `ADIOS I/O library `_, ``h5`` is the `HDF5 format `_, and ``json`` is a `simple text format `_. ``json`` only works with serial/single-rank jobs. @@ -2818,19 +2818,26 @@ In-situ capabilities can be used by turning on Sensei or Ascent (provided they a .. code-block:: text - .adios2_operator.type = blosc - .adios2_operator.parameters.compressor = zstd - .adios2_operator.parameters.clevel = 1 - .adios2_operator.parameters.doshuffle = BLOSC_BITSHUFFLE - .adios2_operator.parameters.threshold = 2048 - .adios2_operator.parameters.nthreads = 6 # per MPI rank (and thus per GPU) + .adios2_operator.type = blosc + .adios2_operator.parameters.compressor = zstd + .adios2_operator.parameters.clevel = 1 + .adios2_operator.parameters.doshuffle = BLOSC_BITSHUFFLE + .adios2_operator.parameters.threshold = 2048 + .adios2_operator.parameters.nthreads = 6 # per MPI rank (and thus per GPU) or for the lossy ZFP compressor using very strong compression per scalar: .. code-block:: text - .adios2_operator.type = zfp - .adios2_operator.parameters.precision = 3 + .adios2_operator.type = zfp + .adios2_operator.parameters.precision = 3 + + For back-transformed diagnostics with ADIOS BP5, we are experimenting with a new option for variable-based encoding that "flattens" the output steps, aiming to increase write and read performance: + + .. code-block:: text + + .openpmd_backend = bp5 + .adios2_engine.parameters.FlattenSteps = on * ``.adios2_engine.type`` (``bp4``, ``sst``, ``ssc``, ``dataman``) optional, `ADIOS2 Engine type `__ for `openPMD `_ data dumps. diff --git a/Source/Diagnostics/WarpXOpenPMD.H b/Source/Diagnostics/WarpXOpenPMD.H index 99d6e0682ab..a25b1057da9 100644 --- a/Source/Diagnostics/WarpXOpenPMD.H +++ b/Source/Diagnostics/WarpXOpenPMD.H @@ -176,6 +176,19 @@ private: } } + /** Flushing out data of the current openPMD iteration + * + * @param[in] isBTD if the current diagnostic is BTD + * + * if isBTD=false, apply the default flush behaviour + * if isBTD=true, advice to use ADIOS Put() instead of PDW for better performance. 
+ * + * iteration.seriesFlush() is used instead of series.flush() + * because the latter flushes only if data is dirty + * this causes trouble when the underlying writing function is collective (like PDW) + * + */ + void flushCurrent (bool isBTD) const; /** This function does initial setup for the fields when interation is newly created * @param[in] meshes The meshes in a series diff --git a/Source/Diagnostics/WarpXOpenPMD.cpp b/Source/Diagnostics/WarpXOpenPMD.cpp index 96e8bb846bb..91b187b05e2 100644 --- a/Source/Diagnostics/WarpXOpenPMD.cpp +++ b/Source/Diagnostics/WarpXOpenPMD.cpp @@ -401,6 +401,24 @@ WarpXOpenPMDPlot::~WarpXOpenPMDPlot () } } +void WarpXOpenPMDPlot::flushCurrent (bool isBTD) const +{ + WARPX_PROFILE("WarpXOpenPMDPlot::flushCurrent"); + + auto hasOption = m_OpenPMDoptions.find("FlattenSteps"); + const bool flattenSteps = isBTD && (m_Series->backend() == "ADIOS2") && (hasOption != std::string::npos); + + openPMD::Iteration currIteration = GetIteration(m_CurrentStep, isBTD); + if (flattenSteps) { + // delayed until all fields and particles are registered for flush + // and dumped once via flattenSteps + currIteration.seriesFlush( "adios2.engine.preferred_flush_target = \"buffer\"" ); + } + else { + currIteration.seriesFlush(); + } +} + std::string WarpXOpenPMDPlot::GetFileName (std::string& filepath) { @@ -531,7 +549,6 @@ WarpXOpenPMDPlot::WriteOpenPMDParticles (const amrex::Vector& part WARPX_PROFILE("WarpXOpenPMDPlot::WriteOpenPMDParticles()"); for (const auto & particle_diag : particle_diags) { - WarpXParticleContainer* pc = particle_diag.getParticleContainer(); PinnedMemoryParticleContainer* pinned_pc = particle_diag.getPinnedParticleContainer(); if (isBTD || use_pinned_pc) { @@ -649,6 +666,17 @@ for (const auto & particle_diag : particle_diags) { pc->getCharge(), pc->getMass(), isBTD, isLastBTDFlush); } + + auto hasOption = m_OpenPMDoptions.find("FlattenSteps"); + const bool flattenSteps = isBTD && (m_Series->backend() == "ADIOS2") && (hasOption != std::string::npos); + + if (flattenSteps) + { + // forcing new step so data from each btd batch in + // preferred_flush_target="buffer" can be flushed out + openPMD::Iteration currIteration = GetIteration(m_CurrentStep, isBTD); + currIteration.seriesFlush(R"(adios2.engine.preferred_flush_target = "new_step")"); + } } void @@ -665,6 +693,7 @@ WarpXOpenPMDPlot::DumpToFile (ParticleContainer* pc, const bool isLastBTDFlush ) { + WARPX_PROFILE("WarpXOpenPMDPlot::DumpToFile()"); WARPX_ALWAYS_ASSERT_WITH_MESSAGE(m_Series != nullptr, "openPMD: series must be initialized"); AMREX_ALWAYS_ASSERT(write_real_comp.size() == pc->NumRealComps()); @@ -723,8 +752,7 @@ WarpXOpenPMDPlot::DumpToFile (ParticleContainer* pc, SetConstParticleRecordsEDPIC(currSpecies, positionComponents, NewParticleVectorSize, charge, mass); } - // open files from all processors, in case some will not contribute below - m_Series->flush(); + flushCurrent(isBTD); // dump individual particles bool contributed_particles = false; // did the local MPI rank contribute particles? 
@@ -765,6 +793,7 @@ WarpXOpenPMDPlot::DumpToFile (ParticleContainer* pc, // BP4 (ADIOS 2.8): last MPI rank's `Put` meta-data wins // BP5 (ADIOS 2.8): everyone has to write an empty block if (is_resizing_flush && !contributed_particles && isBTD && m_Series->backend() == "ADIOS2") { + WARPX_PROFILE("WarpXOpenPMDPlot::ResizeInADIOS()"); for( auto & [record_name, record] : currSpecies ) { for( auto & [comp_name, comp] : record ) { if (comp.constant()) { continue; } @@ -804,7 +833,7 @@ WarpXOpenPMDPlot::DumpToFile (ParticleContainer* pc, } } - m_Series->flush(); + flushCurrent(isBTD); } void @@ -1476,7 +1505,7 @@ WarpXOpenPMDPlot::WriteOpenPMDFieldsAll ( //const std::string& filename, amrex::Gpu::streamSynchronize(); #endif // Flush data to disk after looping over all components - m_Series->flush(); + flushCurrent(isBTD); } // levels loop (i) } #endif // WARPX_USE_OPENPMD @@ -1490,6 +1519,7 @@ WarpXParticleCounter::WarpXParticleCounter (ParticleContainer* pc): m_MPIRank{amrex::ParallelDescriptor::MyProc()}, m_MPISize{amrex::ParallelDescriptor::NProcs()} { + WARPX_PROFILE("WarpXOpenPMDPlot::ParticleCounter()"); m_ParticleCounterByLevel.resize(pc->finestLevel()+1); m_ParticleOffsetAtRank.resize(pc->finestLevel()+1); m_ParticleSizeAtRank.resize(pc->finestLevel()+1); From a0f983e9cf05215c0715b8ebd0bfa42887ade519 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Mon, 3 Mar 2025 21:59:35 +0000 Subject: [PATCH 265/278] [pre-commit.ci] pre-commit autoupdate (#5725) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit updates: - [github.com/astral-sh/ruff-pre-commit: v0.9.7 → v0.9.9](https://github.com/astral-sh/ruff-pre-commit/compare/v0.9.7...v0.9.9) Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c3d87264820..f3ccdcbc635 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -69,7 +69,7 @@ repos: # Python: Ruff linter & formatter # https://docs.astral.sh/ruff/ - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.9.7 + rev: v0.9.9 hooks: # Run the linter - id: ruff From 74f7c1d0e2a5ca48d2dc8a9c05d4a3abea7ae4f1 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Mon, 3 Mar 2025 16:38:22 -0800 Subject: [PATCH 266/278] pre-commit: set up clang-format hook (#5687) This PR introduces a prototype for adding the `clang-format` hook to `pre-commit`. To disable formatting, e.g., on blocks of mathematical formulas, use: ```C++ // clang-format off amrex::Real x = my * custom + pretty * alignment; // clang-format on ``` Currently, the hook is applied only to the `Source/main.cpp` file to demonstrate its functionality. If this approach is deemed useful, we can gradually extend it to all C++ files in our codebase, one PR at a time. _We could make this into a GitHub "project" to easily keep track of the progress._ If not, please feel free to close the PR without merging. 
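As a usage sketch (assuming `pre-commit` is installed locally), the new hook can be exercised on demand via its hook id:

```bash
pre-commit run clang-format --all-files
```

Given the `files` filter in the hook configuration, this currently reformats only `Source/main.cpp`.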
The `.clang-format` configuration file has been generated based on the `LLVM` style using the command ```bash clang-format -style=llvm -dump-config > .clang-format ``` and has been modified in the following ways: - ``` AlwaysBreakAfterDefinitionReturnType: All # instead of None ``` - ``` IndentWidth: 4 # instead of 2 ``` - ``` PointerAlignment: Left # instead of Right ``` - ``` SpaceBeforeParens: Custom # instead of ControlStatements SpaceBeforeParensOptions: ... AfterFunctionDefinitionName: true # instead of false AfterFunctionDeclarationName: true # instead of false ... ``` A different base style could be chosen and/or further customization could be done in future PRs as needed, when the formatting is applied to more code. --- .clang-format | 273 ++++++++++++++++++++++++++++++++++++++++ .pre-commit-config.yaml | 6 +- Source/main.cpp | 6 +- 3 files changed, 281 insertions(+), 4 deletions(-) create mode 100644 .clang-format diff --git a/.clang-format b/.clang-format new file mode 100644 index 00000000000..d6b4419f5d5 --- /dev/null +++ b/.clang-format @@ -0,0 +1,273 @@ +--- +Language: Cpp +AccessModifierOffset: -2 +AlignAfterOpenBracket: Align +AlignArrayOfStructures: None +AlignConsecutiveAssignments: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + AlignFunctionPointers: false + PadOperators: true +AlignConsecutiveBitFields: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + AlignFunctionPointers: false + PadOperators: false +AlignConsecutiveDeclarations: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + AlignFunctionPointers: false + PadOperators: false +AlignConsecutiveMacros: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + AlignFunctionPointers: false + PadOperators: false +AlignConsecutiveShortCaseStatements: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCaseArrows: false + AlignCaseColons: false +AlignConsecutiveTableGenBreakingDAGArgColons: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + AlignFunctionPointers: false + PadOperators: false +AlignConsecutiveTableGenCondOperatorColons: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + AlignFunctionPointers: false + PadOperators: false +AlignConsecutiveTableGenDefinitionColons: + Enabled: false + AcrossEmptyLines: false + AcrossComments: false + AlignCompound: false + AlignFunctionPointers: false + PadOperators: false +AlignEscapedNewlines: Right +AlignOperands: Align +AlignTrailingComments: + Kind: Always + OverEmptyLines: 0 +AllowAllArgumentsOnNextLine: true +AllowAllParametersOfDeclarationOnNextLine: true +AllowBreakBeforeNoexceptSpecifier: Never +AllowShortBlocksOnASingleLine: Never +AllowShortCaseExpressionOnASingleLine: true +AllowShortCaseLabelsOnASingleLine: false +AllowShortCompoundRequirementOnASingleLine: true +AllowShortEnumsOnASingleLine: true +AllowShortFunctionsOnASingleLine: All +AllowShortIfStatementsOnASingleLine: Never +AllowShortLambdasOnASingleLine: All +AllowShortLoopsOnASingleLine: false +AlwaysBreakAfterDefinitionReturnType: All +AlwaysBreakBeforeMultilineStrings: false +AttributeMacros: + - __capability +BinPackArguments: true +BinPackParameters: true +BitFieldColonSpacing: Both +BraceWrapping: + AfterCaseLabel: false + AfterClass: false + AfterControlStatement: Never + AfterEnum: false + AfterExternBlock: false + AfterFunction: false 
+ AfterNamespace: false + AfterObjCDeclaration: false + AfterStruct: false + AfterUnion: false + BeforeCatch: false + BeforeElse: false + BeforeLambdaBody: false + BeforeWhile: false + IndentBraces: false + SplitEmptyFunction: true + SplitEmptyRecord: true + SplitEmptyNamespace: true +BreakAdjacentStringLiterals: true +BreakAfterAttributes: Leave +BreakAfterJavaFieldAnnotations: false +BreakAfterReturnType: None +BreakArrays: true +BreakBeforeBinaryOperators: None +BreakBeforeConceptDeclarations: Always +BreakBeforeBraces: Attach +BreakBeforeInlineASMColon: OnlyMultiline +BreakBeforeTernaryOperators: true +BreakConstructorInitializers: BeforeColon +BreakFunctionDefinitionParameters: false +BreakInheritanceList: BeforeColon +BreakStringLiterals: true +BreakTemplateDeclarations: MultiLine +ColumnLimit: 80 +CommentPragmas: '^ IWYU pragma:' +CompactNamespaces: false +ConstructorInitializerIndentWidth: 4 +ContinuationIndentWidth: 4 +Cpp11BracedListStyle: true +DerivePointerAlignment: false +DisableFormat: false +EmptyLineAfterAccessModifier: Never +EmptyLineBeforeAccessModifier: LogicalBlock +ExperimentalAutoDetectBinPacking: false +FixNamespaceComments: true +ForEachMacros: + - foreach + - Q_FOREACH + - BOOST_FOREACH +IfMacros: + - KJ_IF_MAYBE +IncludeBlocks: Preserve +IncludeCategories: + - Regex: '^"(llvm|llvm-c|clang|clang-c)/' + Priority: 2 + SortPriority: 0 + CaseSensitive: false + - Regex: '^(<|"(gtest|gmock|isl|json)/)' + Priority: 3 + SortPriority: 0 + CaseSensitive: false + - Regex: '.*' + Priority: 1 + SortPriority: 0 + CaseSensitive: false +IncludeIsMainRegex: '(Test)?$' +IncludeIsMainSourceRegex: '' +IndentAccessModifiers: false +IndentCaseBlocks: false +IndentCaseLabels: false +IndentExternBlock: AfterExternBlock +IndentGotoLabels: true +IndentPPDirectives: None +IndentRequiresClause: true +IndentWidth: 4 +IndentWrappedFunctionNames: false +InsertBraces: false +InsertNewlineAtEOF: false +InsertTrailingCommas: None +IntegerLiteralSeparator: + Binary: 0 + BinaryMinDigits: 0 + Decimal: 0 + DecimalMinDigits: 0 + Hex: 0 + HexMinDigits: 0 +JavaScriptQuotes: Leave +JavaScriptWrapImports: true +KeepEmptyLines: + AtEndOfFile: false + AtStartOfBlock: true + AtStartOfFile: true +LambdaBodyIndentation: Signature +LineEnding: DeriveLF +MacroBlockBegin: '' +MacroBlockEnd: '' +MainIncludeChar: Quote +MaxEmptyLinesToKeep: 1 +NamespaceIndentation: None +ObjCBinPackProtocolList: Auto +ObjCBlockIndentWidth: 2 +ObjCBreakBeforeNestedBlockParam: true +ObjCSpaceAfterProperty: false +ObjCSpaceBeforeProtocolList: true +PackConstructorInitializers: BinPack +PenaltyBreakAssignment: 2 +PenaltyBreakBeforeFirstCallParameter: 19 +PenaltyBreakComment: 300 +PenaltyBreakFirstLessLess: 120 +PenaltyBreakOpenParenthesis: 0 +PenaltyBreakScopeResolution: 500 +PenaltyBreakString: 1000 +PenaltyBreakTemplateDeclaration: 10 +PenaltyExcessCharacter: 1000000 +PenaltyIndentedWhitespace: 0 +PenaltyReturnTypeOnItsOwnLine: 60 +PointerAlignment: Left +PPIndentWidth: -1 +QualifierAlignment: Leave +ReferenceAlignment: Pointer +ReflowComments: true +RemoveBracesLLVM: false +RemoveParentheses: Leave +RemoveSemicolon: false +RequiresClausePosition: OwnLine +RequiresExpressionIndentation: OuterScope +SeparateDefinitionBlocks: Leave +ShortNamespaceLines: 1 +SkipMacroDefinitionBody: false +SortIncludes: CaseSensitive +SortJavaStaticImport: Before +SortUsingDeclarations: LexicographicNumeric +SpaceAfterCStyleCast: false +SpaceAfterLogicalNot: false +SpaceAfterTemplateKeyword: true +SpaceAroundPointerQualifiers: Default 
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeCaseColon: false
+SpaceBeforeCpp11BracedList: false
+SpaceBeforeCtorInitializerColon: true
+SpaceBeforeInheritanceColon: true
+SpaceBeforeJsonColon: false
+SpaceBeforeParens: Custom
+SpaceBeforeParensOptions:
+  AfterControlStatements: true
+  AfterForeachMacros: true
+  AfterFunctionDefinitionName: true
+  AfterFunctionDeclarationName: true
+  AfterIfMacros: true
+  AfterOverloadedOperator: false
+  AfterPlacementOperator: true
+  AfterRequiresInClause: false
+  AfterRequiresInExpression: false
+  BeforeNonEmptyParentheses: false
+SpaceBeforeRangeBasedForLoopColon: true
+SpaceBeforeSquareBrackets: false
+SpaceInEmptyBlock: false
+SpacesBeforeTrailingComments: 1
+SpacesInAngles: Never
+SpacesInContainerLiterals: true
+SpacesInLineCommentPrefix:
+  Minimum: 1
+  Maximum: -1
+SpacesInParens: Never
+SpacesInParensOptions:
+  ExceptDoubleParentheses: false
+  InCStyleCasts: false
+  InConditionalStatements: false
+  InEmptyParentheses: false
+  Other: false
+SpacesInSquareBrackets: false
+Standard: Latest
+StatementAttributeLikeMacros:
+  - Q_EMIT
+StatementMacros:
+  - Q_UNUSED
+  - QT_REQUIRE_VERSION
+TableGenBreakInsideDAGArg: DontBreak
+TabWidth: 8
+UseTab: Never
+VerilogBreakBetweenInstancePorts: true
+WhitespaceSensitiveMacros:
+  - BOOST_PP_STRINGIZE
+  - CF_SWIFT_NAME
+  - NS_SWIFT_NAME
+  - PP_STRINGIZE
+  - STRINGIZE
+...
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index f3ccdcbc635..078d4802b7f 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -64,7 +64,11 @@ repos:
 #    files: (\.cmake|CMakeLists.txt)(.in)?$

 # C++ formatting
-# clang-format
+- repo: https://github.com/pre-commit/mirrors-clang-format
+  rev: v19.1.7
+  hooks:
+  - id: clang-format
+    files: '^Source/main.cpp'

 # Python: Ruff linter & formatter
 # https://docs.astral.sh/ruff/
diff --git a/Source/main.cpp b/Source/main.cpp
index 9273cd3928b..af7f0b857c9 100644
--- a/Source/main.cpp
+++ b/Source/main.cpp
@@ -15,8 +15,8 @@

 #include

-int main(int argc, char* argv[])
-{
+int
+main (int argc, char* argv[]) {
     warpx::initialization::initialize_external_libraries(argc, argv);
     {
         WARPX_PROFILE_VAR("main()", pmain);
@@ -31,7 +31,7 @@ int main(int argc, char* argv[])
         WarpX::Finalize();

         timer.record_stop_time();
-        if (is_warpx_verbose){
+        if (is_warpx_verbose) {
             amrex::Print() << "Total Time : " << timer.get_global_duration() << '\n';
         }

From 546a972430af7adcfa38300598aeace8e28617b6 Mon Sep 17 00:00:00 2001
From: David Grote
Date: Mon, 3 Mar 2025 16:47:06 -0800
Subject: [PATCH 267/278] Add Villasenor and Buneman current deposition with
 explicit scheme (#5700)

The Villasenor and Buneman current deposition was moved to a separate kernel
routine, and implicit and explicit callers were added.

A test case is a 2D uniform plasma, run for a number of plasma periods. The
images below show the relative change in the energy for the parts of the
system. The total energy is conserved to roughly 1.e-5. The results are shown
for both the Villasenor and the Esirkepov current deposition. The results are
essentially the same, with the differences in the two cases below the
resolution of the images.
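For reference, a minimal sketch of selecting the new scheme from an input file (using the option name documented in the parameters update below):

```
algo.current_deposition = villasenor
```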
![energy_explicit_VandB](https://github.com/user-attachments/assets/104ae3e8-608f-48ac-952b-bd3029e3c0e6)
![energy_explicit_Esirkepov](https://github.com/user-attachments/assets/793fd495-feff-42dd-85df-b6b7acc934ea)
---
 Docs/source/usage/parameters.rst             |    9 +-
 .../Particles/Deposition/CurrentDeposition.H | 1181 +++++++++--------
 Source/Particles/WarpXParticleContainer.cpp  |   26 +-
 Source/WarpX.cpp                             |    9 -
 4 files changed, 687 insertions(+), 538 deletions(-)

diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst
index 3d24afdfa4d..735504fc7d7 100644
--- a/Docs/source/usage/parameters.rst
+++ b/Docs/source/usage/parameters.rst
@@ -2254,7 +2254,7 @@ Particle push, charge and current deposition, field gathering

 * ``algo.current_deposition`` (`string`, optional)
     This parameter selects the algorithm for the deposition of the current density.
-    Available options are: ``direct``, ``esirkepov``, and ``vay``. The default choice
+    Available options are: ``direct``, ``esirkepov``, ``villasenor``, and ``vay``. The default choice
     is ``esirkepov`` for FDTD maxwell solvers but ``direct`` for standard or
     Galilean PSATD solver (i.e. with ``algo.maxwell_solver = psatd``) and
     for the hybrid-PIC solver (i.e. with ``algo.maxwell_solver = hybrid``) and for
@@ -2273,7 +2273,12 @@ Particle push, charge and current deposition, field gathering
        :cite:t:`param-Esirkepovcpc01`.
        This deposition scheme guarantees charge conservation for shape factors of arbitrary order.

-    3. ``vay``
+    3. ``villasenor``
+
+       This uses the Villasenor-Buneman algorithm which guarantees charge conservation.
+       The algorithm is described in :cite:t:`param-Villasenorcpc92`.
+
+    4. ``vay``

        The current density is deposited as described in :cite:t:`param-VayJCP2013`
        (see section :ref:`current_deposition` for more details).
        This option guarantees charge conservation only when used in combination
diff --git a/Source/Particles/Deposition/CurrentDeposition.H b/Source/Particles/Deposition/CurrentDeposition.H
index bc870257d8f..e0c5baffbc3 100644
--- a/Source/Particles/Deposition/CurrentDeposition.H
+++ b/Source/Particles/Deposition/CurrentDeposition.H
@@ -1306,6 +1306,647 @@ void doChargeConservingDepositionShapeNImplicit ([[maybe_unused]]const amrex::Pa
     );
 }

+/**
+ * \brief Villasenor and Buneman Current Deposition kernel for thread thread_num.
+ * This is a charge-conserving deposition. The difference from Esirkepov is that the deposit is done segment
+ * by segment, where the segments are determined by cell crossings. In general, this results
+ * in a tighter stencil. The implementation is valid for an arbitrary number of cell crossings.
+ *
+ * \param depos_order deposition order
+ * \param xp_old,yp_old,zp_old Old particle positions (nominally at start of step)
+ * \param xp_new,yp_new,zp_new New particle positions (nominally at end of step)
+ * \param wp Pointer to array of particle weights.
+ * \param uxp_mid,uyp_mid,uzp_mid Particle momentum at middle of step + * \param gaminv One over gamma for particle at middle of step + * \param Jx_arr,Jy_arr,Jz_arr Array4 of current density, either full array or tile + * \param dt Time step for particle level + * \param dinv 3D cell size inverse + * \param xyzmin Physical lower bounds of domain + * \param lo Index lower bounds of domain + * \param invvol One over cell volume + * \param q Species charge + * \param n_rz_azimuthal_modes Number of azimuthal modes when using RZ geometry + */ +template +AMREX_GPU_HOST_DEVICE AMREX_INLINE +void VillasenorDepositionShapeNKernel ([[maybe_unused]]amrex::ParticleReal const xp_old, + [[maybe_unused]]amrex::ParticleReal const yp_old, + [[maybe_unused]]amrex::ParticleReal const zp_old, + [[maybe_unused]]amrex::ParticleReal const xp_new, + [[maybe_unused]]amrex::ParticleReal const yp_new, + [[maybe_unused]]amrex::ParticleReal const zp_new, + amrex::ParticleReal const wq, + [[maybe_unused]]amrex::ParticleReal const uxp_mid, + [[maybe_unused]]amrex::ParticleReal const uyp_mid, + [[maybe_unused]]amrex::ParticleReal const uzp_mid, + [[maybe_unused]]amrex::ParticleReal const gaminv, + amrex::Array4const & Jx_arr, + amrex::Array4const & Jy_arr, + amrex::Array4const & Jz_arr, + amrex::Real const dt, + amrex::XDim3 const & dinv, + amrex::XDim3 const & xyzmin, + amrex::Dim3 const lo, + amrex::Real const invvol, + [[maybe_unused]] int const n_rz_azimuthal_modes) +{ + + using namespace amrex::literals; + +#if (AMREX_SPACEDIM > 1) + amrex::Real constexpr one_third = 1.0_rt / 3.0_rt; + amrex::Real constexpr one_sixth = 1.0_rt / 6.0_rt; +#endif + + // computes current and old position in grid units +#if defined(WARPX_DIM_RZ) + amrex::Real const xp_mid = (xp_new + xp_old)*0.5_rt; + amrex::Real const yp_mid = (yp_new + yp_old)*0.5_rt; + amrex::Real const rp_new = std::sqrt(xp_new*xp_new + yp_new*yp_new); + amrex::Real const rp_old = std::sqrt(xp_old*xp_old + yp_old*yp_old); + amrex::Real const rp_mid = (rp_new + rp_old)/2._rt; + amrex::Real const costheta_mid = (rp_mid > 0._rt ? xp_mid/rp_mid : 1._rt); + amrex::Real const sintheta_mid = (rp_mid > 0._rt ? 
yp_mid/rp_mid : 0._rt); + Complex const xy_mid0 = Complex{costheta_mid, sintheta_mid}; + + // Keep these double to avoid bug in single precision + double const x_new = (rp_new - xyzmin.x)*dinv.x; + double const x_old = (rp_old - xyzmin.x)*dinv.x; + amrex::Real const vx = (rp_new - rp_old)/dt; + amrex::Real const vy = (-uxp_mid*sintheta_mid + uyp_mid*costheta_mid)*gaminv; +#elif defined(WARPX_DIM_XZ) + // Keep these double to avoid bug in single precision + double const x_new = (xp_new - xyzmin.x)*dinv.x; + double const x_old = (xp_old - xyzmin.x)*dinv.x; + amrex::Real const vx = (xp_new - xp_old)/dt; + amrex::Real const vy = uyp_mid*gaminv; +#elif defined(WARPX_DIM_1D_Z) + amrex::Real const vx = uxp_mid*gaminv; + amrex::Real const vy = uyp_mid*gaminv; +#elif defined(WARPX_DIM_3D) + // Keep these double to avoid bug in single precision + double const x_new = (xp_new - xyzmin.x)*dinv.x; + double const x_old = (xp_old - xyzmin.x)*dinv.x; + double const y_new = (yp_new - xyzmin.y)*dinv.y; + double const y_old = (yp_old - xyzmin.y)*dinv.y; + amrex::Real const vx = (xp_new - xp_old)/dt; + amrex::Real const vy = (yp_new - yp_old)/dt; +#endif + + // Keep these double to avoid bug in single precision + double const z_new = (zp_new - xyzmin.z)*dinv.z; + double const z_old = (zp_old - xyzmin.z)*dinv.z; + amrex::Real const vz = (zp_new - zp_old)/dt; + + // Define velocity kernals to deposit + amrex::Real const wqx = wq*vx*invvol; + amrex::Real const wqy = wq*vy*invvol; + amrex::Real const wqz = wq*vz*invvol; + + // 1) Determine the number of segments. + // 2) Loop over segments and deposit current. + + // cell crossings are defined at cell edges if depos_order is odd + // cell crossings are defined at cell centers if depos_order is even + + int num_segments = 1; + double shift = 0.0; + if ( (depos_order % 2) == 0 ) { shift = 0.5; } + +#if defined(WARPX_DIM_3D) + + // compute cell crossings in X-direction + const auto i_old = static_cast(x_old-shift); + const auto i_new = static_cast(x_new-shift); + const int cell_crossings_x = std::abs(i_new-i_old); + num_segments += cell_crossings_x; + + // compute cell crossings in Y-direction + const auto j_old = static_cast(y_old-shift); + const auto j_new = static_cast(y_new-shift); + const int cell_crossings_y = std::abs(j_new-j_old); + num_segments += cell_crossings_y; + + // compute cell crossings in Z-direction + const auto k_old = static_cast(z_old-shift); + const auto k_new = static_cast(z_new-shift); + const int cell_crossings_z = std::abs(k_new-k_old); + num_segments += cell_crossings_z; + + // need to assert that the number of cell crossings in each direction + // is within the range permitted by the number of guard cells + // e.g., if (num_segments > 7) ... + + // compute total change in particle position and the initial cell + // locations in each direction used to find the position at cell crossings. + const double dxp = x_new - x_old; + const double dyp = y_new - y_old; + const double dzp = z_new - z_old; + const auto dirX_sign = static_cast(dxp < 0. ? -1. : 1.); + const auto dirY_sign = static_cast(dyp < 0. ? -1. : 1.); + const auto dirZ_sign = static_cast(dzp < 0. ? -1. 
: 1.); + double Xcell = 0., Ycell = 0., Zcell = 0.; + if (num_segments > 1) { + Xcell = static_cast(i_old) + shift + 0.5*(1.-dirX_sign); + Ycell = static_cast(j_old) + shift + 0.5*(1.-dirY_sign); + Zcell = static_cast(k_old) + shift + 0.5*(1.-dirZ_sign); + } + + // loop over the number of segments and deposit + const Compute_shape_factor< depos_order-1 > compute_shape_factor_cell; + const Compute_shape_factor_pair< depos_order > compute_shape_factors_node; + double dxp_seg, dyp_seg, dzp_seg; + double x0_new, y0_new, z0_new; + double x0_old = x_old; + double y0_old = y_old; + double z0_old = z_old; + + for (int ns=0; ns(dxp == 0. ? 1. : dxp_seg/dxp); + const auto seg_factor_y = static_cast(dyp == 0. ? 1. : dyp_seg/dyp); + const auto seg_factor_z = static_cast(dzp == 0. ? 1. : dzp_seg/dzp); + + // compute cell-based weights using the average segment position + double sx_cell[depos_order] = {0.}; + double sy_cell[depos_order] = {0.}; + double sz_cell[depos_order] = {0.}; + double const x0_bar = (x0_new + x0_old)/2.0; + double const y0_bar = (y0_new + y0_old)/2.0; + double const z0_bar = (z0_new + z0_old)/2.0; + const int i0_cell = compute_shape_factor_cell( sx_cell, x0_bar-0.5 ); + const int j0_cell = compute_shape_factor_cell( sy_cell, y0_bar-0.5 ); + const int k0_cell = compute_shape_factor_cell( sz_cell, z0_bar-0.5 ); + + if constexpr (depos_order >= 3) { // higher-order correction to the cell-based weights + const Compute_shape_factor_pair compute_shape_factors_cell; + double sx_old_cell[depos_order] = {0.}; + double sx_new_cell[depos_order] = {0.}; + double sy_old_cell[depos_order] = {0.}; + double sy_new_cell[depos_order] = {0.}; + double sz_old_cell[depos_order] = {0.}; + double sz_new_cell[depos_order] = {0.}; + const int i0_cell_2 = compute_shape_factors_cell( sx_old_cell, sx_new_cell, x0_old-0.5, x0_new-0.5 ); + const int j0_cell_2 = compute_shape_factors_cell( sy_old_cell, sy_new_cell, y0_old-0.5, y0_new-0.5 ); + const int k0_cell_2 = compute_shape_factors_cell( sz_old_cell, sz_new_cell, z0_old-0.5, z0_new-0.5 ); + amrex::ignore_unused(i0_cell_2, j0_cell_2, k0_cell_2); + for (int m=0; m(x_old-shift); + const auto i_new = static_cast(x_new-shift); + const int cell_crossings_x = std::abs(i_new-i_old); + num_segments += cell_crossings_x; + + // compute cell crossings in Z-direction + const auto k_old = static_cast(z_old-shift); + const auto k_new = static_cast(z_new-shift); + const int cell_crossings_z = std::abs(k_new-k_old); + num_segments += cell_crossings_z; + + // need to assert that the number of cell crossings in each direction + // is within the range permitted by the number of guard cells + // e.g., if (num_segments > 5) ... + + // compute total change in particle position and the initial cell + // locations in each direction used to find the position at cell crossings. + const double dxp = x_new - x_old; + const double dzp = z_new - z_old; + const auto dirX_sign = static_cast(dxp < 0. ? -1. : 1.); + const auto dirZ_sign = static_cast(dzp < 0. ? -1. : 1.); + double Xcell = 0., Zcell = 0.; + if (num_segments > 1) { + Xcell = static_cast(i_old) + shift + 0.5*(1.-dirX_sign); + Zcell = static_cast(k_old) + shift + 0.5*(1.-dirZ_sign); + } + + // loop over the number of segments and deposit + const Compute_shape_factor< depos_order-1 > compute_shape_factor_cell; + const Compute_shape_factor_pair< depos_order > compute_shape_factors_node; + double dxp_seg, dzp_seg; + double x0_new, z0_new; + double x0_old = x_old; + double z0_old = z_old; + + for (int ns=0; ns(dxp == 0. ? 1. 
: dxp_seg/dxp); + const auto seg_factor_z = static_cast(dzp == 0. ? 1. : dzp_seg/dzp); + + // compute cell-based weights using the average segment position + double sx_cell[depos_order] = {0.}; + double sz_cell[depos_order] = {0.}; + double const x0_bar = (x0_new + x0_old)/2.0; + double const z0_bar = (z0_new + z0_old)/2.0; + const int i0_cell = compute_shape_factor_cell( sx_cell, x0_bar-0.5 ); + const int k0_cell = compute_shape_factor_cell( sz_cell, z0_bar-0.5 ); + + if constexpr (depos_order >= 3) { // higher-order correction to the cell-based weights + const Compute_shape_factor_pair compute_shape_factors_cell; + double sx_old_cell[depos_order] = {0.}; + double sx_new_cell[depos_order] = {0.}; + double sz_old_cell[depos_order] = {0.}; + double sz_new_cell[depos_order] = {0.}; + const int i0_cell_2 = compute_shape_factors_cell( sx_old_cell, sx_new_cell, x0_old-0.5, x0_new-0.5 ); + const int k0_cell_2 = compute_shape_factors_cell( sz_old_cell, sz_new_cell, z0_old-0.5, z0_new-0.5 ); + amrex::ignore_unused(i0_cell_2, k0_cell_2); + for (int m=0; m(z_old-shift); + const auto k_new = static_cast(z_new-shift); + const int cell_crossings_z = std::abs(k_new-k_old); + num_segments += cell_crossings_z; + + // need to assert that the number of cell crossings in each direction + // is within the range permitted by the number of guard cells + // e.g., if (num_segments > 3) ... + + // compute dzp and the initial cell location used to find the cell crossings. + double const dzp = z_new - z_old; + const auto dirZ_sign = static_cast(dzp < 0. ? -1. : 1.); + double Zcell = static_cast(k_old) + shift + 0.5*(1.-dirZ_sign); + + // loop over the number of segments and deposit + const Compute_shape_factor< depos_order-1 > compute_shape_factor_cell; + const Compute_shape_factor_pair< depos_order > compute_shape_factors_node; + double dzp_seg; + double z0_new; + double z0_old = z_old; + + for (int ns=0; ns(dzp == 0. ? 1. 
: dzp_seg/dzp); + + // compute cell-based weights using the average segment position + double sz_cell[depos_order] = {0.}; + double const z0_bar = (z0_new + z0_old)/2.0; + const int k0_cell = compute_shape_factor_cell( sz_cell, z0_bar-0.5 ); + + if constexpr (depos_order >= 3) { // higher-order correction to the cell-based weights + const Compute_shape_factor_pair compute_shape_factors_cell; + double sz_old_cell[depos_order] = {0.}; + double sz_new_cell[depos_order] = {0.}; + const int k0_cell_2 = compute_shape_factors_cell( sz_old_cell, sz_new_cell, z0_old-0.5, z0_new-0.5 ); + amrex::ignore_unused(k0_cell_2); + for (int m=0; m +void doVillasenorDepositionShapeNExplicit (const GetParticlePosition& GetPosition, + const amrex::ParticleReal * const wp, + [[maybe_unused]]const amrex::ParticleReal * const uxp, + [[maybe_unused]]const amrex::ParticleReal * const uyp, + [[maybe_unused]]const amrex::ParticleReal * const uzp, + const int * const ion_lev, + const amrex::Array4& Jx_arr, + const amrex::Array4& Jy_arr, + const amrex::Array4& Jz_arr, + const long np_to_deposit, + const amrex::Real dt, + const amrex::Real relative_time, + const amrex::XDim3 & dinv, + const amrex::XDim3 & xyzmin, + const amrex::Dim3 lo, + const amrex::Real q, + [[maybe_unused]] const int n_rz_azimuthal_modes) +{ + using namespace amrex::literals; + + // Whether ion_lev is a null pointer (do_ionization=0) or a real pointer + // (do_ionization=1) + bool const do_ionization = ion_lev; + + const amrex::Real invvol = dinv.x*dinv.y*dinv.z; + + // Loop over particles and deposit into Jx_arr, Jy_arr and Jz_arr + amrex::ParallelFor( + np_to_deposit, + [=] AMREX_GPU_DEVICE (long const ip) { + + constexpr amrex::ParticleReal inv_c2 = 1._prt/(PhysConst::c*PhysConst::c); + const amrex::Real gaminv = 1.0_rt/std::sqrt(1.0_rt + uxp[ip]*uxp[ip]*inv_c2 + + uyp[ip]*uyp[ip]*inv_c2 + + uzp[ip]*uzp[ip]*inv_c2); + + amrex::Real wq = q*wp[ip]; + if (do_ionization){ + wq *= ion_lev[ip]; + } + + amrex::ParticleReal xp, yp, zp; + GetPosition(ip, xp, yp, zp); + + // computes current and old position + amrex::Real const xp_new = xp + (relative_time + 0.5_rt*dt)*uxp[ip]*gaminv; + amrex::Real const xp_old = xp_new - dt*uxp[ip]*gaminv; + amrex::Real const yp_new = yp + (relative_time + 0.5_rt*dt)*uyp[ip]*gaminv; + amrex::Real const yp_old = yp_new - dt*uyp[ip]*gaminv; + amrex::Real const zp_new = zp + (relative_time + 0.5_rt*dt)*uzp[ip]*gaminv; + amrex::Real const zp_old = zp_new - dt*uzp[ip]*gaminv; + + VillasenorDepositionShapeNKernel(xp_old, yp_old, zp_old, xp_new, yp_new, zp_new, wq, + uxp[ip], uyp[ip], uzp[ip], gaminv, + Jx_arr, Jy_arr, Jz_arr, + dt, dinv, xyzmin, lo, invvol, n_rz_azimuthal_modes); + + }); +} + /** * \brief Villasenor and Buneman Current Deposition for thread thread_num for implicit scheme. * The specifics for the implicit scheme are in how gamma is determined. 
This is a charge- @@ -1356,7 +1997,7 @@ void doVillasenorDepositionShapeNImplicit ([[maybe_unused]]const amrex::Particle const amrex::Real q, [[maybe_unused]] const int n_rz_azimuthal_modes) { - using namespace amrex; + using namespace amrex::literals; // Whether ion_lev is a null pointer (do_ionization=0) or a real pointer // (do_ionization=1) @@ -1364,11 +2005,6 @@ void doVillasenorDepositionShapeNImplicit ([[maybe_unused]]const amrex::Particle const amrex::Real invvol = dinv.x*dinv.y*dinv.z; -#if (AMREX_SPACEDIM > 1) - Real constexpr one_third = 1.0_rt / 3.0_rt; - Real constexpr one_sixth = 1.0_rt / 6.0_rt; -#endif - // Loop over particles and deposit into Jx_arr, Jy_arr and Jz_arr amrex::ParallelFor( np_to_deposit, @@ -1385,536 +2021,29 @@ void doVillasenorDepositionShapeNImplicit ([[maybe_unused]]const amrex::Particle const amrex::ParticleReal gamma_n = std::sqrt(1._prt + (uxp_n[ip]*uxp_n[ip] + uyp_n[ip]*uyp_n[ip] + uzp_n[ip]*uzp_n[ip])*inv_c2); const amrex::ParticleReal gamma_np1 = std::sqrt(1._prt + (uxp_np1*uxp_np1 + uyp_np1*uyp_np1 + uzp_np1*uzp_np1)*inv_c2); const amrex::ParticleReal gaminv = 2.0_prt/(gamma_n + gamma_np1); +#else + // gaminv is unused in 3D + const amrex::ParticleReal gaminv = 1.; #endif - Real wq = q*wp[ip]; + amrex::Real wq = q*wp[ip]; if (do_ionization){ wq *= ion_lev[ip]; } - ParticleReal xp_nph, yp_nph, zp_nph; + amrex::ParticleReal xp_nph, yp_nph, zp_nph; GetPosition(ip, xp_nph, yp_nph, zp_nph); -#if !defined(WARPX_DIM_1D_Z) - ParticleReal const xp_np1 = 2._prt*xp_nph - xp_n[ip]; -#endif -#if defined(WARPX_DIM_3D) || defined(WARPX_DIM_RZ) - ParticleReal const yp_np1 = 2._prt*yp_nph - yp_n[ip]; -#endif - ParticleReal const zp_np1 = 2._prt*zp_nph - zp_n[ip]; - - // computes current and old position in grid units -#if defined(WARPX_DIM_RZ) - amrex::Real const xp_new = xp_np1; - amrex::Real const yp_new = yp_np1; - amrex::Real const xp_mid = xp_nph; - amrex::Real const yp_mid = yp_nph; - amrex::Real const xp_old = xp_n[ip]; - amrex::Real const yp_old = yp_n[ip]; - amrex::Real const rp_new = std::sqrt(xp_new*xp_new + yp_new*yp_new); - amrex::Real const rp_old = std::sqrt(xp_old*xp_old + yp_old*yp_old); - amrex::Real const rp_mid = (rp_new + rp_old)/2._rt; - amrex::Real costheta_mid, sintheta_mid; - if (rp_mid > 0._rt) { - costheta_mid = xp_mid/rp_mid; - sintheta_mid = yp_mid/rp_mid; - } else { - costheta_mid = 1._rt; - sintheta_mid = 0._rt; - } - const Complex xy_mid0 = Complex{costheta_mid, sintheta_mid}; - - // Keep these double to avoid bug in single precision - double const x_new = (rp_new - xyzmin.x)*dinv.x; - double const x_old = (rp_old - xyzmin.x)*dinv.x; - amrex::Real const vx = (rp_new - rp_old)/dt; - amrex::Real const vy = (-uxp_nph[ip]*sintheta_mid + uyp_nph[ip]*costheta_mid)*gaminv; -#elif defined(WARPX_DIM_XZ) - // Keep these double to avoid bug in single precision - double const x_new = (xp_np1 - xyzmin.x)*dinv.x; - double const x_old = (xp_n[ip] - xyzmin.x)*dinv.x; - amrex::Real const vx = (xp_np1 - xp_n[ip])/dt; - amrex::Real const vy = uyp_nph[ip]*gaminv; -#elif defined(WARPX_DIM_1D_Z) - amrex::Real const vx = uxp_nph[ip]*gaminv; - amrex::Real const vy = uyp_nph[ip]*gaminv; -#elif defined(WARPX_DIM_3D) - // Keep these double to avoid bug in single precision - double const x_new = (xp_np1 - xyzmin.x)*dinv.x; - double const x_old = (xp_n[ip] - xyzmin.x)*dinv.x; - double const y_new = (yp_np1 - xyzmin.y)*dinv.y; - double const y_old = (yp_n[ip] - xyzmin.y)*dinv.y; - amrex::Real const vx = (xp_np1 - xp_n[ip])/dt; - amrex::Real const vy = (yp_np1 - 
yp_n[ip])/dt; -#endif - - // Keep these double to avoid bug in single precision - double const z_new = (zp_np1 - xyzmin.z)*dinv.z; - double const z_old = (zp_n[ip] - xyzmin.z)*dinv.z; - amrex::Real const vz = (zp_np1 - zp_n[ip])/dt; - - // Define velocity kernals to deposit - amrex::Real const wqx = wq*vx*invvol; - amrex::Real const wqy = wq*vy*invvol; - amrex::Real const wqz = wq*vz*invvol; - - // 1) Determine the number of segments. - // 2) Loop over segments and deposit current. - - // cell crossings are defined at cell edges if depos_order is odd - // cell crossings are defined at cell centers if depos_order is even - - int num_segments = 1; - double shift = 0.0; - if ( (depos_order % 2) == 0 ) { shift = 0.5; } - -#if defined(WARPX_DIM_3D) - - // compute cell crossings in X-direction - const auto i_old = static_cast(x_old-shift); - const auto i_new = static_cast(x_new-shift); - const int cell_crossings_x = std::abs(i_new-i_old); - num_segments += cell_crossings_x; - - // compute cell crossings in Y-direction - const auto j_old = static_cast(y_old-shift); - const auto j_new = static_cast(y_new-shift); - const int cell_crossings_y = std::abs(j_new-j_old); - num_segments += cell_crossings_y; - - // compute cell crossings in Z-direction - const auto k_old = static_cast(z_old-shift); - const auto k_new = static_cast(z_new-shift); - const int cell_crossings_z = std::abs(k_new-k_old); - num_segments += cell_crossings_z; - - // need to assert that the number of cell crossings in each direction - // is within the range permitted by the number of guard cells - // e.g., if (num_segments > 7) ... - - // compute total change in particle position and the initial cell - // locations in each direction used to find the position at cell crossings. - const double dxp = x_new - x_old; - const double dyp = y_new - y_old; - const double dzp = z_new - z_old; - const auto dirX_sign = static_cast(dxp < 0. ? -1. : 1.); - const auto dirY_sign = static_cast(dyp < 0. ? -1. : 1.); - const auto dirZ_sign = static_cast(dzp < 0. ? -1. : 1.); - double Xcell = 0., Ycell = 0., Zcell = 0.; - if (num_segments > 1) { - Xcell = static_cast(i_old) + shift + 0.5*(1.-dirX_sign); - Ycell = static_cast(j_old) + shift + 0.5*(1.-dirY_sign); - Zcell = static_cast(k_old) + shift + 0.5*(1.-dirZ_sign); - } - - // loop over the number of segments and deposit - const Compute_shape_factor< depos_order-1 > compute_shape_factor_cell; - const Compute_shape_factor_pair< depos_order > compute_shape_factors_node; - double dxp_seg, dyp_seg, dzp_seg; - double x0_new, y0_new, z0_new; - double x0_old = x_old; - double y0_old = y_old; - double z0_old = z_old; - - for (int ns=0; ns(xp_n[ip], yp_n[ip], zp_n[ip], xp_np1, yp_np1, zp_np1, wq, + uxp_nph[ip], uyp_nph[ip], uzp_nph[ip], gaminv, + Jx_arr, Jy_arr, Jz_arr, + dt, dinv, xyzmin, lo, invvol, n_rz_azimuthal_modes); - x0_new = x_new; - y0_new = y_new; - z0_new = z_new; - dxp_seg = x0_new - x0_old; - dyp_seg = y0_new - y0_old; - dzp_seg = z0_new - z0_old; - - } - else { - - x0_new = Xcell + dirX_sign; - y0_new = Ycell + dirY_sign; - z0_new = Zcell + dirZ_sign; - dxp_seg = x0_new - x0_old; - dyp_seg = y0_new - y0_old; - dzp_seg = z0_new - z0_old; - - if ( (dyp == 0. || std::abs(dxp_seg) < std::abs(dxp/dyp*dyp_seg)) - && (dzp == 0. || std::abs(dxp_seg) < std::abs(dxp/dzp*dzp_seg)) ) { - Xcell = x0_new; - dyp_seg = dyp/dxp*dxp_seg; - dzp_seg = dzp/dxp*dxp_seg; - y0_new = y0_old + dyp_seg; - z0_new = z0_old + dzp_seg; - } - else if (dzp == 0. 
|| std::abs(dyp_seg) < std::abs(dyp/dzp*dzp_seg)) { - Ycell = y0_new; - dxp_seg = dxp/dyp*dyp_seg; - dzp_seg = dzp/dyp*dyp_seg; - x0_new = x0_old + dxp_seg; - z0_new = z0_old + dzp_seg; - } - else { - Zcell = z0_new; - dxp_seg = dxp/dzp*dzp_seg; - dyp_seg = dyp/dzp*dzp_seg; - x0_new = x0_old + dxp_seg; - y0_new = y0_old + dyp_seg; - } - - } - - // compute the segment factors (each equal to dt_seg/dt for nonzero dxp, dyp, or dzp) - const auto seg_factor_x = static_cast(dxp == 0. ? 1. : dxp_seg/dxp); - const auto seg_factor_y = static_cast(dyp == 0. ? 1. : dyp_seg/dyp); - const auto seg_factor_z = static_cast(dzp == 0. ? 1. : dzp_seg/dzp); - - // compute cell-based weights using the average segment position - double sx_cell[depos_order] = {0.}; - double sy_cell[depos_order] = {0.}; - double sz_cell[depos_order] = {0.}; - double const x0_bar = (x0_new + x0_old)/2.0; - double const y0_bar = (y0_new + y0_old)/2.0; - double const z0_bar = (z0_new + z0_old)/2.0; - const int i0_cell = compute_shape_factor_cell( sx_cell, x0_bar-0.5 ); - const int j0_cell = compute_shape_factor_cell( sy_cell, y0_bar-0.5 ); - const int k0_cell = compute_shape_factor_cell( sz_cell, z0_bar-0.5 ); - - if constexpr (depos_order >= 3) { // higher-order correction to the cell-based weights - const Compute_shape_factor_pair compute_shape_factors_cell; - double sx_old_cell[depos_order] = {0.}; - double sx_new_cell[depos_order] = {0.}; - double sy_old_cell[depos_order] = {0.}; - double sy_new_cell[depos_order] = {0.}; - double sz_old_cell[depos_order] = {0.}; - double sz_new_cell[depos_order] = {0.}; - const int i0_cell_2 = compute_shape_factors_cell( sx_old_cell, sx_new_cell, x0_old-0.5, x0_new-0.5 ); - const int j0_cell_2 = compute_shape_factors_cell( sy_old_cell, sy_new_cell, y0_old-0.5, y0_new-0.5 ); - const int k0_cell_2 = compute_shape_factors_cell( sz_old_cell, sz_new_cell, z0_old-0.5, z0_new-0.5 ); - ignore_unused(i0_cell_2, j0_cell_2, k0_cell_2); - for (int m=0; m(x_old-shift); - const auto i_new = static_cast(x_new-shift); - const int cell_crossings_x = std::abs(i_new-i_old); - num_segments += cell_crossings_x; - - // compute cell crossings in Z-direction - const auto k_old = static_cast(z_old-shift); - const auto k_new = static_cast(z_new-shift); - const int cell_crossings_z = std::abs(k_new-k_old); - num_segments += cell_crossings_z; - - // need to assert that the number of cell crossings in each direction - // is within the range permitted by the number of guard cells - // e.g., if (num_segments > 5) ... - - // compute total change in particle position and the initial cell - // locations in each direction used to find the position at cell crossings. - const double dxp = x_new - x_old; - const double dzp = z_new - z_old; - const auto dirX_sign = static_cast(dxp < 0. ? -1. : 1.); - const auto dirZ_sign = static_cast(dzp < 0. ? -1. : 1.); - double Xcell = 0., Zcell = 0.; - if (num_segments > 1) { - Xcell = static_cast(i_old) + shift + 0.5*(1.-dirX_sign); - Zcell = static_cast(k_old) + shift + 0.5*(1.-dirZ_sign); - } - - // loop over the number of segments and deposit - const Compute_shape_factor< depos_order-1 > compute_shape_factor_cell; - const Compute_shape_factor_pair< depos_order > compute_shape_factors_node; - double dxp_seg, dzp_seg; - double x0_new, z0_new; - double x0_old = x_old; - double z0_old = z_old; - - for (int ns=0; ns(dxp == 0. ? 1. : dxp_seg/dxp); - const auto seg_factor_z = static_cast(dzp == 0. ? 1. 
: dzp_seg/dzp); - - // compute cell-based weights using the average segment position - double sx_cell[depos_order] = {0.}; - double sz_cell[depos_order] = {0.}; - double const x0_bar = (x0_new + x0_old)/2.0; - double const z0_bar = (z0_new + z0_old)/2.0; - const int i0_cell = compute_shape_factor_cell( sx_cell, x0_bar-0.5 ); - const int k0_cell = compute_shape_factor_cell( sz_cell, z0_bar-0.5 ); - - if constexpr (depos_order >= 3) { // higher-order correction to the cell-based weights - const Compute_shape_factor_pair compute_shape_factors_cell; - double sx_old_cell[depos_order] = {0.}; - double sx_new_cell[depos_order] = {0.}; - double sz_old_cell[depos_order] = {0.}; - double sz_new_cell[depos_order] = {0.}; - const int i0_cell_2 = compute_shape_factors_cell( sx_old_cell, sx_new_cell, x0_old-0.5, x0_new-0.5 ); - const int k0_cell_2 = compute_shape_factors_cell( sz_old_cell, sz_new_cell, z0_old-0.5, z0_new-0.5 ); - ignore_unused(i0_cell_2, k0_cell_2); - for (int m=0; m(z_old-shift); - const auto k_new = static_cast(z_new-shift); - const int cell_crossings_z = std::abs(k_new-k_old); - num_segments += cell_crossings_z; - - // need to assert that the number of cell crossings in each direction - // is within the range permitted by the number of guard cells - // e.g., if (num_segments > 3) ... - - // compute dzp and the initial cell location used to find the cell crossings. - double const dzp = z_new - z_old; - const auto dirZ_sign = static_cast(dzp < 0. ? -1. : 1.); - double Zcell = static_cast(k_old) + shift + 0.5*(1.-dirZ_sign); - - // loop over the number of segments and deposit - const Compute_shape_factor< depos_order-1 > compute_shape_factor_cell; - const Compute_shape_factor_pair< depos_order > compute_shape_factors_node; - double dzp_seg; - double z0_new; - double z0_old = z_old; - - for (int ns=0; ns(dzp == 0. ? 1. 
: dzp_seg/dzp); - - // compute cell-based weights using the average segment position - double sz_cell[depos_order] = {0.}; - double const z0_bar = (z0_new + z0_old)/2.0; - const int k0_cell = compute_shape_factor_cell( sz_cell, z0_bar-0.5 ); - - if constexpr (depos_order >= 3) { // higher-order correction to the cell-based weights - const Compute_shape_factor_pair compute_shape_factors_cell; - double sz_old_cell[depos_order] = {0.}; - double sz_new_cell[depos_order] = {0.}; - const int k0_cell_2 = compute_shape_factors_cell( sz_old_cell, sz_new_cell, z0_old-0.5, z0_new-0.5 ); - ignore_unused(k0_cell_2); - for (int m=0; m( + GetPosition, wp.dataPtr() + offset, + uxp.dataPtr() + offset, uyp.dataPtr() + offset, uzp.dataPtr() + offset, ion_lev, + jx_arr, jy_arr, jz_arr, np_to_deposit, dt, relative_time, dinv, xyzmin, lo, q, + WarpX::n_rz_azimuthal_modes); + } else if (WarpX::nox == 2){ + doVillasenorDepositionShapeNExplicit<2>( + GetPosition, wp.dataPtr() + offset, + uxp.dataPtr() + offset, uyp.dataPtr() + offset, uzp.dataPtr() + offset, ion_lev, + jx_arr, jy_arr, jz_arr, np_to_deposit, dt, relative_time, dinv, xyzmin, lo, q, + WarpX::n_rz_azimuthal_modes); + } else if (WarpX::nox == 3){ + doVillasenorDepositionShapeNExplicit<3>( + GetPosition, wp.dataPtr() + offset, + uxp.dataPtr() + offset, uyp.dataPtr() + offset, uzp.dataPtr() + offset, ion_lev, + jx_arr, jy_arr, jz_arr, np_to_deposit, dt, relative_time, dinv, xyzmin, lo, q, + WarpX::n_rz_azimuthal_modes); + } else if (WarpX::nox == 4){ + doVillasenorDepositionShapeNExplicit<4>( + GetPosition, wp.dataPtr() + offset, + uxp.dataPtr() + offset, uyp.dataPtr() + offset, uzp.dataPtr() + offset, ion_lev, + jx_arr, jy_arr, jz_arr, np_to_deposit, dt, relative_time, dinv, xyzmin, lo, q, + WarpX::n_rz_azimuthal_modes); + } } } else if (WarpX::current_deposition_algo == CurrentDepositionAlgo::Vay) { if (push_type == PushType::Implicit) { diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index c9e90850ee1..6c06fbe97bc 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -1187,15 +1187,6 @@ WarpX::ReadParameters () "Vay deposition not implemented with multi-J algorithm"); } - if (current_deposition_algo == CurrentDepositionAlgo::Villasenor) { - WARPX_ALWAYS_ASSERT_WITH_MESSAGE( - evolve_scheme == EvolveScheme::SemiImplicitEM || - evolve_scheme == EvolveScheme::ThetaImplicitEM || - evolve_scheme == EvolveScheme::StrangImplicitSpectralEM, - "Villasenor current deposition can only" - "be used with Implicit evolve schemes."); - } - // Query algo.field_gathering from input, set field_gathering_algo to // "default" if not found (default defined in Utils/WarpXAlgorithmSelection.cpp) pp_algo.query_enum_sloppy("field_gathering", field_gathering_algo, "-_"); From b858e36c24dab324f4fd951b62540ec76843ea76 Mon Sep 17 00:00:00 2001 From: David Grote Date: Mon, 3 Mar 2025 16:51:46 -0800 Subject: [PATCH 268/278] Synchronize velocity for diagnostics (#1751) This PR allows the synchronization in time of the particle velocities and positions when generating diagnostics. Without this option, the particle velocities will lag behind the position by a half time step. This adds the boolean input parameter `warpx.synchronize_velocity_for_diagnostics` to turn on this option, defaulting to false. 
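For reference, a minimal sketch of enabling this option from an input file (the new 1D test added below uses exactly this line; in PICMI it is exposed as the `warpx_synchronize_velocity` argument of `Simulation`):

```
warpx.synchronize_velocity_for_diagnostics = 1
```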
There are several pieces to this PR: - Changes to `MultiDiagnostic` and `MultiReducedDiags` adding routines to check if any diagnostics will be done - Adds a call to `PushP` to just before the diagnostics are done (to get the updated fields from the electrostatic calculation) - Add the appropriate documentation What `Evolve` does is if the synchronization is to be done, advance the velocity a half step just before the diagnostics and sets `is_synchronized=true`. Then at the start of the next step, if `is_synchronized` is true, push the velocities back a half step to be ready for the full leap frog advance. Comments: - Is the documentation in the correct place in parameters.rst? - The reduced diagnostics could perhaps use the new DoDiags method instead of accessing `m_intervals` in its ComputeDiags. - This PR leaves the original PushP unchanged, even though it is possibly buggy. That PushP fetches the fields, but uses the particle positions before the particle boundary conditions have been applied, leading to a possible out of bounds reference. Also, that PushP may not be consistent with the backwards PushP since the fields may had changed. Comments are added to the code to note this potential problem. I avoided changing this since it breaks many CI tests. --------- Co-authored-by: Edoardo Zoni Co-authored-by: Axel Huebl Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> Co-authored-by: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> --- Docs/source/usage/parameters.rst | 6 ++ Examples/Tests/single_particle/CMakeLists.txt | 10 ++++ .../analysis_synchronize_velocity.py | 60 +++++++++++++++++++ .../inputs_test_1d_synchronize_velocity | 36 +++++++++++ Python/pywarpx/picmi.py | 9 +++ Source/Diagnostics/MultiDiagnostics.H | 2 + Source/Diagnostics/MultiDiagnostics.cpp | 10 ++++ .../ReducedDiags/MultiReducedDiags.H | 3 + .../ReducedDiags/MultiReducedDiags.cpp | 12 ++++ .../Diagnostics/ReducedDiags/ReducedDiags.H | 3 + Source/Evolve/WarpXEvolve.cpp | 29 +++++++-- Source/WarpX.H | 2 + Source/WarpX.cpp | 2 + 13 files changed, 180 insertions(+), 4 deletions(-) create mode 100755 Examples/Tests/single_particle/analysis_synchronize_velocity.py create mode 100644 Examples/Tests/single_particle/inputs_test_1d_synchronize_velocity diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index 735504fc7d7..c31a1cf88b9 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2744,6 +2744,12 @@ WarpX has five types of diagnostics: Similar to what is done for physical species, WarpX has a class Diagnostics that allows users to initialize different diagnostics, each of them with different fields, resolution and period. This currently applies to standard diagnostics, but should be extended to back-transformed diagnostics and reduced diagnostics (and others) in a near future. +* ``warpx.synchronize_velocity_for_diagnostics`` (``0`` or ``1``, optional, default ``0``) + Whether to synchronize the particle velocities with the particle positions in the diagnostics. + In its normal operation, WarpX is using the leap frog algorithm to advance the particles, and leaves the positions and velocities of the particles unsynchronized at the end of each time step, with the velocities lagging behind a half step. + When this option is turned on, whenever any diagnostics will be calculated, the velocities will be advanced a half step to + synchronize with the position before the diagnostics are generated. + .. 
_running-cpp-parameters-diagnostics-full: Full Diagnostics diff --git a/Examples/Tests/single_particle/CMakeLists.txt b/Examples/Tests/single_particle/CMakeLists.txt index fb823b39431..4282376a20f 100644 --- a/Examples/Tests/single_particle/CMakeLists.txt +++ b/Examples/Tests/single_particle/CMakeLists.txt @@ -10,3 +10,13 @@ add_warpx_test( "analysis_default_regression.py --path diags/diag1000001" # checksum OFF # dependency ) + +add_warpx_test( + test_1d_synchronize_velocity # name + 1 # dims + 1 # nprocs + inputs_test_1d_synchronize_velocity # inputs + "analysis_synchronize_velocity.py diags/diag1000005" # analysis + OFF # checksum + OFF # dependency +) diff --git a/Examples/Tests/single_particle/analysis_synchronize_velocity.py b/Examples/Tests/single_particle/analysis_synchronize_velocity.py new file mode 100755 index 00000000000..b4540e7e0ba --- /dev/null +++ b/Examples/Tests/single_particle/analysis_synchronize_velocity.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python3 + +# Copyright 2025 David Grote +# +# This file is part of WarpX. +# +# License: BSD-3-Clause-LBNL + +import sys + +import numpy as np +import yt + +# scipy.constants use CODATA2022 +# from scipy.constants import e, m_e, c + +# These are CODATA2018 values, as used in WarpX +e = 1.602176634e-19 +m_e = 9.1093837015e-31 +c = 299792458.0 + +# Integrate the test particle 5 timesteps, ending up with the position +# and velocity synchronized. +# In the simulation, with the synchronize_velocity_for_diagnostics flag set, +# the velocity will be synchronized at the end of step 5 when the diagnostics +# are written (even though that is not the final time step). + +z = 0.1 +uz = 0.0 +Ez = -1.0 +dt = 1.0e-6 + +# Half backward advance of velocity +uz -= -e / m_e * Ez * dt / 2.0 + +# Leap frog advance +for _ in range(5): + uz += -e / m_e * Ez * dt + g = np.sqrt((uz / c) ** 2 + 1.0) + z += (uz / g) * dt + +# Add half v advance to synchronize +uz += -e / m_e * Ez * dt / 2.0 + +filename = sys.argv[1] +ds = yt.load(filename) +ad = ds.all_data() +z_sim = ad["electron", "particle_position_x"] +uz_sim = ad["electron", "particle_momentum_z"] / m_e + +print(f"Analysis Z = {z:18.16f}, Uz = {uz:18.10f}") +print(f"Simulation Z = {z_sim.v[0]:18.16f}, Uz = {uz_sim.v[0]:18.10f}") + +tolerance_rel = 1.0e-15 +error_rel = np.abs((uz - uz_sim.v[0]) / uz) + +print("error_rel : " + str(error_rel)) +print("tolerance_rel: " + str(tolerance_rel)) + +assert error_rel < tolerance_rel diff --git a/Examples/Tests/single_particle/inputs_test_1d_synchronize_velocity b/Examples/Tests/single_particle/inputs_test_1d_synchronize_velocity new file mode 100644 index 00000000000..e0e3b427150 --- /dev/null +++ b/Examples/Tests/single_particle/inputs_test_1d_synchronize_velocity @@ -0,0 +1,36 @@ +max_step = 8 +amr.n_cell = 8 +amr.max_level = 0 +amr.blocking_factor = 8 +amr.max_grid_size = 8 +geometry.dims = 1 +geometry.prob_lo = 0 +geometry.prob_hi = 3 + +# Boundary condition +boundary.field_lo = pec +boundary.field_hi = pec + +algo.maxwell_solver = none + +warpx.const_dt = 1.e-6 +warpx.synchronize_velocity_for_diagnostics = 1 + +# Order of particle shape factors +algo.particle_shape = 1 + +particles.species_names = electron +electron.species_type = electron +electron.injection_style = "SingleParticle" +electron.single_particle_pos = 0.0 0.0 0.1 +electron.single_particle_u = 0.0 0.0 0.0 +electron.single_particle_weight = 1.0 + +# Apply a uniform Ez +particles.E_ext_particle_init_style = constant +particles.E_external_particle = 0.0 0.0 -1.0 + +# Diagnostics 
+diagnostics.diags_names = diag1 +diag1.intervals = 5 +diag1.diag_type = Full diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 9606fc70136..7e08acb3cec 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -2898,6 +2898,11 @@ class Simulation(picmistandard.PICMI_Simulation): warpx_checkpoint_signals: list of strings Signals on which to write out a checkpoint + warpx_synchronize_velocity: bool, default=False + Flags whether the particle velocities are synchronized in time with + the positions in the diagnostics. When False, the particles are + one half step behind the positions (except for the final diagnostic). + warpx_numprocs: list of ints (1 in 1D, 2 in 2D, 3 in 3D) Domain decomposition on the coarsest level. The domain will be chopped into the exact number of pieces in each dimension as specified by this parameter. @@ -3017,6 +3022,8 @@ def init(self, kw): self.reduced_diags_separator = kw.pop("warpx_reduced_diags_separator", None) self.reduced_diags_precision = kw.pop("warpx_reduced_diags_precision", None) + self.synchronize_velocity = kw.pop("warpx_synchronize_velocity", None) + self.inputs_initialized = False self.warpx_initialized = False @@ -3081,6 +3088,8 @@ def initialize_inputs(self): pywarpx.warpx.break_signals = self.break_signals pywarpx.warpx.checkpoint_signals = self.checkpoint_signals + pywarpx.warpx.synchronize_velocity_for_diagnostics = self.synchronize_velocity + pywarpx.warpx.numprocs = self.numprocs reduced_diags = pywarpx.warpx.get_bucket("reduced_diags") diff --git a/Source/Diagnostics/MultiDiagnostics.H b/Source/Diagnostics/MultiDiagnostics.H index a22e20b44da..d4a9e762bbc 100644 --- a/Source/Diagnostics/MultiDiagnostics.H +++ b/Source/Diagnostics/MultiDiagnostics.H @@ -23,6 +23,8 @@ public: void ReadParameters (); /** \brief Loop over diags in alldiags and call their InitDiags */ void InitData (); + /** Check if any diagnostics will do compute and pack. */ + bool DoComputeAndPack (int step, bool force_flush=false); /** \brief Called at each iteration. Compute diags and flush. */ void FilterComputePackFlush (int step, bool force_flush=false, bool BackTransform=false); /** \brief Called only at the last iteration. 
Loop over each diag and if m_dump_last_timestep diff --git a/Source/Diagnostics/MultiDiagnostics.cpp b/Source/Diagnostics/MultiDiagnostics.cpp index 2119ac276f9..24285f6460a 100644 --- a/Source/Diagnostics/MultiDiagnostics.cpp +++ b/Source/Diagnostics/MultiDiagnostics.cpp @@ -77,6 +77,16 @@ MultiDiagnostics::ReadParameters () } } +bool +MultiDiagnostics::DoComputeAndPack (int step, bool force_flush) +{ + bool result = false; + for( auto& diag : alldiags ){ + result = result || diag->DoComputeAndPack(step, force_flush); + } + return result; +} + void MultiDiagnostics::FilterComputePackFlush (int step, bool force_flush, bool BackTransform) { diff --git a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.H b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.H index 5a782db7118..bf43d0c6d69 100644 --- a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.H +++ b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.H @@ -57,6 +57,9 @@ public: * @param[in] step current iteration time */ void WriteToFile (int step); + /** Check if any diagnostics will be done */ + bool DoDiags(int step); + /** \brief Loop over all ReducedDiags and call their WriteCheckpointData * @param[in] dir checkpoint directory */ void WriteCheckpointData (std::string const & dir); diff --git a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp index e4c982f7323..279f9a1c2ca 100644 --- a/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp +++ b/Source/Diagnostics/ReducedDiags/MultiReducedDiags.cpp @@ -161,6 +161,18 @@ void MultiReducedDiags::WriteToFile (int step) } // end void MultiReducedDiags::WriteToFile +// Check if any diagnostics will be done +bool MultiReducedDiags::DoDiags(int step) +{ + bool result = false; + for (int i_rd = 0; i_rd < static_cast<int>(m_rd_names.size()); ++i_rd) + { + result = result || m_multi_rd[i_rd] -> DoDiags(step); + } + return result; +} +// end bool MultiReducedDiags::DoDiags + void MultiReducedDiags::WriteCheckpointData (std::string const & dir) { // Only the I/O rank does diff --git a/Source/Diagnostics/ReducedDiags/ReducedDiags.H b/Source/Diagnostics/ReducedDiags/ReducedDiags.H index a32de30cc6f..7b61501e27c 100644 --- a/Source/Diagnostics/ReducedDiags/ReducedDiags.H +++ b/Source/Diagnostics/ReducedDiags/ReducedDiags.H @@ -97,6 +97,9 @@ public: */ virtual void WriteToFile (int step) const; + /** Check if diag should be done */ + [[nodiscard]] bool DoDiags(int step) const { return m_intervals.contains(step+1); } + /** * \brief Write out checkpoint related data * diff --git a/Source/Evolve/WarpXEvolve.cpp b/Source/Evolve/WarpXEvolve.cpp index 5593642a944..5843cab0227 100644 --- a/Source/Evolve/WarpXEvolve.cpp +++ b/Source/Evolve/WarpXEvolve.cpp @@ -83,6 +83,13 @@ WarpX::Synchronize () { using ablastr::fields::Direction; using warpx::fields::FieldType; + // Note that this is potentially buggy since the PushP will do a field gather + // using particles that have been pushed but not yet checked at the boundaries. + // Also, this PushP may be inconsistent with the PushP backwards above since + // the fields may change between the two (mainly affecting the Python version when + // using electrostatics). + // When synchronize_velocity_for_diagnostics is true, the PushP at the end of the + // step is used so that the correct behavior is obtained.
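For reference, the flag referenced in the comment above is user-facing. A minimal PICMI sketch of enabling it follows; only the `warpx_synchronize_velocity` keyword is from this patch, while the rest (in particular the `solver` object) is placeholder setup, not part of the change:

```python
from pywarpx import picmi

# Minimal sketch: `solver` is assumed to be an already-constructed PICMI solver.
sim = picmi.Simulation(
    solver=solver,
    time_step_size=1.0e-6,
    max_steps=8,
    # Added in this patch: synchronize particle velocities with positions
    # whenever diagnostics are written, not only on the final step.
    warpx_synchronize_velocity=True,
)
```

The native-input equivalent, `warpx.synchronize_velocity_for_diagnostics = 1`, is exercised by the new 1D test added above.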
FillBoundaryE(guard_cells.ng_FieldGather); FillBoundaryB(guard_cells.ng_FieldGather); if (fft_do_time_averaging) @@ -235,9 +242,12 @@ WarpX::Evolve (int numsteps) } // TODO: move out + bool const end_of_step_loop = (step == numsteps_max - 1) || (cur_time + dt[0] >= stop_time - 1.e-3*dt[0]); if (evolve_scheme == EvolveScheme::Explicit) { - // At the end of last step, push p by 0.5*dt to synchronize - if (cur_time + dt[0] >= stop_time - 1.e-3*dt[0] || step == numsteps_max-1) { + // At the end of the step loop, push p by 0.5*dt to synchronize. + // This synchronization is not at the correct place since it is done before the window is moved, + // before particles are scraped, and before the electrostatic field update. + if (end_of_step_loop && !synchronize_velocity_for_diagnostics) { Synchronize(); } } @@ -309,6 +319,15 @@ WarpX::Evolve (int numsteps) ExecutePythonCallback("afterEsolve"); } + bool const do_diagnostic = (multi_diags->DoComputeAndPack(step) || reduced_diags->DoDiags(step)); + if (synchronize_velocity_for_diagnostics && + (do_diagnostic || end_of_step_loop)) { + // When the diagnostics require synchronization, push p by 0.5*dt to synchronize. + // Note that this will be undone at the start of the next step by the half v-push + // backwards. + Synchronize(); + } + // afterstep callback runs with the updated global time. It is included // in the evolve timing. ExecutePythonCallback("afterstep"); @@ -353,8 +372,10 @@ WarpX::Evolve (int numsteps) // This if statement is needed for PICMI, which allows the Evolve routine to be // called multiple times, otherwise diagnostics will be done at every call, // regardless of the diagnostic period parameter provided in the inputs. - if (istep[0] == max_step || (stop_time - 1.e-3*dt[0] <= cur_time && cur_time < stop_time + dt[0]) - || m_exit_loop_due_to_interrupt_signal) { + bool const final_time_step = (istep[0] == max_step) + || (cur_time >= stop_time - 1.e-3*dt[0] + && cur_time < stop_time + dt[0]); + if (final_time_step || m_exit_loop_due_to_interrupt_signal) { multi_diags->FilterComputePackFlushLastTimestep( istep[0] ); if (m_exit_loop_due_to_interrupt_signal) { ExecutePythonCallback("onbreaksignal"); } } diff --git a/Source/WarpX.H b/Source/WarpX.H index 4f6024d426d..b70d23b9e33 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -1460,6 +1460,8 @@ private: amrex::VisMF::Header::Version plotfile_headerversion = amrex::VisMF::Header::Version_v1; amrex::VisMF::Header::Version slice_plotfile_headerversion = amrex::VisMF::Header::Version_v1; + bool synchronize_velocity_for_diagnostics = false; + bool use_single_read = true; bool use_single_write = true; int mffile_nstreams = 4; diff --git a/Source/WarpX.cpp b/Source/WarpX.cpp index 6c06fbe97bc..f129987b991 100644 --- a/Source/WarpX.cpp +++ b/Source/WarpX.cpp @@ -926,6 +926,8 @@ WarpX::ReadParameters () "J-damping can only be done when PML are inside simulation domain (do_pml_in_domain=1)" ); + pp_warpx.query("synchronize_velocity_for_diagnostics", synchronize_velocity_for_diagnostics); + { // Parameters below control all plotfile diagnostics bool plotfile_min_max = true; From 645a7a3892d939258d395ec486960dea499a4cc8 Mon Sep 17 00:00:00 2001 From: Roelof Groenewald <40245517+roelof-groenewald@users.noreply.github.com> Date: Mon, 3 Mar 2025 17:17:35 -0800 Subject: [PATCH 269/278] DSMC: Add impact ionization (#5654) This PR extends the work already done by @RemiLehe and @oshapoval in #5524.
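In PICMI form, the updated interface described below looks roughly like the following sketch (the species objects and the cross-section path are placeholders; a complete, working version is in the updated capacitive-discharge example in this PR):

```python
from pywarpx import picmi

# Sketch of DSMC electron-impact ionization. `electrons`, `neutrals` and
# `ions` are assumed to be existing picmi.Species objects (placeholders here).
ionization = {
    "ionization": {
        "cross_section": "ionization.dat",  # placeholder path to a cross-section table
        "energy": 24.55,                    # energy cost of ionization in eV
        "target_species": neutrals,         # the colliding species that is ionized
    }
}
collisions = picmi.DSMCCollisions(
    name="coll_elec_dsmc",
    species=[electrons, neutrals],
    product_species=[ions, electrons],
    ndt=4,
    scattering_processes=ionization,
)
```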
Some changes were made to how the ionization process is initialized; for example, the user must now specify the `target_species` (i.e., the species which undergoes ionization) as well as the `product_species`. The product species can include the colliding species (for example, electron + neutral -> 2 x electron + ion), but do not have to (for example, H$^+$ + D -> H$^+$ + D$^+$ + electron). The test created by @archermarx is now passing (at least early on): ![image](https://github.com/user-attachments/assets/344476b8-bc63-4395-92c1-d795183048b9) Todo: - [x] test implementation - [x] fix scattering process to conserve momentum - [x] clean up code --------- Signed-off-by: roelof-groenewald Co-authored-by: Remi Lehe Co-authored-by: Olga Shapoval Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- Docs/source/usage/parameters.rst | 8 +- .../capacitive_discharge/analysis_dsmc.py | 66 ++-- .../inputs_base_1d_picmi.py | 76 +++-- Examples/Tests/CMakeLists.txt | 1 + Examples/Tests/ionization_dsmc/CMakeLists.txt | 12 + .../analysis_default_regression.py | 1 + .../analysis_ionization_dsmc_3d.py | 222 +++++++++++++ .../inputs_test_3d_ionization_dsmc | 116 +++++++ Python/pywarpx/picmi.py | 15 +- .../benchmarks_json/test_1d_dsmc_picmi.json | 38 +-- .../test_3d_ionization_dsmc.json | 38 +++ .../BackgroundMCC/BackgroundMCCCollision.cpp | 2 +- .../BinaryCollision/BinaryCollision.H | 34 +- .../DSMC/CollisionFilterFunc.H | 1 - .../Collision/BinaryCollision/DSMC/DSMCFunc.H | 1 + .../BinaryCollision/DSMC/DSMCFunc.cpp | 38 ++- .../DSMC/SplitAndScatterFunc.H | 314 ++++++++++++++++-- .../DSMC/SplitAndScatterFunc.cpp | 71 ++-- 18 files changed, 905 insertions(+), 149 deletions(-) create mode 100644 Examples/Tests/ionization_dsmc/CMakeLists.txt create mode 120000 Examples/Tests/ionization_dsmc/analysis_default_regression.py create mode 100755 Examples/Tests/ionization_dsmc/analysis_ionization_dsmc_3d.py create mode 100644 Examples/Tests/ionization_dsmc/inputs_test_3d_ionization_dsmc create mode 100644 Regression/Checksum/benchmarks_json/test_3d_ionization_dsmc.json diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index c31a1cf88b9..b2547b5126c 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2070,7 +2070,7 @@ Details about the collision models can be found in the :ref:`theory section <multiphysics-collisions>`. * ``<collision_name>.product_species`` (`strings`) - Only for ``nuclearfusion``. The name(s) of the species in which to add + Only for ``dsmc`` and ``nuclearfusion``. The name(s) of the species in which to add the new macroparticles created by the reaction. * ``<collision_name>.ndt`` (`int`) optional @@ -2185,7 +2185,7 @@ Details about the collision models can be found in the :ref:`theory section <multiphysics-collisions>`. * ``<collision_name>.<scattering_process>_energy`` (`float`) - Only for ``background_mcc``. If the scattering process is either + Only for ``dsmc`` and ``background_mcc``. If the scattering process is either ``excitationX`` or ``ionization`` the energy cost of that process must be given in eV. * ``<collision_name>.ionization_species`` (`float`) @@ -2193,6 +2193,10 @@ Details about the collision models can be found in the :ref:`theory section <multiphysics-collisions>`. * ``<collision_name>.ionization_target_species`` (`string`) + Only for ``dsmc`` with impact ionization. This specifies which one of the + colliding particles is ionized. + ..
_running-cpp-parameters-numerics: Numerics and algorithms diff --git a/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py b/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py index cdaa6bed58f..e0ffd794946 100755 --- a/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py +++ b/Examples/Physics_applications/capacitive_discharge/analysis_dsmc.py @@ -6,39 +6,39 @@ # fmt: off ref_density = np.array([ - 1.27942709e+14, 2.23579371e+14, 2.55384387e+14, 2.55660663e+14, - 2.55830911e+14, 2.55814337e+14, 2.55798906e+14, 2.55744891e+14, - 2.55915585e+14, 2.56083194e+14, 2.55942354e+14, 2.55833026e+14, - 2.56036175e+14, 2.56234141e+14, 2.56196179e+14, 2.56146141e+14, - 2.56168022e+14, 2.56216909e+14, 2.56119961e+14, 2.56065167e+14, - 2.56194764e+14, 2.56416398e+14, 2.56465239e+14, 2.56234337e+14, - 2.56234503e+14, 2.56316003e+14, 2.56175023e+14, 2.56030269e+14, - 2.56189301e+14, 2.56286379e+14, 2.56130396e+14, 2.56295225e+14, - 2.56474082e+14, 2.56340375e+14, 2.56350864e+14, 2.56462330e+14, - 2.56469391e+14, 2.56412726e+14, 2.56241788e+14, 2.56355650e+14, - 2.56650599e+14, 2.56674748e+14, 2.56642480e+14, 2.56823508e+14, - 2.57025029e+14, 2.57110614e+14, 2.57042364e+14, 2.56950884e+14, - 2.57051822e+14, 2.56952148e+14, 2.56684016e+14, 2.56481130e+14, - 2.56277073e+14, 2.56065774e+14, 2.56190033e+14, 2.56411074e+14, - 2.56202418e+14, 2.56128368e+14, 2.56227002e+14, 2.56083004e+14, - 2.56056768e+14, 2.56343831e+14, 2.56443659e+14, 2.56280541e+14, - 2.56191572e+14, 2.56147304e+14, 2.56342794e+14, 2.56735473e+14, - 2.56994680e+14, 2.56901500e+14, 2.56527131e+14, 2.56490824e+14, - 2.56614730e+14, 2.56382744e+14, 2.56588214e+14, 2.57160270e+14, - 2.57230435e+14, 2.57116530e+14, 2.57065771e+14, 2.57236507e+14, - 2.57112865e+14, 2.56540177e+14, 2.56416828e+14, 2.56648954e+14, - 2.56625594e+14, 2.56411003e+14, 2.56523754e+14, 2.56841108e+14, - 2.56856368e+14, 2.56757912e+14, 2.56895134e+14, 2.57144419e+14, - 2.57001944e+14, 2.56371759e+14, 2.56179404e+14, 2.56541905e+14, - 2.56715727e+14, 2.56851681e+14, 2.57114458e+14, 2.57001739e+14, - 2.56825690e+14, 2.56879682e+14, 2.56699673e+14, 2.56532841e+14, - 2.56479582e+14, 2.56630989e+14, 2.56885996e+14, 2.56694637e+14, - 2.56250819e+14, 2.56045278e+14, 2.56366075e+14, 2.56693733e+14, - 2.56618530e+14, 2.56580918e+14, 2.56812781e+14, 2.56754216e+14, - 2.56444736e+14, 2.56473391e+14, 2.56538398e+14, 2.56626551e+14, - 2.56471950e+14, 2.56274969e+14, 2.56489423e+14, 2.56645266e+14, - 2.56611124e+14, 2.56344324e+14, 2.56244156e+14, 2.24183727e+14, - 1.27909856e+14 + 1.27939695e+14, 2.23589080e+14, 2.55400046e+14, 2.55652603e+14, + 2.55810704e+14, 2.55816145e+14, 2.55810457e+14, 2.55743643e+14, + 2.55908052e+14, 2.56076623e+14, 2.55948081e+14, 2.55841574e+14, + 2.56029524e+14, 2.56320511e+14, 2.56608595e+14, 2.56755504e+14, + 2.56699377e+14, 2.56700767e+14, 2.56497253e+14, 2.56481560e+14, + 2.56832303e+14, 2.57064841e+14, 2.57023000e+14, 2.56614315e+14, + 2.56368670e+14, 2.56370666e+14, 2.56227710e+14, 2.56240281e+14, + 2.56673842e+14, 2.56837209e+14, 2.56625623e+14, 2.56729845e+14, + 2.56975973e+14, 2.56801701e+14, 2.56491181e+14, 2.56516559e+14, + 2.56468688e+14, 2.56251727e+14, 2.56243466e+14, 2.56484137e+14, + 2.56637978e+14, 2.56448971e+14, 2.56140684e+14, 2.56117358e+14, + 2.56274706e+14, 2.56233588e+14, 2.56047578e+14, 2.56087060e+14, + 2.56365128e+14, 2.56357745e+14, 2.56269776e+14, 2.56419914e+14, + 2.56392856e+14, 2.56202826e+14, 2.56363244e+14, 2.56572545e+14, + 2.56351695e+14, 2.56393353e+14, 2.56759784e+14, 
2.56767115e+14, + 2.56700246e+14, 2.56618056e+14, 2.56234915e+14, 2.56237788e+14, + 2.56606031e+14, 2.56520133e+14, 2.56316818e+14, 2.56184858e+14, + 2.56246807e+14, 2.56626394e+14, 2.56747253e+14, 2.56630112e+14, + 2.56518940e+14, 2.56358089e+14, 2.56249884e+14, 2.56271535e+14, + 2.56420396e+14, 2.56704340e+14, 2.56912250e+14, 2.56823163e+14, + 2.56694985e+14, 2.56822690e+14, 2.56736406e+14, 2.56438911e+14, + 2.56359312e+14, 2.56356028e+14, 2.56415261e+14, 2.56408702e+14, + 2.56267048e+14, 2.56274807e+14, 2.56494202e+14, 2.56789842e+14, + 2.56939719e+14, 2.56875327e+14, 2.56831776e+14, 2.56827482e+14, + 2.56698383e+14, 2.56712727e+14, 2.56879409e+14, 2.56629297e+14, + 2.56322165e+14, 2.56377317e+14, 2.56277894e+14, 2.56112364e+14, + 2.56171697e+14, 2.56370929e+14, 2.56855124e+14, 2.57621107e+14, + 2.57656000e+14, 2.56760729e+14, 2.56449741e+14, 2.56716250e+14, + 2.56721224e+14, 2.56506121e+14, 2.56236691e+14, 2.56270200e+14, + 2.56745053e+14, 2.56940581e+14, 2.56539958e+14, 2.56403313e+14, + 2.56600509e+14, 2.56776206e+14, 2.56884434e+14, 2.56755321e+14, + 2.56558818e+14, 2.56400159e+14, 2.56223931e+14, 2.23879043e+14, + 1.27601051e+14 ]) # fmt: on diff --git a/Examples/Physics_applications/capacitive_discharge/inputs_base_1d_picmi.py b/Examples/Physics_applications/capacitive_discharge/inputs_base_1d_picmi.py index a03cf1954ad..0d1b9755af8 100644 --- a/Examples/Physics_applications/capacitive_discharge/inputs_base_1d_picmi.py +++ b/Examples/Physics_applications/capacitive_discharge/inputs_base_1d_picmi.py @@ -268,32 +268,55 @@ def setup_run(self): ####################################################################### cross_sec_direc = "../../../../warpx-data/MCC_cross_sections/He/" - electron_colls = picmi.MCCCollisions( - name="coll_elec", - species=self.electrons, - background_density=self.gas_density, - background_temperature=self.gas_temp, - background_mass=self.ions.mass, - ndt=self.mcc_subcycling_steps, - scattering_processes={ - "elastic": { - "cross_section": cross_sec_direc + "electron_scattering.dat" - }, - "excitation1": { - "cross_section": cross_sec_direc + "excitation_1.dat", - "energy": 19.82, - }, - "excitation2": { - "cross_section": cross_sec_direc + "excitation_2.dat", - "energy": 20.61, - }, - "ionization": { - "cross_section": cross_sec_direc + "ionization.dat", - "energy": 24.55, - "species": self.ions, - }, + + electron_scattering_processes = { + "elastic": {"cross_section": cross_sec_direc + "electron_scattering.dat"}, + "excitation1": { + "cross_section": cross_sec_direc + "excitation_1.dat", + "energy": 19.82, }, - ) + "excitation2": { + "cross_section": cross_sec_direc + "excitation_2.dat", + "energy": 20.61, + }, + "ionization": { + "cross_section": cross_sec_direc + "ionization.dat", + "energy": 24.55, + "species": self.ions, + }, + } + if self.dsmc: + ionization = {"ionization": electron_scattering_processes.pop("ionization")} + ionization["ionization"]["target_species"] = self.neutrals + ionization["ionization"].pop("species") + electron_colls_dsmc = picmi.DSMCCollisions( + name="coll_elec_dsmc", + species=[self.electrons, self.neutrals], + product_species=[self.ions, self.electrons], + ndt=4, + scattering_processes=ionization, + ) + electron_colls_mcc = picmi.MCCCollisions( + name="coll_elec", + species=self.electrons, + background_density=self.gas_density, + background_temperature=self.gas_temp, + background_mass=self.ions.mass, + ndt=self.mcc_subcycling_steps, + scattering_processes=electron_scattering_processes, + ) + electron_colls = 
[electron_colls_mcc, electron_colls_dsmc] + else: + electron_colls_mcc = picmi.MCCCollisions( + name="coll_elec", + species=self.electrons, + background_density=self.gas_density, + background_temperature=self.gas_temp, + background_mass=self.ions.mass, + ndt=self.mcc_subcycling_steps, + scattering_processes=electron_scattering_processes, + ) + electron_colls = [electron_colls_mcc] ion_scattering_processes = { "elastic": {"cross_section": cross_sec_direc + "ion_scattering.dat"}, @@ -316,6 +339,7 @@ def setup_run(self): ndt=self.mcc_subcycling_steps, scattering_processes=ion_scattering_processes, ) + ion_colls = [ion_colls] ####################################################################### # Initialize simulation # @@ -325,7 +349,7 @@ def setup_run(self): solver=self.solver, time_step_size=self.dt, max_steps=self.max_steps, - warpx_collisions=[electron_colls, ion_colls], + warpx_collisions=electron_colls + ion_colls, verbose=self.test, ) self.solver.sim = self.sim diff --git a/Examples/Tests/CMakeLists.txt b/Examples/Tests/CMakeLists.txt index b80e6158f49..b8ea7ec3e0b 100644 --- a/Examples/Tests/CMakeLists.txt +++ b/Examples/Tests/CMakeLists.txt @@ -26,6 +26,7 @@ add_subdirectory(gaussian_beam) add_subdirectory(implicit) add_subdirectory(initial_distribution) add_subdirectory(initial_plasma_profile) +add_subdirectory(ionization_dsmc) add_subdirectory(field_ionization) add_subdirectory(ion_stopping) add_subdirectory(langmuir) diff --git a/Examples/Tests/ionization_dsmc/CMakeLists.txt b/Examples/Tests/ionization_dsmc/CMakeLists.txt new file mode 100644 index 00000000000..db8d3cb7b2a --- /dev/null +++ b/Examples/Tests/ionization_dsmc/CMakeLists.txt @@ -0,0 +1,12 @@ +# Add tests (alphabetical order) ############################################## +# + +add_warpx_test( + test_3d_ionization_dsmc # name + 3 # dims + 2 # nprocs + inputs_test_3d_ionization_dsmc # inputs + "analysis_ionization_dsmc_3d.py" # analysis + "analysis_default_regression.py --path diags/diag1000250" # checksum + OFF # dependency +) diff --git a/Examples/Tests/ionization_dsmc/analysis_default_regression.py b/Examples/Tests/ionization_dsmc/analysis_default_regression.py new file mode 120000 index 00000000000..d8ce3fca419 --- /dev/null +++ b/Examples/Tests/ionization_dsmc/analysis_default_regression.py @@ -0,0 +1 @@ +../../analysis_default_regression.py \ No newline at end of file diff --git a/Examples/Tests/ionization_dsmc/analysis_ionization_dsmc_3d.py b/Examples/Tests/ionization_dsmc/analysis_ionization_dsmc_3d.py new file mode 100755 index 00000000000..294896a1761 --- /dev/null +++ b/Examples/Tests/ionization_dsmc/analysis_ionization_dsmc_3d.py @@ -0,0 +1,222 @@ +#!/usr/bin/env python3 +# DSMC ionization test script: +# - compares WarpX simulation results with theoretical model predictions. 
+ +import matplotlib.pyplot as plt +import numpy as np +import tqdm +from openpmd_viewer import OpenPMDTimeSeries +from scipy.stats import qmc + +from pywarpx import picmi + +constants = picmi.constants + +ts = OpenPMDTimeSeries("diags/diag2/") + +q_e = constants.q_e +m_p = constants.m_p +m_e = constants.m_e +k_B = constants.kb +ep0 = constants.ep0 +clight = constants.c + +plasma_density = 1e14 +neutral_density = 1e20 +dt = 1e-9 +electron_temp = 10 +neutral_temp = 300 +max_steps = 250 +max_time = max_steps * dt + +L = [0.1] * 3 + +sigma_iz_file = "../../../../warpx-data/MCC_cross_sections/Xe/ionization.dat" +iz_data = np.loadtxt(sigma_iz_file) + +energy_eV = iz_data[:, 0] +sigma_m2 = iz_data[:, 1] +iz_energy = energy_eV[0] + + +def get_Te(ts): + T_e = [] + for iteration in tqdm.tqdm(ts.iterations): + ux, uy, uz = ts.get_particle( + ["ux", "uy", "uz"], species="electrons", iteration=iteration + ) + v_std_x = np.std(ux * clight) + v_std_y = np.std(uy * clight) + v_std_z = np.std(uz * clight) + v_std = (v_std_x + v_std_y + v_std_z) / 3 + T_e.append(m_e * v_std**2 / q_e) + return T_e + + +def get_density(ts): + number_data = np.genfromtxt("diags/counts.txt") + Te = get_Te(ts) + total_volume = L[0] * L[1] * L[2] + electron_weight = number_data[:, 8] + neutral_weight = number_data[:, 9] + ne = electron_weight / total_volume + nn = neutral_weight / total_volume + return [ne, nn, ne * Te] + + +def compute_rate_coefficients(temperatures_eV, energy_eV, sigma_m2, num_samples=1024): + """Integrate cross sections over a maxwellian VDF to obtain reaction rate coefficients + Given electron energies in eV (`energy_eV`) and reaction cross sections at those energies (`sigma_m2`), + this function computes the reaction rate coefficient $k(T_e)$ for maxwellian electrons at + a provided list of electron temperatures `temperatures_eV`. + + The rate coefficient is given by + + $$ + k(T_e) = \langle \sigma(E) v \rangle + $$ + + where the speed $v$ (with corresponding energy $E$) is drawn from a maxwellian distribution function with zero drift and temperature $T_e$. + We solve this using a quasi-monte carlo approach, by drawing a large number of low-discrepancy samples from + the appropriate distribution and obtaining the average of $\sigma(E) v$. + """ + thermal_speed_scale = np.sqrt(q_e / m_e) + k = np.zeros(temperatures_eV.size) + + # obtain low-discrepancy samples of normal dist + dist = qmc.MultivariateNormalQMC(np.zeros(3), np.eye(3)) + v = dist.random(num_samples) + + for i, T in enumerate(temperatures_eV): + # scale velocities to proper temperature + # compute energies corresponding to each sampled velocity vector + speed_squared = (v[:, 0] ** 2 + v[:, 1] ** 2 + v[:, 2] ** 2) * T + e = 0.5 * speed_squared + speed = np.sqrt(speed_squared) * thermal_speed_scale + # get cross section by interpolating on table + sigma = np.interp(e, energy_eV, sigma_m2, left=0) + k[i] = np.mean(sigma * speed) + return k + + +def rhs(state, params): + """Compute the right-hand side of the ODE system that solves the global model described below. + The global model solves for the evolution of plasma density ($n_e$), neutral density ($n_n$), + and electron temperature ($T_e$) in the presence of ionization. + The model equations consist of a continuity equation for electrons and neutrals, + combined with an energy equation for electrons.
+ + $$ + \frac{\partial n_e}{\partial t} = \dot{n} + \frac{\partial n_n}{\partial t} = -\dot{n} + \frac{3}{2}\frac{\partial n_e T_e}{\partial t} = -\dot{n} \epsilon_{iz}, + $$ + + where + + $$ + \dot{n} = n_n n_e k_{iz}(T_e), + $$ + + $k_{iz}$ is the ionization rate coefficient as a function of electron temperature in eV, and $\epsilon_{iz}$ is the ionization energy cost in eV. + """ + # unpack parameters + E_iz, Te_table, kiz_table = params + ne, nn, energy = state[0], state[1], state[2] + + # compute rhs + Te = energy / ne / 1.5 + rate_coeff = np.interp(Te, Te_table, kiz_table, left=0) + ndot = ne * nn * rate_coeff + f = np.empty(3) + + # fill in ionization rate + f[0] = ndot # d(ne)/dt + f[1] = -ndot # d(nn)/dt + f[2] = -ndot * iz_energy # -d(ne*eps) / dt + return f + + +def solve_theory_model(): + # integrate cross-section table to get rate coefficients + Te_table = np.linspace(0, 2 * electron_temp, 256) + kiz_table = compute_rate_coefficients( + Te_table, energy_eV, sigma_m2, num_samples=4096 + ) + + # set up system + num_steps = max_steps + 1 + state_vec = np.zeros((num_steps, 3)) + state_vec[0, :] = np.array( + [plasma_density, neutral_density, 1.5 * plasma_density * electron_temp] + ) + t = np.linspace(0, max_time, num_steps) + params = (iz_energy, Te_table, kiz_table) + + # solve the system (use RK2 integration) + for i in range(1, num_steps): + u = state_vec[i - 1, :] + k1 = rhs(u, params) + k2 = rhs(u + k1 * dt, params) + state_vec[i, :] = u + (k1 + k2) * dt / 2 + + # return result + ne = state_vec[:, 0] + nn = state_vec[:, 1] + Te = state_vec[:, 2] / (1.5 * ne) + return t, [ne, nn, ne * Te] + + +t_warpx = np.loadtxt("diags/counts.txt")[:, 1] +data_warpx = get_density(ts) + +t_theory, data_theory = solve_theory_model() + +fig, axs = plt.subplots(1, 3, figsize=(12, 4)) + +# Plot 1 +method = "dsmc" +labels = ["$n_e$ [m$^{-3}$]", "$n_n$ [m$^{-3}$]", "$n_e T_e$ [eV m$^{-3}$]"] +titles = ["Plasma density", "Neutral density", "Normalized electron temperature"] + +for i, (title, label, field_warpx, field_theory) in enumerate( + zip(titles, labels, data_warpx, data_theory) +): + axs[i].set_ylabel(label) + axs[i].set_title(title) + axs[i].plot(t_warpx, field_warpx, label="WarpX (" + method + ")") + axs[i].plot(t_theory, field_theory, label="theory", color="red", ls="--") + + axs[i].legend() +plt.tight_layout() +plt.savefig("ionization_dsmc_density_Te.png", dpi=150) + + +tolerances = [4e-2, 1e-6, 4e-2] + + +def check_tolerance(array, tolerance): + assert np.all(array <= tolerance), ( + f"Test did not pass: one or more elements exceed the tolerance of {tolerance}."
) + print("All elements are within the tolerance.") + + +plt.figure() +labels = [ + "Plasma density ($n_e$)", + "Neutral density ($n_n$)", + "Normalized electron temperature ($T_e n_e$)", +] +for i, (label, field_warpx, field_theory, tolerance) in enumerate( + zip(labels, data_warpx, data_theory, tolerances) +): + relative_error = np.array( + abs((data_warpx[i] - data_theory[i][::5]) / data_theory[i][::5]) + ) + plt.plot(t_warpx, relative_error, label=label) + plt.ylabel("Relative error") + plt.xlabel("Time [s]") + plt.legend() + check_tolerance(relative_error, tolerance) +plt.savefig("./relative_error_density_Te.png", dpi=150) diff --git a/Examples/Tests/ionization_dsmc/inputs_test_3d_ionization_dsmc b/Examples/Tests/ionization_dsmc/inputs_test_3d_ionization_dsmc new file mode 100644 index 00000000000..aed4cf6c52d --- /dev/null +++ b/Examples/Tests/ionization_dsmc/inputs_test_3d_ionization_dsmc @@ -0,0 +1,116 @@ +################################# +####### GENERAL PARAMETERS ###### +################################# +max_step = 250 +amr.n_cell = 8 8 8 +amr.max_level = 0 + +warpx.do_electrostatic = labframe +algo.particle_shape = 1 +amrex.verbose = 1 +geometry.coord_sys = 0 +################################# +############ CONSTANTS ############# +################################# +my_constants.Te = 10 +warpx.const_dt = 1e-09 + +################################# +####### GENERAL PARAMETERS ###### +################################# +geometry.dims = 3 +geometry.prob_hi = 0.1 0.1 0.1 +geometry.prob_lo = 0 0 0 +amr.max_grid_size = 8 + +################################# +###### BOUNDARY CONDITIONS ###### +################################# +geometry.is_periodic = 1 1 1 +boundary.field_hi = periodic periodic periodic +boundary.field_lo = periodic periodic periodic +boundary.particle_hi = periodic periodic periodic +boundary.particle_lo = periodic periodic periodic + +################################# +############ PLASMA ############# +################################# +particles.species_names = electrons ions neutrals + +electrons.charge = -q_e +electrons.density = 1e+14 +electrons.initialize_self_fields = 0 +electrons.injection_style = nuniformpercell +electrons.mass = m_e +electrons.momentum_distribution_type = gaussian +electrons.num_particles_per_cell_each_dim = 4 4 4 +electrons.profile = constant +electrons.ux_m = 0.0 +electrons.ux_th = sqrt(q_e * Te / m_e) / clight +electrons.uy_m = 0.0 +electrons.uy_th = sqrt(q_e * Te / m_e)/ clight +electrons.uz_m = 0.0 +electrons.uz_th = sqrt(q_e * Te / m_e)/ clight + +ions.charge = q_e +ions.density = 1e+14 +ions.initialize_self_fields = 0 +ions.injection_style = nuniformpercell +ions.mass = 2.196035502270312e-25 +ions.momentum_distribution_type = gaussian +ions.num_particles_per_cell_each_dim = 4 4 4 +ions.profile = constant +ions.ux_m = 0.0 +ions.ux_th = 4.5810168302300867e-07 +ions.uy_m = 0.0 +ions.uy_th = 4.5810168302300867e-07 +ions.uz_m = 0.0 +ions.uz_th = 4.5810168302300867e-07 + +neutrals.charge = 0 +neutrals.density = 1e+20 +neutrals.initialize_self_fields = 0 +neutrals.injection_style = nuniformpercell +neutrals.mass = 2.196035502270312e-25 +neutrals.momentum_distribution_type = gaussian +neutrals.num_particles_per_cell_each_dim = 4 4 4 +neutrals.profile = constant +neutrals.ux_m = 0.0 +neutrals.ux_th = 4.5810168302300867e-07 +neutrals.uy_m = 0.0 +neutrals.uy_th = 4.5810168302300867e-07 +neutrals.uz_m = 0.0 +neutrals.uz_th = 4.5810168302300867e-07 + +collisions.collision_names = coll_elec + +coll_elec.ionization_cross_section = 
../../../../warpx-data/MCC_cross_sections/Xe/ionization.dat +coll_elec.ionization_energy = 12.1298431 +coll_elec.product_species = ions electrons +coll_elec.ionization_target_species = neutrals +coll_elec.ndt = 1 +coll_elec.scattering_processes = ionization +coll_elec.species = electrons neutrals +coll_elec.type = dsmc + +################################# +############ DIAGNOSTICS ############# +################################# +diagnostics.diags_names = diag1 diag2 +warpx.reduced_diags_names = counts +counts.intervals = 5 +counts.path = diags/ +counts.type = ParticleNumber + +# Diagnostics +diag1.intervals = 250 +diag1.diag_type = Full +diag1.electrons.variables = ux uy uz +diag1.neutrals.variables = ux uy uz +diag1.format = plotfile + +diag2.intervals = 5 +diag2.diag_type = Full +diag2.electrons.variables = ux uy uz +diag2.neutrals.variables = ux uy uz +diag2.format = openpmd diff --git a/Python/pywarpx/picmi.py b/Python/pywarpx/picmi.py index 7e08acb3cec..8a0254d96f2 100644 --- a/Python/pywarpx/picmi.py +++ b/Python/pywarpx/picmi.py @@ -2563,14 +2563,21 @@ class DSMCCollisions(picmistandard.base._ClassWithInit): scattering_processes: dictionary The scattering process to use and any needed information + product_species: list + The species produced by collision processes (currently only ionization + products are supported). + ndt: integer, optional The collisions will be applied every "ndt" steps. Must be 1 or larger. """ - def __init__(self, name, species, scattering_processes, ndt=None, **kw): + def __init__( + self, name, species, scattering_processes, product_species=None, ndt=None, **kw + ): self.name = name self.species = species self.scattering_processes = scattering_processes + self.product_species = product_species self.ndt = ndt self.handle_init(kw) @@ -2579,12 +2586,16 @@ def collision_initialize_inputs(self): collision = pywarpx.Collisions.newcollision(self.name) collision.type = "dsmc" collision.species = [species.name for species in self.species] + if self.product_species is not None: + collision.product_species = [ + species.name for species in self.product_species + ] collision.ndt = self.ndt collision.scattering_processes = self.scattering_processes.keys() for process, kw in self.scattering_processes.items(): for key, val in kw.items(): - if key == "species": + if "species" in key: val = val.name collision.add_new_attr(process + "_" + key, val) diff --git a/Regression/Checksum/benchmarks_json/test_1d_dsmc_picmi.json b/Regression/Checksum/benchmarks_json/test_1d_dsmc_picmi.json index 62f915cf2e8..98872b583b8 100644 --- a/Regression/Checksum/benchmarks_json/test_1d_dsmc_picmi.json +++ b/Regression/Checksum/benchmarks_json/test_1d_dsmc_picmi.json @@ -1,27 +1,27 @@ { "lev=0": { - "rho_electrons": 0.004437338851654305, - "rho_he_ions": 0.005200276265886133 - }, - "he_ions": { - "particle_momentum_x": 2.768463746716725e-19, - "particle_momentum_y": 2.7585450668167785e-19, - "particle_momentum_z": 3.6189671443598533e-19, - "particle_position_x": 2201.408357891233, - "particle_weight": 17190472656250.002 + "rho_electrons": 0.004438215169514585, + "rho_he_ions": 0.005201960539530351 }, "electrons": { - "particle_momentum_x": 3.523554668287801e-20, - "particle_momentum_y": 3.515628626179393e-20, - "particle_momentum_z": 1.258711379033217e-19, - "particle_position_x": 2140.8168584833174, - "particle_weight": 14588988281250.002 + "particle_momentum_x": 3.533561243341172e-20, + "particle_momentum_y": 3.5386193532044873e-20, + "particle_momentum_z": 1.2559082152752288e-19, + 
"particle_position_x": 2141.145138810434, + "particle_weight": 14592390625000.002 + }, + "he_ions": { + "particle_momentum_x": 2.77081570738744e-19, + "particle_momentum_y": 2.757422176946563e-19, + "particle_momentum_z": 3.6216485183828697e-19, + "particle_position_x": 2201.938389152835, + "particle_weight": 17195707031250.002 }, "neutrals": { - "particle_momentum_x": 1.4054952479597137e-19, - "particle_momentum_y": 1.403311018061206e-19, - "particle_momentum_z": 1.411491089895956e-19, - "particle_position_x": 1119.82858839282, - "particle_weight": 6.4588e+19 + "particle_momentum_x": 1.4076569258763281e-19, + "particle_momentum_y": 1.4053910675178145e-19, + "particle_momentum_z": 1.4126450876863223e-19, + "particle_position_x": 1122.2997023272349, + "particle_weight": 6.4587999954199224e+19 } } diff --git a/Regression/Checksum/benchmarks_json/test_3d_ionization_dsmc.json b/Regression/Checksum/benchmarks_json/test_3d_ionization_dsmc.json new file mode 100644 index 00000000000..d9037876f89 --- /dev/null +++ b/Regression/Checksum/benchmarks_json/test_3d_ionization_dsmc.json @@ -0,0 +1,38 @@ +{ + "lev=0": { + "Bx": 0.0, + "By": 0.0, + "Bz": 0.0, + "Ex": 22604.886154206244, + "Ey": 25871.77915984331, + "Ez": 24818.05831785292, + "jx": 2268.615138333634, + "jy": 326.15256830784614, + "jz": 288.7225344616736 + }, + "neutrals": { + "particle_momentum_x": 7.849329019484996e-19, + "particle_momentum_y": 7.871937065975134e-19, + "particle_momentum_z": 7.866670113934242e-19, + "particle_position_x": 1638.398241914082, + "particle_position_y": 1638.411011051806, + "particle_position_z": 1638.4009672441146 + }, + "ions": { + "particle_momentum_x": 1.1081540791730902e-18, + "particle_momentum_y": 1.1084000949226488e-18, + "particle_momentum_z": 1.1045654617165205e-18, + "particle_position_x": 2303.068003441504, + "particle_position_y": 2301.6312817994844, + "particle_position_z": 2301.7510230334133, + "particle_weight": 140463256835.93753 + }, + "electrons": { + "particle_momentum_x": 3.213371643156214e-20, + "particle_momentum_y": 3.2189558527727626e-20, + "particle_momentum_z": 2.7592636558840016e-20, + "particle_position_x": 2305.726186451743, + "particle_position_y": 2301.7018490320256, + "particle_position_z": 2303.7728642413595 + } +} diff --git a/Source/Particles/Collision/BackgroundMCC/BackgroundMCCCollision.cpp b/Source/Particles/Collision/BackgroundMCC/BackgroundMCCCollision.cpp index 8becd7d231a..5caa949055f 100644 --- a/Source/Particles/Collision/BackgroundMCC/BackgroundMCCCollision.cpp +++ b/Source/Particles/Collision/BackgroundMCC/BackgroundMCCCollision.cpp @@ -276,7 +276,7 @@ BackgroundMCCCollision::doCollisions (amrex::Real cur_time, amrex::Real dt, Mult } amrex::Print() << Utils::TextMsg::Info( - "Setting up collisions for " + m_species_names[0] + " with:\n" + "Setting up Monte-Carlo collisions for " + m_species_names[0] + " with:\n" + " total non-ionization collision probability: " + std::to_string(m_total_collision_prob) + "\n total ionization collision probability: " diff --git a/Source/Particles/Collision/BinaryCollision/BinaryCollision.H b/Source/Particles/Collision/BinaryCollision/BinaryCollision.H index b64c6d4b1fa..b73588a2b6e 100644 --- a/Source/Particles/Collision/BinaryCollision/BinaryCollision.H +++ b/Source/Particles/Collision/BinaryCollision/BinaryCollision.H @@ -102,10 +102,40 @@ public: const amrex::ParmParse pp_collision_name(collision_name); pp_collision_name.queryarr("product_species", m_product_species); - // if DSMC the colliding species are also product species - 
// Therefore, we insert the colliding species at the beginning of `m_product_species` + // If DSMC the colliding species are also product species. + // Therefore, we insert the colliding species at the beginning of `m_product_species`. if (collision_type == CollisionType::DSMC) { + // If the scattering process is ionization, ensure that the + // explicitly specified "target" species, i.e., the species that + // undergoes ionization, is second in the species list for this + // collision set. The reason for this is that during the collision + // operation, an outgoing particle of the first species type will + // be created. + std::string target_species; + pp_collision_name.query("ionization_target_species", target_species); + if (!target_species.empty()) { + if (m_species_names[0] == target_species) { + std::swap(m_species_names[0], m_species_names[1]); + } else if (m_species_names[1] != target_species) { + WARPX_ABORT_WITH_MESSAGE("DSMC: Ionization target species, " + target_species + " must be one of the colliding species."); + } + } + m_product_species.insert( m_product_species.begin(), m_species_names.begin(), m_species_names.end() ); + + // During an ionization event, the colliding species that is not ionized could + // also be a product (for example, electron impact ionization producing another + // electron). If this is the case, we check that the product species is + // only listed once. The number of product particles is appropriately + // handled in `SplitAndScatterFunc` in that case. + if (m_product_species.size() > 2) { + if (m_product_species[2] == m_product_species[0]) { + m_product_species.erase(m_product_species.begin() + 2); + } + else if (m_product_species[3] == m_product_species[0]) { + m_product_species.erase(m_product_species.begin() + 3); + } + } } m_have_product_species = !m_product_species.empty(); diff --git a/Source/Particles/Collision/BinaryCollision/DSMC/CollisionFilterFunc.H b/Source/Particles/Collision/BinaryCollision/DSMC/CollisionFilterFunc.H index c5bd2e1cec6..fd4fd6fd66e 100644 --- a/Source/Particles/Collision/BinaryCollision/DSMC/CollisionFilterFunc.H +++ b/Source/Particles/Collision/BinaryCollision/DSMC/CollisionFilterFunc.H @@ -65,7 +65,6 @@ void CollisionPairFilter (const amrex::ParticleReal u1x, const amrex::ParticleRe // Evaluate the cross-section for each scattering process to determine // the total collision probability. - // The size of the arrays below is a compile-time constant (template parameter) // for performance reasons: it avoids dynamic memory allocation on the GPU.
int coll_type[max_process_count] = {0}; diff --git a/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.H b/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.H index 6051aab1b59..6f5a6561b1e 100644 --- a/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.H +++ b/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.H @@ -211,6 +211,7 @@ public: private: amrex::Vector<ScatteringProcess> m_scattering_processes; amrex::Gpu::DeviceVector<ScatteringProcess::Executor> m_scattering_processes_exe; + bool m_isSameSpecies; Executor m_exe; diff --git a/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.cpp b/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.cpp index cf5f8de8d3c..b79741a536a 100644 --- a/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.cpp +++ b/Source/Particles/Collision/BinaryCollision/DSMC/DSMCFunc.cpp @@ -32,48 +32,54 @@ DSMCFunc::DSMCFunc ( // create a vector of ScatteringProcess objects from each scattering // process name + bool ionization_flag = false; for (const auto& scattering_process : scattering_process_names) { const std::string kw_cross_section = scattering_process + "_cross_section"; std::string cross_section_file; pp_collision_name.query(kw_cross_section.c_str(), cross_section_file); - // if the scattering process is excitation or ionization get the - // energy associated with that process + // if the scattering process is excitation, ionization or forward get + // the energy associated with that process + // (note that this allows forward scattering to be used both with and + // without a fixed energy loss) amrex::ParticleReal energy = 0._prt; if (scattering_process.find("excitation") != std::string::npos || - scattering_process.find("ionization") != std::string::npos) { + scattering_process.find("ionization") != std::string::npos || + scattering_process.find("forward") != std::string::npos ) { const std::string kw_energy = scattering_process + "_energy"; utils::parser::getWithParser( pp_collision_name, kw_energy.c_str(), energy); } - // if the scattering process is forward scattering get the energy - // associated with the process if it is given (this allows forward - // scattering to be used both with and without a fixed energy loss) - else if (scattering_process.find("forward") != std::string::npos) { - const std::string kw_energy = scattering_process + "_energy"; - utils::parser::queryWithParser( - pp_collision_name, kw_energy.c_str(), energy); - } ScatteringProcess process(scattering_process, cross_section_file, energy); WARPX_ALWAYS_ASSERT_WITH_MESSAGE(process.type() != ScatteringProcessType::INVALID, "Cannot add an unknown scattering process type"); + // Only one ionization process is currently supported as part of a given + // collision set. + if (process.type() == ScatteringProcessType::IONIZATION) { + WARPX_ALWAYS_ASSERT_WITH_MESSAGE( + !ionization_flag, + "DSMC only supports a single ionization process" + ); + ionization_flag = true; + + // And add a check that the ionization species has the same mass + // (and a positive charge), compared to the target species + } m_scattering_processes.push_back(std::move(process)); } - const int process_count = static_cast<int>(m_scattering_processes.size()); - // Store ScatteringProcess::Executor(s).
#ifdef AMREX_USE_GPU amrex::Gpu::HostVector<ScatteringProcess::Executor> h_scattering_processes_exe; for (auto const& p : m_scattering_processes) { h_scattering_processes_exe.push_back(p.executor()); } - m_scattering_processes_exe.resize(process_count); + m_scattering_processes_exe.resize(h_scattering_processes_exe.size()); amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, h_scattering_processes_exe.begin(), - h_scattering_processes_exe.end(), m_scattering_processes_exe.begin()); + h_scattering_processes_exe.end(), m_scattering_processes_exe.begin()); amrex::Gpu::streamSynchronize(); #else for (auto const& p : m_scattering_processes) { @@ -83,6 +89,6 @@ DSMCFunc::DSMCFunc ( // Link executor to appropriate ScatteringProcess executors m_exe.m_scattering_processes_data = m_scattering_processes_exe.data(); - m_exe.m_process_count = process_count; + m_exe.m_process_count = static_cast<int>(m_scattering_processes_exe.size()); m_exe.m_isSameSpecies = m_isSameSpecies; } diff --git a/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H b/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H index e4b4d8a6a3a..59be9944077 100644 --- a/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H +++ b/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.H @@ -67,28 +67,72 @@ public: { using namespace amrex::literals; + // Return a vector of zeros, indicating that for all the "product" species + // there were no new particles added. if (n_total_pairs == 0) { return amrex::Vector<int>(m_num_product_species, 0); } - amrex::Gpu::DeviceVector<index_type> offsets(n_total_pairs); - index_type* AMREX_RESTRICT offsets_data = offsets.data(); - const index_type* AMREX_RESTRICT p_offsets = offsets.dataPtr(); + // The following is used to calculate the appropriate offsets for + // non-product producing processes (i.e., non-ionization processes). + // Note that a standard cumulative sum is not appropriate since the + // mask is also used to specify the type of collision and can therefore + // have values >1 + amrex::Gpu::DeviceVector<index_type> no_product_offsets(n_total_pairs); + index_type* AMREX_RESTRICT no_product_offsets_data = no_product_offsets.data(); + const index_type* AMREX_RESTRICT no_product_p_offsets = no_product_offsets.dataPtr(); + auto const no_product_total = amrex::Scan::PrefixSum<index_type>(n_total_pairs, + [=] AMREX_GPU_DEVICE (index_type i) -> index_type { + return ((mask[i] > 0) & (mask[i] != int(ScatteringProcessType::IONIZATION))) ? 1 : 0; + }, + [=] AMREX_GPU_DEVICE (index_type i, index_type s) { no_product_offsets_data[i] = s; }, + amrex::Scan::Type::exclusive, amrex::Scan::retSum + ); + + amrex::Vector<int> num_added_vec(m_num_product_species, 0); + for (int i = 0; i < m_num_product_species; i++) + { + // Record the number of non-product-producing events that lead to new + // particles for species 1 and 2. Only 1 particle is created for + // each species (the piece that breaks off to have equal-weight + // particles). + num_added_vec[i] = static_cast<int>(no_product_total); + } - // The following is used to calculate the appropriate offsets. Note that - // a standard cummulative sum is not appropriate since the mask is also - // used to specify the type of collision and can therefore have values >1 - auto const total = amrex::Scan::PrefixSum<index_type>(n_total_pairs, - [=] AMREX_GPU_DEVICE (index_type i) -> index_type { return mask[i] ? 
1 : 0; }, - [=] AMREX_GPU_DEVICE (index_type i, index_type s) { offsets_data[i] = s; }, + // The following is used to calculate the appropriate offsets for + // product producing processes (i.e., ionization). + // Note that a standard cumulative sum is not appropriate since the + // mask is also used to specify the type of collision and can therefore + // have values >1 + amrex::Gpu::DeviceVector<index_type> with_product_offsets(n_total_pairs); + index_type* AMREX_RESTRICT with_product_offsets_data = with_product_offsets.data(); + const index_type* AMREX_RESTRICT with_product_p_offsets = with_product_offsets.dataPtr(); + auto const with_product_total = amrex::Scan::PrefixSum<index_type>(n_total_pairs, + [=] AMREX_GPU_DEVICE (index_type i) -> index_type { + return (mask[i] == int(ScatteringProcessType::IONIZATION)) ? 1 : 0; + }, + [=] AMREX_GPU_DEVICE (index_type i, index_type s) { with_product_offsets_data[i] = s; }, amrex::Scan::Type::exclusive, amrex::Scan::retSum ); - amrex::Vector<int> num_added_vec(m_num_product_species); for (int i = 0; i < m_num_product_species; i++) { - // How many particles of product species i are created. - const index_type num_added = total * m_num_products_host[i]; - num_added_vec[i] = static_cast<int>(num_added); - tile_products[i]->resize(products_np[i] + num_added); + // Add the number of product producing events to the species involved + // in those processes. For the two colliding particles, if either is set to + // have just 1 copy in the products it indicates that that species is not a + // product of the product producing reaction (instead it is just tracked as + // an outgoing particle in non-product producing reactions), and therefore + // it does not count in the products. + int num_products = m_num_products_host[i]; + if ((i < 2) & (num_products == 1)) { + num_products = 0; + } + const index_type num_added = with_product_total * num_products; + num_added_vec[i] += static_cast<int>(num_added); + } + + // resize the particle tiles to accommodate the new particles + for (int i = 0; i < m_num_product_species; i++) + { + tile_products[i]->resize(products_np[i] + num_added_vec[i]); } const auto soa_1 = ptile1.getParticleTileData(); @@ -119,27 +163,65 @@ public: const index_type* AMREX_RESTRICT products_np_data = products_np.data(); #endif - const int* AMREX_RESTRICT p_num_products_device = m_num_products_device.data(); + const int num_product_species = m_num_product_species; + const auto ionization_energy = m_ionization_energy; + // Store the list indices for ionization products, ensuring that + // the first product species is always an electron (which is assumed + // during the scattering operation). + // Also, get the starting index for the first ionization product (if ionization + // is present). If species1 is also a product, this would start the + // indexing for product particles after the particles created from + // fragmentation.
+ int ioniz_product1_list_index = 0, ioniz_product2_list_index = 0; + index_type ioniz_product1_offset = 0, ioniz_product2_offset = 0; + if (num_product_species == 3) { + if (pc_products[0]->getCharge() < 0.0) { + ioniz_product1_list_index = 0; + ioniz_product2_list_index = 2; + ioniz_product1_offset = products_np[0] + no_product_total + with_product_total; + ioniz_product2_offset = products_np[2]; + } else { + ioniz_product1_list_index = 2; + ioniz_product2_list_index = 0; + ioniz_product1_offset = products_np[2]; + ioniz_product2_offset = products_np[0] + no_product_total + with_product_total; + } + } else if (num_product_species == 4) { + if (pc_products[2]->getCharge() < 0.0) { + ioniz_product1_list_index = 2; + ioniz_product2_list_index = 3; + ioniz_product1_offset = products_np[2]; + ioniz_product2_offset = products_np[3]; + } else { + ioniz_product1_list_index = 3; + ioniz_product2_list_index = 2; + ioniz_product1_offset = products_np[3]; + ioniz_product2_offset = products_np[2]; + } + } + // Grab the masses of ionization products + amrex::ParticleReal m_ioniz_product1 = 0; + amrex::ParticleReal m_ioniz_product2 = 0; + if (num_product_species > 2) { + m_ioniz_product1 = pc_products[ioniz_product1_list_index]->getMass(); + m_ioniz_product2 = pc_products[ioniz_product2_list_index]->getMass(); + } + + // First perform all non-product producing collisions amrex::ParallelForRNG(n_total_pairs, [=] AMREX_GPU_DEVICE (int i, amrex::RandomEngine const& engine) noexcept { - if (mask[i]) + if ((mask[i] > 0) & (mask[i] != int(ScatteringProcessType::IONIZATION))) { - // for now we ignore the possibility of having actual reaction - // products - only duplicating (splitting) of the colliding - // particles is supported. - - const auto product1_index = products_np_data[0] + - (p_offsets[i]*p_num_products_device[0] + 0); + const auto product1_index = products_np_data[0] + no_product_p_offsets[i]; // Make a copy of the particle from species 1 copy_species1[0](soa_products_data[0], soa_1, static_cast<int>(p_pair_indices_1[i]), static_cast<int>(product1_index), engine); // Set the weight of the new particles to p_pair_reaction_weight[i] soa_products_data[0].m_rdata[PIdx::w][product1_index] = p_pair_reaction_weight[i]; const auto product2_index = products_np_data[1] + - (p_offsets[i]*p_num_products_device[1] + 0); + const auto product2_index = products_np_data[1] + no_product_p_offsets[i]; // Make a copy of the particle from species 2 copy_species2[1](soa_products_data[1], soa_2, static_cast<int>(p_pair_indices_2[i]), static_cast<int>(product2_index), engine); @@ -235,6 +317,184 @@ public: uy2 += uCOM_y; uz2 += uCOM_z; +#if (defined WARPX_DIM_RZ) + /* Undo the earlier velocity rotation. 
*/ + amrex::ParticleReal const ux1buf_new = ux1; + ux1 = ux1buf_new*std::cos(-theta) - uy1*std::sin(-theta); + uy1 = ux1buf_new*std::sin(-theta) + uy1*std::cos(-theta); +#endif + } + + // Next perform all product producing collisions + else if (mask[i] == int(ScatteringProcessType::IONIZATION)) + { + const auto species1_index = products_np_data[0] + no_product_total + with_product_p_offsets[i]; + // Make a copy of the particle from species 1 + copy_species1[0](soa_products_data[0], soa_1, static_cast<int>(p_pair_indices_1[i]), + static_cast<int>(species1_index), engine); + // Set the weight of the new particles to p_pair_reaction_weight[i] + soa_products_data[0].m_rdata[PIdx::w][species1_index] = p_pair_reaction_weight[i]; + + // create a copy of the first product species at the location of species 2 + const auto product1_index = ioniz_product1_offset + with_product_p_offsets[i]; + copy_species1[ioniz_product1_list_index](soa_products_data[ioniz_product1_list_index], soa_2, static_cast<int>(p_pair_indices_2[i]), + static_cast<int>(product1_index), engine); + // Set the weight of the new particle to p_pair_reaction_weight[i] + soa_products_data[ioniz_product1_list_index].m_rdata[PIdx::w][product1_index] = p_pair_reaction_weight[i]; + + // create a copy of the other product species at the location of species 2 + const auto product2_index = ioniz_product2_offset + with_product_p_offsets[i]; + copy_species1[ioniz_product2_list_index](soa_products_data[ioniz_product2_list_index], soa_2, static_cast<int>(p_pair_indices_2[i]), + static_cast<int>(product2_index), engine); + // Set the weight of the new particle to p_pair_reaction_weight[i] + soa_products_data[ioniz_product2_list_index].m_rdata[PIdx::w][product2_index] = p_pair_reaction_weight[i]; + + // Grab the colliding particle velocities to calculate the COM + // Note that the two product particles currently have the same + // velocity as the "target" particle + auto& ux1 = soa_products_data[0].m_rdata[PIdx::ux][species1_index]; + auto& uy1 = soa_products_data[0].m_rdata[PIdx::uy][species1_index]; + auto& uz1 = soa_products_data[0].m_rdata[PIdx::uz][species1_index]; + auto& ux_p1 = soa_products_data[ioniz_product1_list_index].m_rdata[PIdx::ux][product1_index]; + auto& uy_p1 = soa_products_data[ioniz_product1_list_index].m_rdata[PIdx::uy][product1_index]; + auto& uz_p1 = soa_products_data[ioniz_product1_list_index].m_rdata[PIdx::uz][product1_index]; + auto& ux_p2 = soa_products_data[ioniz_product2_list_index].m_rdata[PIdx::ux][product2_index]; + auto& uy_p2 = soa_products_data[ioniz_product2_list_index].m_rdata[PIdx::uy][product2_index]; + auto& uz_p2 = soa_products_data[ioniz_product2_list_index].m_rdata[PIdx::uz][product2_index]; + +#if (defined WARPX_DIM_RZ) + /* In RZ geometry, macroparticles can collide with other macroparticles + * in the same *cylindrical* cell. For this reason, collisions between macroparticles + * are actually not local in space. In this case, the underlying assumption is that + * particles within the same cylindrical cell represent a cylindrically-symmetric + * momentum distribution function. Therefore, here, we temporarily rotate the + * momentum of one of the macroparticles in agreement with this cylindrical symmetry. + * (This is technically only valid if we use only the m=0 azimuthal mode in the simulation; + * there is a corresponding assert statement at initialization.) 
+ */ + amrex::ParticleReal const theta = ( + soa_products_data[ioniz_product1_list_index].m_rdata[PIdx::theta][product1_index] + - soa_products_data[0].m_rdata[PIdx::theta][species1_index] + ); + amrex::ParticleReal const ux1buf = ux1; + ux1 = ux1buf*std::cos(theta) - uy1*std::sin(theta); + uy1 = ux1buf*std::sin(theta) + uy1*std::cos(theta); +#endif + + // for simplicity (for now) we assume non-relativistic particles + // and simply calculate the center-of-momentum velocity from the + // rest masses + auto const uCOM_x = (m1 * ux1 + m2 * ux_p2) / (m1 + m2); + auto const uCOM_y = (m1 * uy1 + m2 * uy_p2) / (m1 + m2); + auto const uCOM_z = (m1 * uz1 + m2 * uz_p2) / (m1 + m2); + + // transform to COM frame + ux1 -= uCOM_x; + uy1 -= uCOM_y; + uz1 -= uCOM_z; + ux_p1 -= uCOM_x; + uy_p1 -= uCOM_y; + uz_p1 -= uCOM_z; + ux_p2 -= uCOM_x; + uy_p2 -= uCOM_y; + uz_p2 -= uCOM_z; + + if (mask[i] == int(ScatteringProcessType::IONIZATION)) { + // calculate kinetic energy of the collision (in eV) + const amrex::ParticleReal E1 = ( + 0.5_prt * m1 * (ux1*ux1 + uy1*uy1 + uz1*uz1) / PhysConst::q_e + ); + const amrex::ParticleReal E2 = ( + 0.5_prt * m2 * (ux_p2*ux_p2 + uy_p2*uy_p2 + uz_p2*uz_p2) / PhysConst::q_e + ); + const amrex::ParticleReal E_coll = E1 + E2; + + // subtract the energy cost for ionization + const amrex::ParticleReal E_out = (E_coll - ionization_energy) * PhysConst::q_e; + + // Energy division after the ionization event is done as follows: + // The ion product energy is obtained from the target energy as + // E2_prime = min(E2 / E_coll * E_out, 0.5 * E_out) + // The energy division for the remaining two particles + // must be done such that velocity vectors exist with net + // zero linear momentum in the current frame. A sufficient + // condition for this is that E0_prime, E1_prime and E2_prime + // are valid edge lengths for a triangle - effectively that + // an ellipse can be drawn from the energy components. + // That ellipse has semi-major and semi-minor axes: + // a = (E_out - E2_prime) / 2.0 + // b = 0.5 * sqrt(E_out^2 - 2 * E_out * E2_prime) + // The energy components are found by randomly sampling an + // x value between -a and a, and finding the corresponding + // y value that falls on the ellipse: y^2 = b^2 - b^2/a^2 * x^2. + // The secondary electron's energy is then: + // E0_prime = sqrt(y^2 + (x - E2_prime/2)^2) + // and the final particle's is: + // E1_prime = E_out - E0_prime - E2_prime + + // The product ordering ensures that product 2 is the + // ion product. + const amrex::ParticleReal E2_prime = std::min(E2 / E_coll * E_out, 0.5_prt * E_out); + + // find ellipse semi-major and semi-minor axes + const amrex::ParticleReal a = 0.5_prt * (E_out - E2_prime); + const amrex::ParticleReal b = 0.5_prt * std::sqrt(E_out*E_out - 2.0_prt * E_out * E2_prime); + + // sample random x value and calculate y + const amrex::ParticleReal x = (2._prt * amrex::Random(engine) - 1.0_prt) * a; + const amrex::ParticleReal y2 = b*b - b*b/(a*a) * x*x; + const amrex::ParticleReal E0_prime = std::sqrt(y2 + x*x - x*E2_prime + 0.25_prt*E2_prime*E2_prime); + const amrex::ParticleReal E1_prime = E_out - E0_prime - E2_prime; + + // Now that appropriate energies are set for each outgoing species + // the directions for the velocity vectors must be chosen such + // that the net linear momentum in the current frame is 0. + // This is achieved by arranging the momentum vectors in + // a triangle and finding the required angles between the vectors.
+ const amrex::ParticleReal p0 = std::sqrt(2.0_prt * m1 * E0_prime); + const amrex::ParticleReal p1 = std::sqrt(2.0_prt * m_ioniz_product1 * E1_prime); + const amrex::ParticleReal p2 = std::sqrt(2.0_prt * m_ioniz_product2 * E2_prime); + + const amrex::ParticleReal cos_alpha = (p0*p0 + p1*p1 - p2*p2) / (2.0_prt * p0 * p1); + const amrex::ParticleReal sin_alpha = std::sqrt(1.0_prt - cos_alpha*cos_alpha); + const amrex::ParticleReal cos_gamma = (p0*p0 + p2*p2 - p1*p1) / (2.0_prt * p0 * p2); + const amrex::ParticleReal sin_gamma = std::sqrt(1.0_prt - cos_gamma*cos_gamma); + + // choose random theta and phi values (orientation of the triangle) + const amrex::ParticleReal Theta = amrex::Random(engine) * 2.0_prt * MathConst::pi; + const amrex::ParticleReal phi = amrex::Random(engine) * MathConst::pi; + + const amrex::ParticleReal cos_theta = std::cos(Theta); + const amrex::ParticleReal sin_theta = std::sin(Theta); + const amrex::ParticleReal cos_phi = std::cos(phi); + const amrex::ParticleReal sin_phi = std::sin(phi); + + ux1 = p0 / m1 * cos_theta * cos_phi; + uy1 = p0 / m1 * cos_theta * sin_phi; + uz1 = -p0 / m1 * sin_theta; + + ux_p1 = p1 / m_ioniz_product1 * (-cos_alpha * cos_theta * cos_phi - sin_alpha * sin_phi); + uy_p1 = p1 / m_ioniz_product1 * (-cos_alpha * cos_theta * sin_phi + sin_alpha * cos_phi); + uz_p1 = p1 / m_ioniz_product1 * (cos_alpha * sin_theta); + + ux_p2 = p2 / m_ioniz_product2 * (-cos_gamma * cos_theta * cos_phi + sin_gamma * sin_phi); + uy_p2 = p2 / m_ioniz_product2 * (-cos_gamma * cos_theta * sin_phi - sin_gamma * cos_phi); + uz_p2 = p2 / m_ioniz_product2 * (cos_gamma * sin_theta); + } + else { + amrex::Abort("Unknown scattering process."); + } + // transform back to the lab frame + ux1 += uCOM_x; + uy1 += uCOM_y; + uz1 += uCOM_z; + ux_p1 += uCOM_x; + uy_p1 += uCOM_y; + uz_p1 += uCOM_z; + ux_p2 += uCOM_x; + uy_p2 += uCOM_y; + uz_p2 += uCOM_z; + #if (defined WARPX_DIM_RZ) /* Undo the earlier velocity rotation. */ amrex::ParticleReal const ux1buf_new = ux1; @@ -272,10 +532,10 @@ public: private: // How many different type of species the collision produces int m_num_product_species; + // If ionization collisions are included, the energy cost of each ionization event + amrex::ParticleReal m_ionization_energy = 0.0; // Vectors of size m_num_product_species storing how many particles of a given species are - // produced by a collision event. These vectors are duplicated (one version for host and one - // for device) which is necessary with GPUs but redundant on CPU. - amrex::Gpu::DeviceVector<int> m_num_products_device; + // produced by a collision event.
amrex::Gpu::HostVector<int> m_num_products_host; CollisionType m_collision_type; }; diff --git a/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.cpp b/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.cpp index de8de9b505d..8c93697a5a2 100644 --- a/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.cpp +++ b/Source/Particles/Collision/BinaryCollision/DSMC/SplitAndScatterFunc.cpp @@ -13,31 +13,62 @@ SplitAndScatterFunc::SplitAndScatterFunc (const std::string& collision_name, MultiParticleContainer const * const mypc): m_collision_type{BinaryCollisionUtils::get_collision_type(collision_name, mypc)} { - const amrex::ParmParse pp_collision_name(collision_name); - if (m_collision_type == CollisionType::DSMC) { - // here we can add logic to deal with cases where products are created, - // for example with impact ionization - m_num_product_species = 2; - m_num_products_host.push_back(1); - m_num_products_host.push_back(1); -#ifndef AMREX_USE_GPU - // On CPU, the device vector can be filled immediately - m_num_products_device.push_back(1); - m_num_products_device.push_back(1); -#endif + const amrex::ParmParse pp_collision_name(collision_name); + + // Check if ionization is one of the scattering processes by querying + // for any specified product species (ionization is the only current + // DSMC process with products) + amrex::Vector<std::string> product_species; + pp_collision_name.queryarr("product_species", product_species); + + const bool ionization_flag = (!product_species.empty()); + + // if ionization is one of the processes, check if one of the colliding + // species is also used as a product species + if (ionization_flag) { + // grab the colliding species + amrex::Vector<std::string> colliding_species; + pp_collision_name.getarr("species", colliding_species); + // grab the target species (i.e., the species that loses an + // electron during the collision) + std::string target_species; + pp_collision_name.query("ionization_target_species", target_species); + // find the index of the non-target species (the one that could + // also be used as a product species) + int non_target_idx = 0; + if (colliding_species[0] == target_species) { + non_target_idx = 1; + } + + // check if the non-target species is in ``product_species`` + auto it = std::find(product_species.begin(), product_species.end(), colliding_species[non_target_idx]); + + if (it != product_species.end()) { + m_num_product_species = 3; + m_num_products_host.push_back(2); // the non-target species + m_num_products_host.push_back(1); // the target species + m_num_products_host.push_back(1); // corresponds to whichever ionization product species1 is not (ion or electron) + } else { + m_num_product_species = 4; + m_num_products_host.push_back(1); // the non-target species + m_num_products_host.push_back(1); // the target species + m_num_products_host.push_back(1); // first product species + m_num_products_host.push_back(1); // second product species + } + + // get the ionization energy + pp_collision_name.get("ionization_energy", m_ionization_energy); + + } else { + m_num_product_species = 2; + m_num_products_host.push_back(1); + m_num_products_host.push_back(1); + } } else { WARPX_ABORT_WITH_MESSAGE("Unknown collision type in SplitAndScatterFunc"); } - -#ifdef AMREX_USE_GPU - m_num_products_device.resize(m_num_product_species); - amrex::Gpu::copyAsync(amrex::Gpu::hostToDevice, m_num_products_host.begin(), - m_num_products_host.end(), - m_num_products_device.begin()); - amrex::Gpu::streamSynchronize(); -#endif }
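For readers following the energy-partition comments in the patch above, here is a minimal, self-contained C++ sketch of the ellipse construction (illustrative only: `split_ionization_energy` is a hypothetical helper with a plain-`double` interface, and `std::rand` stands in for `amrex::Random`):

```cpp
#include <algorithm>
#include <array>
#include <cmath>
#include <cstdlib>

// Split the available post-ionization energy E_out among the three products.
// The ion product receives E2' = min(E2/E_coll * E_out, 0.5 * E_out); the other
// two energies are sampled from an ellipse so that momentum-balancing angles exist.
std::array<double, 3> split_ionization_energy (double E_out, double E2_frac)
{
    const double E2p = std::min(E2_frac * E_out, 0.5 * E_out);
    // ellipse semi-major and semi-minor axes (assumes E_out > 0, so a > 0)
    const double a = 0.5 * (E_out - E2p);
    const double b = 0.5 * std::sqrt(E_out * E_out - 2.0 * E_out * E2p);
    // sample x uniformly in [-a, a] and take the matching ellipse ordinate
    const double x  = (2.0 * std::rand() / double(RAND_MAX) - 1.0) * a;
    const double y2 = b * b - (b * b) / (a * a) * x * x;
    // secondary electron energy: distance from (x, y) to the focus at (E2'/2, 0)
    const double E0p = std::sqrt(y2 + (x - 0.5 * E2p) * (x - 0.5 * E2p));
    const double E1p = E_out - E0p - E2p;   // remainder goes to the last product
    return {E0p, E1p, E2p};
}
```

By construction E0' + E1' + E2' = E_out, and the resulting momentum magnitudes can be closed into a triangle, which is the property the law-of-cosines block (`cos_alpha`, `cos_gamma`) in the patch relies on.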
From 304a38ee134cfbb2febae3378b7f22cd92e1d308 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Tue, 4 Mar 2025 05:52:00 +0100 Subject: [PATCH 270/278] Readme.md : correct typo in Governance section ("High Performance Computing Foundation" --> "High Performance Software Foundation" ) (#5723) This PR corrects a typo in the Governance section of our readme file! --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 7760a004081..21358d87f1b 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ We invite you to contribute to WarpX in any form following our [Code of Conduct] ## Governance -WarpX is hosted by the High Performance Computing Foundation (HPSF). +WarpX is hosted by the High Performance Software Foundation (HPSF). If your organization wants to help steer the evolution of the HPC software ecosystem, visit [hpsf.io](https://hpsf.io) and consider joining! The WarpX open governance model is described in [GOVERNANCE.rst](GOVERNANCE.rst). From 529723f812600d97cbc41ce676c3a7337911311c Mon Sep 17 00:00:00 2001 From: Arianna Formenti Date: Tue, 4 Mar 2025 07:14:41 -0800 Subject: [PATCH 271/278] Synchronize after copying in 2D reduced diagnostics (#5726) Add `Gpu::streamSynchronize();` after copying `TableData`, both host-to-device and device-to-host, in the `ParticleHistogram2D` and `DifferentialLuminosity2D` reduced diagnostics. We have observed unstable behaviors with the `DifferentialLuminosity2D` diagnostic and @RemiLehe suggested this could be a reason. Marked as bug, unless otherwise recommended. --------- Co-authored-by: Remi Lehe --- Source/Diagnostics/ReducedDiags/DifferentialLuminosity2D.cpp | 1 + Source/Diagnostics/ReducedDiags/ParticleHistogram2D.cpp | 3 +++ 2 files changed, 4 insertions(+) diff --git a/Source/Diagnostics/ReducedDiags/DifferentialLuminosity2D.cpp b/Source/Diagnostics/ReducedDiags/DifferentialLuminosity2D.cpp index b3968b9fb02..09029185593 100644 --- a/Source/Diagnostics/ReducedDiags/DifferentialLuminosity2D.cpp +++ b/Source/Diagnostics/ReducedDiags/DifferentialLuminosity2D.cpp @@ -315,6 +315,7 @@ void DifferentialLuminosity2D::ComputeDiags (int step) // Copy data from GPU memory m_h_data_2D.copy(m_d_data_2D); + Gpu::streamSynchronize(); // reduced sum over mpi ranks const int size = static_cast<int> (m_d_data_2D.size()); diff --git a/Source/Diagnostics/ReducedDiags/ParticleHistogram2D.cpp b/Source/Diagnostics/ReducedDiags/ParticleHistogram2D.cpp index 8dd19186b25..a39c5fac855 100644 --- a/Source/Diagnostics/ReducedDiags/ParticleHistogram2D.cpp +++ b/Source/Diagnostics/ReducedDiags/ParticleHistogram2D.cpp @@ -149,6 +149,8 @@ void ParticleHistogram2D::ComputeDiags (int step) } d_data_2D.copy(m_h_data_2D); + // Gpu::streamSynchronize() is not needed, because there is a sync in WarpXParIter constructor later + auto d_table = d_data_2D.table(); // get a reference to WarpX instance @@ -244,6 +246,7 @@ void ParticleHistogram2D::ComputeDiags (int step) // Copy data from GPU memory m_h_data_2D.copy(d_data_2D); + Gpu::streamSynchronize(); // reduced sum over mpi ranks const int size = static_cast<int> (d_data_2D.size()); From da064ad2762cf5f4d292409a1de92223e6e951b4 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Tue, 4 Mar 2025 14:15:58 -0800 Subject: [PATCH 272/278] ABLASTR: Silence/Disable `write_used_inputs_file` (#5733) Add support to silence the status print or to skip the creation of `write_used_inputs_file` altogether.
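For reference, a minimal sketch of the resulting call patterns (the signature comes from the diff below; the file name here is arbitrary):

```cpp
// default: write the file and print its location
ablastr::utils::write_used_inputs_file("warpx_used_inputs");

// write the file, but silence the status print
ablastr::utils::write_used_inputs_file("warpx_used_inputs", false);

// skip creating the file altogether (empty name or /dev/null)
ablastr::utils::write_used_inputs_file("");
```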
This is useful in unsupervised runs, such as optimization campaigns, where we often want to skip file creation altogether, e.g., when WarpX is driven from Python. --- Source/ablastr/utils/UsedInputsFile.H | 3 ++- Source/ablastr/utils/UsedInputsFile.cpp | 12 +++++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/Source/ablastr/utils/UsedInputsFile.H b/Source/ablastr/utils/UsedInputsFile.H index 543ae98a256..a57527a434d 100644 --- a/Source/ablastr/utils/UsedInputsFile.H +++ b/Source/ablastr/utils/UsedInputsFile.H @@ -19,9 +19,10 @@ namespace ablastr::utils * Only the AMReX IOProcessor writes. * * @param filename the name of the text file to write + * @param verbose print information about the file location to stdout */ void - write_used_inputs_file (std::string const & filename); + write_used_inputs_file (std::string const & filename, bool verbose=true); } #endif // ABLASTR_USED_INPUTS_FILE_H diff --git a/Source/ablastr/utils/UsedInputsFile.cpp b/Source/ablastr/utils/UsedInputsFile.cpp index a7777556242..4f0c8498718 100644 --- a/Source/ablastr/utils/UsedInputsFile.cpp +++ b/Source/ablastr/utils/UsedInputsFile.cpp @@ -11,14 +11,20 @@ #include #include -#include +#include #include void -ablastr::utils::write_used_inputs_file (std::string const & filename) +ablastr::utils::write_used_inputs_file (std::string const & filename, bool verbose) { - amrex::Print() << "For full input parameters, see the file: " << filename << "\n\n"; + if (filename.empty() || filename == "/dev/null") { + return; + } + + if (verbose) { + amrex::Print() << "For full input parameters, see the file: " << filename << "\n\n"; + } if (amrex::ParallelDescriptor::IOProcessor()) { std::ofstream jobInfoFile; From 47235b9eefd121f2e6238f457ccdd84da42ed35a Mon Sep 17 00:00:00 2001 From: Gabriel Robert-Dautun <48356331+grobertdautun@users.noreply.github.com> Date: Wed, 5 Mar 2025 01:48:27 +0100 Subject: [PATCH 273/278] acknowledgments.rst : corrected typo ("High Performance Computing Foundation" --> "High Performance Software Foundation" ) (#5724) This PR corrects the typo pointed out in #5723 in the documentation. --- Docs/source/acknowledgements.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Docs/source/acknowledgements.rst b/Docs/source/acknowledgements.rst index 6d2529705d0..5a87c43fa81 100644 --- a/Docs/source/acknowledgements.rst +++ b/Docs/source/acknowledgements.rst @@ -3,7 +3,7 @@ Funding and Acknowledgements ============================ -WarpX is hosted by the High Performance Computing Foundation (HPSF). +WarpX is hosted by the High Performance Software Foundation (HPSF). If your organization wants to help steer the evolution of the HPC software ecosystem, visit `hpsf.io <https://hpsf.io>`__ and consider joining! WarpX is supported by the CAMPA collaboration, a project of the U.S. Department of Energy, Office of Science, Office of Advanced Scientific Computing Research and Office of High Energy Physics, Scientific Discovery through Advanced Computing (SciDAC) program. From cd2644db470c4647041d10fc435a5fd90df4a934 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 5 Mar 2025 11:38:18 -0800 Subject: [PATCH 274/278] openPMD: no BP5 Group Based (#5735) Disallow BP5 with group-based encoding, because it creates files that cannot be read back efficiently. What works: BP4 f, BP4 g, BP5 f, BP5 v (still experimental, not yet fully supported in readers), H5 f, H5 g.
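For illustration, a supported combination then looks like this in an inputs file (using a hypothetical diagnostic named `diag1`; the parameter names match the documentation updated below):

```
diag1.format = openpmd
diag1.openpmd_backend = bp5
diag1.openpmd_encoding = f   # file-based; g (group-based) is now rejected for BP5
```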
References: - https://github.com/BLAST-ImpactX/impactx/pull/870 - https://github.com/openPMD/openPMD-api/discussions/1724 - https://github.com/openPMD/openPMD-api/issues/1457#issuecomment-1580328866 cc @franzpoeschel --- Docs/source/usage/parameters.rst | 8 ++++---- .../usage/workflows/ml_materials/run_warpx_training.py | 4 ++-- Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp | 7 +++++++ Source/Diagnostics/OpenPMDHelpFunction.cpp | 4 ++-- Source/Diagnostics/WarpXOpenPMD.H | 6 +++--- 5 files changed, 18 insertions(+), 11 deletions(-) diff --git a/Docs/source/usage/parameters.rst b/Docs/source/usage/parameters.rst index b2547b5126c..e425104ddcb 100644 --- a/Docs/source/usage/parameters.rst +++ b/Docs/source/usage/parameters.rst @@ -2813,14 +2813,14 @@ In-situ capabilities can be used by turning on Sensei or Ascent (provided they a * ``.openpmd_backend`` (``bp5``, ``bp4``, ``h5`` or ``json``) optional, only used if ``.format = openpmd`` `I/O backend `_ for `openPMD `_ data dumps. - ``bp`` is the `ADIOS I/O library `_, ``h5`` is the `HDF5 format `_, and ``json`` is a `simple text format `_. - ``json`` only works with serial/single-rank jobs. + ``bp5``/``bp4`` is the `ADIOS I/O library `_, ``h5`` is the `HDF5 format `_, and ``json`` is a `simple text format `_. + ``json`` is for debugging and only works with serial/single-rank jobs. When WarpX is compiled with openPMD support, the first available backend in the order given above is taken. * ``.openpmd_encoding`` (optional, ``v`` (variable based), ``f`` (file based) or ``g`` (group based) ) only read if ``.format = openpmd``. openPMD `file output encoding `__. File based: one file per timestep (slower), group/variable based: one file for all steps (faster)). - ``variable based`` is an `experimental feature with ADIOS2 `__ and not supported for back-transformed diagnostics. + ``variable based`` is an `experimental feature with ADIOS2 BP5 `__ that will replace ``g``. Default: ``f`` (full diagnostics) * ``.adios2_operator.type`` (``zfp``, ``blosc``) optional, @@ -2854,7 +2854,7 @@ In-situ capabilities can be used by turning on Sensei or Ascent (provided they a .openpmd_backend = bp5 .adios2_engine.parameters.FlattenSteps = on -* ``.adios2_engine.type`` (``bp4``, ``sst``, ``ssc``, ``dataman``) optional, +* ``.adios2_engine.type`` (``bp5``, ``bp4``, ``sst``, ``ssc``, ``dataman``) optional, `ADIOS2 Engine type `__ for `openPMD `_ data dumps. 
See full list of engines at `ADIOS2 readthedocs `__ diff --git a/Docs/source/usage/workflows/ml_materials/run_warpx_training.py b/Docs/source/usage/workflows/ml_materials/run_warpx_training.py index 9a246de1cc2..3f0bb0ba0da 100644 --- a/Docs/source/usage/workflows/ml_materials/run_warpx_training.py +++ b/Docs/source/usage/workflows/ml_materials/run_warpx_training.py @@ -274,7 +274,7 @@ def get_laser(antenna_z, profile_t_peak, fill_in=True): data_list=diag_particle_list, write_dir="lab_particle_diags", warpx_format="openpmd", - warpx_openpmd_backend="bp", + warpx_openpmd_backend="bp5", ) btd_field_diag = picmi.LabFrameFieldDiagnostic( @@ -287,7 +287,7 @@ def get_laser(antenna_z, profile_t_peak, fill_in=True): warpx_upper_bound=[128.0e-6, 0.0e-6, 0.0], write_dir="lab_field_diags", warpx_format="openpmd", - warpx_openpmd_backend="bp", + warpx_openpmd_backend="bp5", ) field_diag = picmi.FieldDiagnostic( diff --git a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp index aeb26656b46..ae54fd13e5a 100644 --- a/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp +++ b/Source/Diagnostics/FlushFormats/FlushFormatOpenPMD.cpp @@ -46,6 +46,13 @@ FlushFormatOpenPMD::FlushFormatOpenPMD (const std::string& diag_name) encoding = openPMD::IterationEncoding::fileBased; } + // BP5 does not support groupBased (metadata explosion) + if ((openpmd_backend == "bp5" || openpmd_backend == "bp") && + (encoding == openPMD::IterationEncoding::groupBased)) + { + throw std::runtime_error("BeamMonitor: groupBased encoding not supported for BP5."); + } + std::string diag_type_str; pp_diag_name.get("diag_type", diag_type_str); if (diag_type_str == "BackTransformed") diff --git a/Source/Diagnostics/OpenPMDHelpFunction.cpp b/Source/Diagnostics/OpenPMDHelpFunction.cpp index cc798adc29e..38284b69714 100644 --- a/Source/Diagnostics/OpenPMDHelpFunction.cpp +++ b/Source/Diagnostics/OpenPMDHelpFunction.cpp @@ -17,9 +17,9 @@ WarpXOpenPMDFileType () std::string openPMDFileType; #ifdef WARPX_USE_OPENPMD #if openPMD_HAVE_ADIOS2==1 - openPMDFileType = "bp"; + openPMDFileType = "bp5"; #elif openPMD_HAVE_ADIOS1==1 - openPMDFileType = "bp"; + openPMDFileType = "bp"; // bp3 #elif openPMD_HAVE_HDF5==1 openPMDFileType = "h5"; #else diff --git a/Source/Diagnostics/WarpXOpenPMD.H b/Source/Diagnostics/WarpXOpenPMD.H index a25b1057da9..c6fadb34bf3 100644 --- a/Source/Diagnostics/WarpXOpenPMD.H +++ b/Source/Diagnostics/WarpXOpenPMD.H @@ -82,7 +82,7 @@ public: /** Initialize openPMD I/O routines * * @param ie iteration encoding from openPMD: "group, file, variable" - * @param filetype file backend, e.g. "bp" or "h5" + * @param filetype file backend, e.g. "bp5", "bp4", or "h5" * @param operator_type openPMD-api backend operator (compressor) for ADIOS2 * @param operator_parameters openPMD-api backend operator parameters for ADIOS2 * @param engine_type ADIOS engine for output @@ -149,7 +149,7 @@ public: bool isBTD = false, const amrex::Geometry& full_BTD_snapshot=amrex::Geometry() ) const; - /** Return OpenPMD File type ("bp" or "h5" or "json")*/ + /** Return OpenPMD File type ("bp5", "bp4", "h5" or "json")*/ std::string OpenPMDFileType () { return m_OpenPMDFileType; } private: @@ -350,7 +350,7 @@ private: int m_MPISize = 1; openPMD::IterationEncoding m_Encoding = openPMD::IterationEncoding::fileBased; - std::string m_OpenPMDFileType = "bp"; //! MPI-parallel openPMD backend: bp or h5 + std::string m_OpenPMDFileType = "bp5"; //! 
MPI-parallel openPMD backend: bp5, bp4 or h5 std::string m_OpenPMDoptions = "{}"; //! JSON option string for openPMD::Series constructor int m_CurrentStep = -1; From cade3190ebfd59b66bd8a4954142e9860d824023 Mon Sep 17 00:00:00 2001 From: Axel Huebl Date: Wed, 5 Mar 2025 12:24:39 -0800 Subject: [PATCH 275/278] Python: `setuptools[core]` (#5736) The pip instructions for setuptools now require specifying the `[core]` extra; otherwise, dependent packages are picked up from the system and are usually outdated, which causes errors. X-ref: - https://setuptools.pypa.io/en/latest/userguide/quickstart.html - https://github.com/pypa/setuptools/issues/4483#issuecomment-2236528158 --- .azure-pipelines.yml | 2 +- .github/workflows/cuda.yml | 4 ++-- .github/workflows/intel.yml | 6 +++--- .github/workflows/macos.yml | 2 +- .github/workflows/ubuntu.yml | 2 +- .github/workflows/windows.yml | 4 ++-- Docs/source/install/cmake.rst | 2 +- Docs/source/install/hpc/lawrencium.rst | 2 +- Docs/source/install/hpc/lxplus.rst | 2 +- Docs/source/install/users.rst | 2 +- Tools/machines/adastra-cines/install_dependencies.sh | 2 +- Tools/machines/crusher-olcf/install_dependencies.sh | 2 +- Tools/machines/dane-llnl/install_dependencies.sh | 2 +- Tools/machines/frontier-olcf/install_dependencies.sh | 2 +- .../machines/greatlakes-umich/install_v100_dependencies.sh | 2 +- Tools/machines/hpc3-uci/install_gpu_dependencies.sh | 2 +- .../machines/lassen-llnl/install_v100_dependencies_toss3.sh | 2 +- Tools/machines/leonardo-cineca/install_gpu_dependencies.sh | 2 +- Tools/machines/lonestar6-tacc/install_a100_dependencies.sh | 2 +- Tools/machines/lumi-csc/install_dependencies.sh | 2 +- Tools/machines/perlmutter-nersc/Containerfile | 2 +- Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh | 2 +- Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh | 2 +- Tools/machines/pitzer-osc/install_cpu_dependencies.sh | 2 +- Tools/machines/pitzer-osc/install_v100_dependencies.sh | 2 +- Tools/machines/polaris-alcf/install_gpu_dependencies.sh | 2 +- Tools/machines/summit-olcf/install_gpu_dependencies.sh | 2 +- Tools/machines/tioga-llnl/install_mi300a_dependencies.sh | 2 +- 28 files changed, 32 insertions(+), 32 deletions(-) diff --git a/.azure-pipelines.yml b/.azure-pipelines.yml index 427cf21600b..372fdc6dc69 100644 --- a/.azure-pipelines.yml +++ b/.azure-pipelines.yml @@ -80,7 +80,7 @@ jobs: python3 -m pip install --upgrade pip python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging - python3 -m pip install --upgrade setuptools + python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade wheel python3 -m pip install --upgrade virtualenv python3 -m pip install --upgrade pipx diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 30e4adf1b51..6208f272f99 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -88,7 +88,7 @@ jobs: cmake --build build_sp -j 4 python3 -m pip install --upgrade pip - python3 -m pip install --upgrade build packaging setuptools wheel + python3 -m pip install --upgrade build packaging setuptools[core] wheel export WARPX_MPI=ON export PYWARPX_LIB_DIR=$PWD/build_sp/lib/site-packages/pywarpx/ python3 -m pip wheel .
@@ -191,7 +191,7 @@ jobs: #export CFLAGS="-noswitcherror" #python3 -m pip install --upgrade pip - #python3 -m pip install --upgrade build packaging setuptools wheel + #python3 -m pip install --upgrade build packaging setuptools[core] wheel #export WARPX_MPI=ON #export PYWARPX_LIB_DIR=$PWD/build/lib/site-packages/pywarpx/ #python3 -m pip wheel . diff --git a/.github/workflows/intel.yml b/.github/workflows/intel.yml index 25819e188e3..cf160ab5760 100644 --- a/.github/workflows/intel.yml +++ b/.github/workflows/intel.yml @@ -50,7 +50,7 @@ jobs: export CC=$(which icc) python3 -m pip install --upgrade pip - python3 -m pip install --upgrade build packaging setuptools wheel + python3 -m pip install --upgrade build packaging setuptools[core] wheel cmake -S . -B build_dp \ -DCMAKE_VERBOSE_MAKEFILE=ON \ @@ -118,7 +118,7 @@ jobs: export CC=$(which icx) python3 -m pip install --upgrade pip - python3 -m pip install --upgrade build packaging setuptools wheel + python3 -m pip install --upgrade build packaging setuptools[core] wheel cmake -S . -B build_sp \ -DCMAKE_CXX_FLAGS_RELEASE="-O1 -DNDEBUG" \ @@ -201,6 +201,6 @@ jobs: # Skip this as it will copy the binary artifacts and we are tight on disk space # python3 -m pip install --upgrade pip - # python3 -m pip install --upgrade build packaging setuptools wheel + # python3 -m pip install --upgrade build packaging setuptools[core] wheel # PYWARPX_LIB_DIR=$PWD/build_sp/lib/site-packages/pywarpx/ python3 -m pip wheel . # python3 -m pip install *.whl diff --git a/.github/workflows/macos.yml b/.github/workflows/macos.yml index 87482cc6166..e5fea8cd5cf 100644 --- a/.github/workflows/macos.yml +++ b/.github/workflows/macos.yml @@ -47,7 +47,7 @@ jobs: - name: install pip dependencies run: | python3 -m pip install --upgrade pip - python3 -m pip install --upgrade build packaging setuptools wheel + python3 -m pip install --upgrade build packaging setuptools[core] wheel python3 -m pip install --upgrade mpi4py python3 -m pip install --upgrade -r Regression/requirements.txt - name: CCache Cache diff --git a/.github/workflows/ubuntu.yml b/.github/workflows/ubuntu.yml index d657daf5793..a9a824eca56 100644 --- a/.github/workflows/ubuntu.yml +++ b/.github/workflows/ubuntu.yml @@ -211,7 +211,7 @@ jobs: ccache -z python3 -m pip install --upgrade pip - python3 -m pip install --upgrade build packaging setuptools wheel + python3 -m pip install --upgrade build packaging setuptools[core] wheel export CXXFLAGS="-Werror -Wno-error=pass-failed" diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index 7f964239a02..877290f5844 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -52,7 +52,7 @@ jobs: python3 -m pip install --upgrade pip if(!$?) { Exit $LASTEXITCODE } - python3 -m pip install --upgrade build packaging setuptools wheel + python3 -m pip install --upgrade build packaging setuptools[core] wheel if(!$?) { Exit $LASTEXITCODE } cmake --build build --config Debug --target install if(!$?) 
{ Exit $LASTEXITCODE } @@ -113,7 +113,7 @@ jobs: python3 -m pip install --upgrade pip if errorlevel 1 exit 1 - python3 -m pip install --upgrade build packaging setuptools wheel + python3 -m pip install --upgrade build packaging setuptools[core] wheel if errorlevel 1 exit 1 python3 -m pip install --upgrade -r requirements.txt if errorlevel 1 exit 1 diff --git a/Docs/source/install/cmake.rst b/Docs/source/install/cmake.rst index fbdc6809853..7b1b732a4d1 100644 --- a/Docs/source/install/cmake.rst +++ b/Docs/source/install/cmake.rst @@ -222,7 +222,7 @@ PICMI Python Bindings .. code-block:: bash python3 -m pip install -U pip - python3 -m pip install -U build packaging setuptools wheel + python3 -m pip install -U build packaging setuptools[core] wheel python3 -m pip install -U cmake python3 -m pip install -r requirements.txt diff --git a/Docs/source/install/hpc/lawrencium.rst b/Docs/source/install/hpc/lawrencium.rst index f163531a29a..f842f2a3ae7 100644 --- a/Docs/source/install/hpc/lawrencium.rst +++ b/Docs/source/install/hpc/lawrencium.rst @@ -83,7 +83,7 @@ Optionally, download and install Python packages for :ref:`PICMI ` python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel - python3 -m pip install --upgrade setuptools + python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Docs/source/install/hpc/lxplus.rst b/Docs/source/install/hpc/lxplus.rst index fc1e5d4286d..dd8ce634484 100644 --- a/Docs/source/install/hpc/lxplus.rst +++ b/Docs/source/install/hpc/lxplus.rst @@ -148,7 +148,7 @@ Now, ensure Python tooling is up-to-date: .. code-block:: bash python3 -m pip install -U pip - python3 -m pip install -U build packaging setuptools wheel + python3 -m pip install -U build packaging setuptools[core] wheel python3 -m pip install -U cmake Then we compile WarpX as in the previous section (with or without CUDA) adding ``-DWarpX_PYTHON=ON`` and then we install it into our Python: diff --git a/Docs/source/install/users.rst b/Docs/source/install/users.rst index 650cbacd4d0..d2cb3c7e60d 100644 --- a/Docs/source/install/users.rst +++ b/Docs/source/install/users.rst @@ -109,7 +109,7 @@ Given that you have the :ref:`WarpX dependencies ` install .. 
code-block:: bash python3 -m pip install -U pip - python3 -m pip install -U build packaging setuptools wheel + python3 -m pip install -U build packaging setuptools[core] wheel python3 -m pip install -U cmake python3 -m pip wheel -v git+https://github.com/ECP-WarpX/WarpX.git diff --git a/Tools/machines/adastra-cines/install_dependencies.sh b/Tools/machines/adastra-cines/install_dependencies.sh index 896b775db3b..9864869f5a8 100755 --- a/Tools/machines/adastra-cines/install_dependencies.sh +++ b/Tools/machines/adastra-cines/install_dependencies.sh @@ -104,7 +104,7 @@ python3 -m pip install --upgrade pip python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Tools/machines/crusher-olcf/install_dependencies.sh b/Tools/machines/crusher-olcf/install_dependencies.sh index 6e9f97eddae..39e1b9af088 100755 --- a/Tools/machines/crusher-olcf/install_dependencies.sh +++ b/Tools/machines/crusher-olcf/install_dependencies.sh @@ -87,7 +87,7 @@ python3 -m pip install --upgrade pip python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Tools/machines/dane-llnl/install_dependencies.sh b/Tools/machines/dane-llnl/install_dependencies.sh index 25e8e965777..2a21fc4758d 100755 --- a/Tools/machines/dane-llnl/install_dependencies.sh +++ b/Tools/machines/dane-llnl/install_dependencies.sh @@ -100,7 +100,7 @@ python3 -m pip install --upgrade pip python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Tools/machines/frontier-olcf/install_dependencies.sh b/Tools/machines/frontier-olcf/install_dependencies.sh index 17b4955e7c4..18d01d03e30 100755 --- a/Tools/machines/frontier-olcf/install_dependencies.sh +++ b/Tools/machines/frontier-olcf/install_dependencies.sh @@ -86,7 +86,7 @@ python3 -m pip install --upgrade pip python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade "cython>=3.0" # cupy for ROCm # https://docs.cupy.dev/en/stable/install.html#building-cupy-for-rocm-from-source diff --git a/Tools/machines/greatlakes-umich/install_v100_dependencies.sh b/Tools/machines/greatlakes-umich/install_v100_dependencies.sh index c6925442d9f..126b0b9f854 100755 --- a/Tools/machines/greatlakes-umich/install_v100_dependencies.sh +++ b/Tools/machines/greatlakes-umich/install_v100_dependencies.sh @@ -118,7 +118,7 @@ python3 -m pip install --upgrade pip python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip 
install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Tools/machines/hpc3-uci/install_gpu_dependencies.sh b/Tools/machines/hpc3-uci/install_gpu_dependencies.sh index e4b9f4caa5a..93c5ace1df7 100755 --- a/Tools/machines/hpc3-uci/install_gpu_dependencies.sh +++ b/Tools/machines/hpc3-uci/install_gpu_dependencies.sh @@ -117,7 +117,7 @@ python3 -m pip install --upgrade pip python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade pipx python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy diff --git a/Tools/machines/lassen-llnl/install_v100_dependencies_toss3.sh b/Tools/machines/lassen-llnl/install_v100_dependencies_toss3.sh index b4e80f2ddea..32106638721 100644 --- a/Tools/machines/lassen-llnl/install_v100_dependencies_toss3.sh +++ b/Tools/machines/lassen-llnl/install_v100_dependencies_toss3.sh @@ -118,7 +118,7 @@ python3 -m pip install --upgrade pip python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Tools/machines/leonardo-cineca/install_gpu_dependencies.sh b/Tools/machines/leonardo-cineca/install_gpu_dependencies.sh index 2df123ba0ee..ea867b5e0eb 100644 --- a/Tools/machines/leonardo-cineca/install_gpu_dependencies.sh +++ b/Tools/machines/leonardo-cineca/install_gpu_dependencies.sh @@ -85,7 +85,7 @@ python3 -m pip install --upgrade pip python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh b/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh index 1ade3fe77d4..c3ddb82ab73 100755 --- a/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh +++ b/Tools/machines/lonestar6-tacc/install_a100_dependencies.sh @@ -108,7 +108,7 @@ python3 -m pip install --upgrade pip python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Tools/machines/lumi-csc/install_dependencies.sh b/Tools/machines/lumi-csc/install_dependencies.sh index e149b8abd43..4ae2f597441 100755 --- a/Tools/machines/lumi-csc/install_dependencies.sh +++ b/Tools/machines/lumi-csc/install_dependencies.sh @@ -154,7 +154,7 @@ python3 -m pip install --upgrade pip python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git 
a/Tools/machines/perlmutter-nersc/Containerfile b/Tools/machines/perlmutter-nersc/Containerfile index 5a8553c2619..7c5eec7f4e6 100644 --- a/Tools/machines/perlmutter-nersc/Containerfile +++ b/Tools/machines/perlmutter-nersc/Containerfile @@ -155,7 +155,7 @@ RUN python3 -m venv /opt/venv && \ cython \ packaging \ build \ - setuptools + setuptools[core] # Set up the environment for the virtual environment ENV PATH="/opt/venv/bin:${PATH}" diff --git a/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh b/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh index 5be1ef7b9aa..ec4c0d178d2 100755 --- a/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh +++ b/Tools/machines/perlmutter-nersc/install_cpu_dependencies.sh @@ -139,7 +139,7 @@ python3 -m pip install --upgrade pip python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh b/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh index a029b428e8a..6c444418542 100755 --- a/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh +++ b/Tools/machines/perlmutter-nersc/install_gpu_dependencies.sh @@ -139,7 +139,7 @@ python3 -m pip install --upgrade pip python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Tools/machines/pitzer-osc/install_cpu_dependencies.sh b/Tools/machines/pitzer-osc/install_cpu_dependencies.sh index cc74adb1a52..46df4c454af 100644 --- a/Tools/machines/pitzer-osc/install_cpu_dependencies.sh +++ b/Tools/machines/pitzer-osc/install_cpu_dependencies.sh @@ -139,7 +139,7 @@ python3 -m pip cache purge python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Tools/machines/pitzer-osc/install_v100_dependencies.sh b/Tools/machines/pitzer-osc/install_v100_dependencies.sh index eae1a01e39e..73c0c471768 100644 --- a/Tools/machines/pitzer-osc/install_v100_dependencies.sh +++ b/Tools/machines/pitzer-osc/install_v100_dependencies.sh @@ -139,7 +139,7 @@ python3 -m pip cache purge python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Tools/machines/polaris-alcf/install_gpu_dependencies.sh b/Tools/machines/polaris-alcf/install_gpu_dependencies.sh index 48744741a21..c46ada72bcc 100755 --- a/Tools/machines/polaris-alcf/install_gpu_dependencies.sh +++ b/Tools/machines/polaris-alcf/install_gpu_dependencies.sh @@ -102,7 +102,7 @@ python3 -m pip install --upgrade pip python3 -m pip install --upgrade build 
python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Tools/machines/summit-olcf/install_gpu_dependencies.sh b/Tools/machines/summit-olcf/install_gpu_dependencies.sh index 042e34538d0..ec17adf7307 100755 --- a/Tools/machines/summit-olcf/install_gpu_dependencies.sh +++ b/Tools/machines/summit-olcf/install_gpu_dependencies.sh @@ -100,7 +100,7 @@ python3 -m pip install --upgrade pip python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas diff --git a/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh b/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh index d3ade42fca9..33976b20740 100644 --- a/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh +++ b/Tools/machines/tioga-llnl/install_mi300a_dependencies.sh @@ -158,7 +158,7 @@ python3 -m pip install --upgrade pip python3 -m pip install --upgrade build python3 -m pip install --upgrade packaging python3 -m pip install --upgrade wheel -python3 -m pip install --upgrade setuptools +python3 -m pip install --upgrade setuptools[core] python3 -m pip install --upgrade cython python3 -m pip install --upgrade numpy python3 -m pip install --upgrade pandas From 0bb3c26570d448715f8b0f22270ab47555b77c97 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 7 Mar 2025 00:23:38 +0100 Subject: [PATCH 276/278] WarpX class: move out PSATDCurrentCorrection and PSATDVayDeposition (#5684) `PSATDCurrentCorrection` and `PSATDVayDeposition` are member functions of the WarpX class, but they are used only in `WarpXPushFieldsEM.cpp` and they can be easily turned into pure functions. Therefore, this PR moves them inside an anonymous namespace in `WarpXPushFieldsEM.cpp`. The goal is to simplify the WarpX class.
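For context, the refactoring pattern in distilled form (a sketch using `std::vector` in place of `amrex::Vector` to keep it self-contained; the actual code is in the diff below):

```cpp
#include <memory>
#include <vector>

namespace  // anonymous: internal linkage, visible only in this .cpp
{
    // Former member state (finest_level, the solver vectors) is now passed
    // in explicitly, turning the member function into a pure function.
    template <typename T>
    void PSATDCurrentCorrection (
        int finest_level,
        std::vector<std::unique_ptr<T>>& spectral_solver_fp,
        std::vector<std::unique_ptr<T>>& spectral_solver_cp)
    {
        for (int lev = 0; lev <= finest_level; ++lev) {
            spectral_solver_fp[lev]->CurrentCorrection();
            if (spectral_solver_cp[lev]) { spectral_solver_cp[lev]->CurrentCorrection(); }
        }
    }
}

// Call sites inside the same translation unit use the qualified form
// ::PSATDCurrentCorrection(finest_level, spectral_solver_fp, spectral_solver_cp);
// where the leading :: disambiguates the free function from the old member.
```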
--- Source/FieldSolver/WarpXPushFieldsEM.cpp | 72 ++++++++++++++---------- Source/WarpX.H | 10 ---- 2 files changed, 42 insertions(+), 40 deletions(-) diff --git a/Source/FieldSolver/WarpXPushFieldsEM.cpp b/Source/FieldSolver/WarpXPushFieldsEM.cpp index 0163d158dd0..6a40e625542 100644 --- a/Source/FieldSolver/WarpXPushFieldsEM.cpp +++ b/Source/FieldSolver/WarpXPushFieldsEM.cpp @@ -100,6 +100,44 @@ namespace { solver.BackwardTransform(lev, *vector_field[2], compz, fill_guards); #endif } + + /** + * \brief Correct current in Fourier space so that the continuity equation is satisfied + */ + template <typename T> + void PSATDCurrentCorrection ( + const int finest_level, + amrex::Vector<std::unique_ptr<T>>& spectral_solver_fp, + amrex::Vector<std::unique_ptr<T>>& spectral_solver_cp) + { + for (int lev = 0; lev <= finest_level; ++lev) + { + spectral_solver_fp[lev]->CurrentCorrection(); + if (spectral_solver_cp[lev]) + { + spectral_solver_cp[lev]->CurrentCorrection(); + } + } + } + + /** + * \brief Vay deposition in Fourier space (https://doi.org/10.1016/j.jcp.2013.03.010) + */ + template <typename T> + void PSATDVayDeposition ( + const int finest_level, + amrex::Vector<std::unique_ptr<T>>& spectral_solver_fp, + amrex::Vector<std::unique_ptr<T>>& spectral_solver_cp) + { + for (int lev = 0; lev <= finest_level; ++lev) + { + spectral_solver_fp[lev]->VayDeposition(); + if (spectral_solver_cp[lev]) + { + spectral_solver_cp[lev]->VayDeposition(); + } + } + } } void WarpX::PSATDForwardTransformEB () @@ -466,32 +504,6 @@ void WarpX::PSATDForwardTransformRho ( #endif } -void WarpX::PSATDCurrentCorrection () -{ - for (int lev = 0; lev <= finest_level; ++lev) - { - spectral_solver_fp[lev]->CurrentCorrection(); - - if (spectral_solver_cp[lev]) - { - spectral_solver_cp[lev]->CurrentCorrection(); - } - } -} - -void WarpX::PSATDVayDeposition () -{ - for (int lev = 0; lev <= finest_level; ++lev) - { - spectral_solver_fp[lev]->VayDeposition(); - - if (spectral_solver_cp[lev]) - { - spectral_solver_cp[lev]->VayDeposition(); - } - } -} - void WarpX::PSATDSubtractCurrentPartialSumsAvg () { using ablastr::fields::Direction; @@ -744,7 +756,7 @@ WarpX::PushPSATD (amrex::Real start_time) PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 1, rho_new); // Correct J in k-space - PSATDCurrentCorrection(); + ::PSATDCurrentCorrection(finest_level, spectral_solver_fp, spectral_solver_cp); // Inverse FFT of J PSATDBackwardTransformJ(current_fp_string, current_cp_string); @@ -759,7 +771,7 @@ WarpX::PushPSATD (amrex::Real start_time) PSATDForwardTransformRho(rho_fp_string, rho_cp_string, 1, rho_new); // Compute J from D in k-space - PSATDVayDeposition(); + ::PSATDVayDeposition(finest_level, spectral_solver_fp, spectral_solver_cp); // Inverse FFT of J, subtract cumulative sums of D current_fp_string = "current_fp"; @@ -797,7 +809,7 @@ WarpX::PushPSATD (amrex::Real start_time) #endif // Correct J in k-space - PSATDCurrentCorrection(); + ::PSATDCurrentCorrection(finest_level, spectral_solver_fp, spectral_solver_cp); // Inverse FFT of J PSATDBackwardTransformJ(current_fp_string, current_cp_string); @@ -813,7 +825,7 @@ WarpX::PushPSATD (amrex::Real start_time) PSATDForwardTransformJ(current_fp_string, current_cp_string); // Compute J from D in k-space - PSATDVayDeposition(); + ::PSATDVayDeposition(finest_level, spectral_solver_fp, spectral_solver_cp); // Inverse FFT of J, subtract cumulative sums of D current_fp_string = "current_fp"; diff --git a/Source/WarpX.H b/Source/WarpX.H index b70d23b9e33..56dfa3e2f10 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -1655,16 +1655,6 @@ private: */ void PSATDBackwardTransformG (); - /**
- * \brief Correct current in Fourier space so that the continuity equation is satisfied - */ - void PSATDCurrentCorrection (); - - /** - * \brief Vay deposition in Fourier space (https://doi.org/10.1016/j.jcp.2013.03.010) - */ - void PSATDVayDeposition (); - /** * \brief Update all necessary fields in spectral space */ From 7d7ddcadd05ab59d006ca31b86f6a4a3d453cf25 Mon Sep 17 00:00:00 2001 From: Edoardo Zoni <59625522+EZoni@users.noreply.github.com> Date: Fri, 7 Mar 2025 10:08:16 -0800 Subject: [PATCH 277/278] Release 25.03 (#5734) Prepare the March release of WarpX, following the [documentation](https://warpx.readthedocs.io/en/latest/maintenance/release.html): 1. Update to latest AMReX release: ```console ./Tools/Release/updateAMReX.py ``` 2. Update to latest pyAMReX release: ```console ./Tools/Release/updatepyAMReX.py ``` 3. Update to latest PICSAR release (no changes, still 25.01): ```console ./Tools/Release/updatePICSAR.py ``` 4. Update WarpX version number: ```console ./Tools/Release/newVersion.sh ``` --- .github/workflows/cuda.yml | 2 +- CMakeLists.txt | 2 +- Docs/source/conf.py | 4 ++-- Python/setup.py | 2 +- cmake/dependencies/AMReX.cmake | 4 ++-- cmake/dependencies/pyAMReX.cmake | 4 ++-- setup.py | 2 +- 7 files changed, 10 insertions(+), 10 deletions(-) diff --git a/.github/workflows/cuda.yml b/.github/workflows/cuda.yml index 6208f272f99..ebe57ba8554 100644 --- a/.github/workflows/cuda.yml +++ b/.github/workflows/cuda.yml @@ -127,7 +127,7 @@ jobs: which nvcc || echo "nvcc not in PATH!" git clone https://github.com/AMReX-Codes/amrex.git ../amrex - cd ../amrex && git checkout --detach 044d52f7d309e340939d7cae449fd83209da317f && cd - + cd ../amrex && git checkout --detach 25.03 && cd - make COMP=gcc QED=FALSE USE_MPI=TRUE USE_GPU=TRUE USE_OMP=FALSE USE_FFT=TRUE USE_CCACHE=TRUE -j 4 ccache -s diff --git a/CMakeLists.txt b/CMakeLists.txt index bb3ee66f786..bdc66feca01 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -1,7 +1,7 @@ # Preamble #################################################################### # cmake_minimum_required(VERSION 3.24.0) -project(WarpX VERSION 25.02) +project(WarpX VERSION 25.03) include(${WarpX_SOURCE_DIR}/cmake/WarpXFunctions.cmake) diff --git a/Docs/source/conf.py b/Docs/source/conf.py index a5fed3a4614..009279500a0 100644 --- a/Docs/source/conf.py +++ b/Docs/source/conf.py @@ -112,9 +112,9 @@ def __init__(self, *args, **kwargs): # built documents. # # The short X.Y version. -version = "25.02" +version = "25.03" # The full version, including alpha/beta/rc tags. -release = "25.02" +release = "25.03" # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
diff --git a/Python/setup.py b/Python/setup.py index e0ec6c98a7d..c801c2c0a74 100644 --- a/Python/setup.py +++ b/Python/setup.py @@ -65,7 +65,7 @@ setup( name="pywarpx", - version="25.02", + version="25.03", packages=["pywarpx"], package_dir={"pywarpx": "pywarpx"}, description="""Wrapper of WarpX""", diff --git a/cmake/dependencies/AMReX.cmake b/cmake/dependencies/AMReX.cmake index 3c389a0a01b..0a4cc0e1ea9 100644 --- a/cmake/dependencies/AMReX.cmake +++ b/cmake/dependencies/AMReX.cmake @@ -271,7 +271,7 @@ macro(find_amrex) endif() set(COMPONENT_PRECISION ${WarpX_PRECISION} P${WarpX_PARTICLE_PRECISION}) - find_package(AMReX 25.02 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} ${COMPONENT_FFT} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) + find_package(AMReX 25.03 CONFIG REQUIRED COMPONENTS ${COMPONENT_ASCENT} ${COMPONENT_CATALYST} ${COMPONENT_DIMS} ${COMPONENT_EB} ${COMPONENT_FFT} PARTICLES ${COMPONENT_PIC} ${COMPONENT_PRECISION} ${COMPONENT_SENSEI} LSOLVERS) # note: TINYP skipped because user-configured and optional # AMReX CMake helper scripts @@ -294,7 +294,7 @@ set(WarpX_amrex_src "" set(WarpX_amrex_repo "https://github.com/AMReX-Codes/amrex.git" CACHE STRING "Repository URI to pull and build AMReX from if(WarpX_amrex_internal)") -set(WarpX_amrex_branch "044d52f7d309e340939d7cae449fd83209da317f" +set(WarpX_amrex_branch "25.03" CACHE STRING "Repository branch for WarpX_amrex_repo if(WarpX_amrex_internal)") diff --git a/cmake/dependencies/pyAMReX.cmake b/cmake/dependencies/pyAMReX.cmake index be7c64acd69..8e758185ec7 100644 --- a/cmake/dependencies/pyAMReX.cmake +++ b/cmake/dependencies/pyAMReX.cmake @@ -59,7 +59,7 @@ function(find_pyamrex) endif() elseif(NOT WarpX_pyamrex_internal) # TODO: MPI control - find_package(pyAMReX 25.02 CONFIG REQUIRED) + find_package(pyAMReX 25.03 CONFIG REQUIRED) message(STATUS "pyAMReX: Found version '${pyAMReX_VERSION}'") endif() endfunction() @@ -74,7 +74,7 @@ option(WarpX_pyamrex_internal "Download & build pyAMReX" ON) set(WarpX_pyamrex_repo "https://github.com/AMReX-Codes/pyamrex.git" CACHE STRING "Repository URI to pull and build pyamrex from if(WarpX_pyamrex_internal)") -set(WarpX_pyamrex_branch "3088ea12a1a6287246bf027c4235f10e92472450" +set(WarpX_pyamrex_branch "25.03" CACHE STRING "Repository branch for WarpX_pyamrex_repo if(WarpX_pyamrex_internal)") diff --git a/setup.py b/setup.py index fae11aa0654..8f3a9e328ce 100644 --- a/setup.py +++ b/setup.py @@ -280,7 +280,7 @@ def build_extension(self, ext): setup( name="pywarpx", # note PEP-440 syntax: x.y.zaN but x.y.z.devN - version="25.02", + version="25.03", packages=["pywarpx"], package_dir={"pywarpx": "Python/pywarpx"}, author="Jean-Luc Vay, David P. Grote, Maxence Thévenet, Rémi Lehe, Andrew Myers, Weiqun Zhang, Axel Huebl, et al.", From 14a4e8fbdd64dfe2a96c9715788c88c7f4925062 Mon Sep 17 00:00:00 2001 From: Luca Fedeli Date: Fri, 7 Mar 2025 20:52:56 +0100 Subject: [PATCH 278/278] WarpX class: move UpdateCurrentNodalToStag to anonymous namespace in WarpXComm.cpp (#5672) `UpdateCurrentNodalToStag` is a member function of the WarpX class, but it is used only inside `WarpXComm.cpp` and it is defined there. Therefore, this PR turns it into a pure function and moves it inside an anonymous namespace in `WarpXComm.cpp`. This simplifies the interface of the WarpX class.
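As background for the diff that follows: current centering interpolates nodal current values onto the staggered (Yee) grid points. A toy 1D, second-order version of that operation looks like this (a plain C++ sketch; the real implementation below handles arbitrary order via Fornberg stencil coefficients and `warpx_interp`):

```cpp
#include <cstddef>
#include <vector>

// Average nodal values onto half-integer (staggered) points:
// dst[i] sits midway between src[i] and src[i + 1].
std::vector<double> nodal_to_staggered (std::vector<double> const& src)
{
    std::vector<double> dst(src.size() - 1);
    for (std::size_t i = 0; i < dst.size(); ++i) {
        dst[i] = 0.5 * (src[i] + src[i + 1]);  // order-2 centering stencil {1/2, 1/2}
    }
    return dst;
}
```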
--- Source/Parallelization/WarpXComm.cpp | 122 ++++++++++++++---------- Source/WarpX.H | 10 --- 2 files changed, 72 insertions(+), 60 deletions(-) diff --git a/Source/Parallelization/WarpXComm.cpp b/Source/Parallelization/WarpXComm.cpp index 3adf4389a46..a06ad61af19 100644 --- a/Source/Parallelization/WarpXComm.cpp +++ b/Source/Parallelization/WarpXComm.cpp @@ -53,6 +53,69 @@ using namespace amrex; using warpx::fields::FieldType; +namespace +{ + /** + * \brief This function is called if \c warpx.do_current_centering = 1 and + * it centers the currents from a nodal grid to a staggered grid (Yee) using + * finite-order interpolation based on the Fornberg coefficients. + * + * \param[in,out] dst destination \c MultiFab where the results of the finite-order centering are stored + * \param[in] src source \c MultiFab that contains the values of the nodal current to be centered + * \param[in] cc_nox order of finite-order centering of currents, along x + * \param[in] cc_noy order of finite-order centering of currents, along y + * \param[in] cc_noz order of finite-order centering of currents, along z + * \param[in] device_current_centering_stencil_coeffs_x stencil coefficients for finite-order centering of currents, along x + * \param[in] device_current_centering_stencil_coeffs_y stencil coefficients for finite-order centering of currents, along y + * \param[in] device_current_centering_stencil_coeffs_z stencil coefficients for finite-order centering of currents, along z + */ + void UpdateCurrentNodalToStag ( + amrex::MultiFab& dst, const amrex::MultiFab& src, + const int cc_nox, const int cc_noy, const int cc_noz, + const amrex::Gpu::DeviceVector<amrex::Real>& device_current_centering_stencil_coeffs_x, + const amrex::Gpu::DeviceVector<amrex::Real>& device_current_centering_stencil_coeffs_y, + const amrex::Gpu::DeviceVector<amrex::Real>& device_current_centering_stencil_coeffs_z) + { + // If source and destination MultiFabs have the same index type, a simple copy is enough + // (for example, this happens with the current along y in 2D, which is always fully nodal) + if (dst.ixType() == src.ixType()) + { + amrex::MultiFab::Copy(dst, src, 0, 0, dst.nComp(), dst.nGrowVect()); + return; + } + + amrex::IntVect const& dst_stag = dst.ixType().toIntVect(); + + // Source MultiFab always has nodal index type when this function is called + amrex::IntVect const& src_stag = amrex::IntVect::TheNodeVector(); + +#ifdef AMREX_USE_OMP + #pragma omp parallel if (Gpu::notInLaunchRegion()) +#endif + for (MFIter mfi(dst, TilingIfNotGPU()); mfi.isValid(); ++mfi) + { + // Loop over full box including ghost cells + // (input arrays will be padded with zeros beyond ghost cells + // for out-of-bound accesses due to large-stencil operations) + const Box bx = mfi.growntilebox(); + + amrex::Array4<amrex::Real const> const& src_arr = src.const_array(mfi); + amrex::Array4<amrex::Real> const& dst_arr = dst.array(mfi); + + // Device vectors of stencil coefficients used for finite-order centering of currents + amrex::Real const * stencil_coeffs_x = device_current_centering_stencil_coeffs_x.data(); + amrex::Real const * stencil_coeffs_y = device_current_centering_stencil_coeffs_y.data(); + amrex::Real const * stencil_coeffs_z = device_current_centering_stencil_coeffs_z.data(); + + amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE (int j, int k, int l) noexcept + { + warpx_interp(j, k, l, dst_arr, src_arr, dst_stag, src_stag, cc_nox, cc_noy, cc_noz, + stencil_coeffs_x, stencil_coeffs_y, stencil_coeffs_z); + }); + } + } +} + void WarpX::UpdateAuxilaryData () { @@ -594,53 +657,6 @@
WarpX::UpdateAuxilaryDataSameType () } } -void WarpX::UpdateCurrentNodalToStag (amrex::MultiFab& dst, amrex::MultiFab const& src) -{ - // If source and destination MultiFabs have the same index type, a simple copy is enough - // (for example, this happens with the current along y in 2D, which is always fully nodal) - if (dst.ixType() == src.ixType()) - { - amrex::MultiFab::Copy(dst, src, 0, 0, dst.nComp(), dst.nGrowVect()); - return; - } - - amrex::IntVect const& dst_stag = dst.ixType().toIntVect(); - - // Source MultiFab always has nodal index type when this function is called - amrex::IntVect const& src_stag = amrex::IntVect::TheNodeVector(); - -#ifdef AMREX_USE_OMP -#pragma omp parallel if (Gpu::notInLaunchRegion()) -#endif - - for (MFIter mfi(dst, TilingIfNotGPU()); mfi.isValid(); ++mfi) - { - // Loop over full box including ghost cells - // (input arrays will be padded with zeros beyond ghost cells - // for out-of-bound accesses due to large-stencil operations) - const Box bx = mfi.growntilebox(); - - amrex::Array4<amrex::Real const> const& src_arr = src.const_array(mfi); - amrex::Array4<amrex::Real> const& dst_arr = dst.array(mfi); - - // Order of finite-order centering of currents - const int cc_nox = WarpX::current_centering_nox; - const int cc_noy = WarpX::current_centering_noy; - const int cc_noz = WarpX::current_centering_noz; - - // Device vectors of stencil coefficients used for finite-order centering of currents - amrex::Real const * stencil_coeffs_x = WarpX::device_current_centering_stencil_coeffs_x.data(); - amrex::Real const * stencil_coeffs_y = WarpX::device_current_centering_stencil_coeffs_y.data(); - amrex::Real const * stencil_coeffs_z = WarpX::device_current_centering_stencil_coeffs_z.data(); - - amrex::ParallelFor(bx, [=] AMREX_GPU_DEVICE (int j, int k, int l) noexcept - { - warpx_interp(j, k, l, dst_arr, src_arr, dst_stag, src_stag, cc_nox, cc_noy, cc_noz, - stencil_coeffs_x, stencil_coeffs_y, stencil_coeffs_z); - }); - } -} - void WarpX::FillBoundaryB (IntVect ng, std::optional<bool> nodal_sync) { @@ -1094,9 +1110,15 @@ WarpX::SyncCurrent (const std::string& current_fp_string) "warpx.do_current_centering=1 not supported with more than one fine levels"); for (int lev = 0; lev <= finest_level; lev++) { - WarpX::UpdateCurrentNodalToStag(*J_fp[lev][Direction{0}], *J_fp_nodal[lev][Direction{0}]); - WarpX::UpdateCurrentNodalToStag(*J_fp[lev][Direction{1}], *J_fp_nodal[lev][Direction{1}]); - WarpX::UpdateCurrentNodalToStag(*J_fp[lev][Direction{2}], *J_fp_nodal[lev][Direction{2}]); + constexpr auto all_dirs = std::array{Direction{0}, Direction{1}, Direction{2}}; + for (const auto& dir : all_dirs){ + ::UpdateCurrentNodalToStag( + *J_fp[lev][dir], *J_fp_nodal[lev][dir], + current_centering_nox, current_centering_noy, current_centering_noz, + device_current_centering_stencil_coeffs_x, + device_current_centering_stencil_coeffs_y, + device_current_centering_stencil_coeffs_z); + } } } diff --git a/Source/WarpX.H b/Source/WarpX.H index 56dfa3e2f10..afe41c459aa 100644 --- a/Source/WarpX.H +++ b/Source/WarpX.H @@ -675,16 +675,6 @@ public: void UpdateAuxilaryDataStagToNodal (); void UpdateAuxilaryDataSameType (); - /** - * \brief This function is called if \c warpx.do_current_centering = 1 and - * it centers the currents from a nodal grid to a staggered grid (Yee) using - * finite-order interpolation based on the Fornberg coefficients.
- * - * \param[in,out] dst destination \c MultiFab where the results of the finite-order centering are stored - * \param[in] src source \c MultiFab that contains the values of the nodal current to be centered - */ - void UpdateCurrentNodalToStag (amrex::MultiFab& dst, amrex::MultiFab const& src); - // Fill boundary cells including coarse/fine boundaries void FillBoundaryB (amrex::IntVect ng, std::optional<bool> nodal_sync = std::nullopt); void FillBoundaryE (amrex::IntVect ng, std::optional<bool> nodal_sync = std::nullopt);