From ea61e26ff330e720b011a85a2f4b9040e54da870 Mon Sep 17 00:00:00 2001 From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com> Date: Mon, 5 Feb 2024 11:50:25 +0100 Subject: [PATCH 01/58] Add section about false sharing problems to documentation (#1819) * Add section to docs about false sharing * Fix typos * Fix typo * Implement suggestions --- docs/src/performance.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/src/performance.md b/docs/src/performance.md index df66f451b7..82d7f501f6 100644 --- a/docs/src/performance.md +++ b/docs/src/performance.md @@ -267,3 +267,14 @@ requires. It can thus be seen as a proxy for "energy used" and, as an extension, timing result, you need to set the analysis interval such that the `AnalysisCallback` is invoked at least once during the course of the simulation and discard the first PID value. + +## Performance issues with multi-threaded reductions +[False sharing](https://en.wikipedia.org/wiki/False_sharing) is a known performance issue +for systems with distributed caches. It also occurred for the implementation of a thread-parallel +bounds checking routine for the subcell IDP limiting +in [PR #1736](https://github.com/trixi-framework/Trixi.jl/pull/1736). +After some [testing and discussion](https://github.com/trixi-framework/Trixi.jl/pull/1736#discussion_r1423881895), +it turned out that initializing a vector of length `n * Threads.nthreads()` and only using every +n-th entry instead of a vector of length `Threads.nthreads()` fixes the problem. +Since no current processor uses cache lines larger than 128 bytes (128B), we use `n = 128B / sizeof(uEltype)` +so that the entries used by different threads lie at least one cache line apart. +Now, the bounds checking routine of the IDP limiting scales as hoped (a generic sketch of this padding pattern is shown below). From 14151e636ef654cb5421b3a7e498a9d76ed46a64 Mon Sep 17 00:00:00 2001 From: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Date: Mon, 5 Feb 2024 12:49:51 +0100 Subject: [PATCH 02/58] Add entry for gmsh tutorial in introduction (#1829) * add entry for gmsh tutorial in introduction * Update docs/literate/src/files/index.jl --------- Co-authored-by: Daniel Doehring Co-authored-by: Daniel Doehring --- docs/literate/src/files/index.jl | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/docs/literate/src/files/index.jl b/docs/literate/src/files/index.jl index e259d25fb2..26637e5b24 100644 --- a/docs/literate/src/files/index.jl +++ b/docs/literate/src/files/index.jl @@ -108,20 +108,26 @@ # software in the Trixi.jl ecosystem, and then run a simulation using Trixi.jl on said mesh. # In the end, the tutorial briefly explains how to simulate an example using AMR via `P4estMesh`. -# ### [15 Explicit time stepping](@ref time_stepping) +# ### [15 P4est mesh from gmsh](@ref p4est_from_gmsh) +#- +# This tutorial describes how to obtain a [`P4estMesh`](@ref) from an existing mesh generated +# by [`gmsh`](https://gmsh.info/) or any other meshing software that can export to the Abaqus +# input `.inp` format. The tutorial demonstrates how edges/faces can be associated with boundary conditions based on the physical nodesets. + +# ### [16 Explicit time stepping](@ref time_stepping) #- # This tutorial is about time integration using [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl). # It explains how to use their algorithms and presents two types of time step choices - with error-based # and CFL-based adaptive step size control.
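To make the padding idea from the false-sharing note in `docs/src/performance.md` above more concrete, here is a minimal, generic sketch of the pattern. It is not the actual bounds-checking code from PR #1736; the function and variable names (`threaded_minimum_padded`, `stride`, `slots`) are made up for illustration, and the per-thread minimum simply stands in for whatever thread-local quantity is accumulated.

```julia
using Base.Threads

# Per-thread minimum with padded storage: the entries used by different threads are
# spaced at least one cache line (assumed to be at most 128 bytes) apart, so no two
# threads write to the same cache line.
function threaded_minimum_padded(u::AbstractVector{T}) where {T}
    stride = max(1, div(128, sizeof(T)))   # entries per 128-byte block
    slots = fill(typemax(T), stride * nthreads())
    # `:static` pins iterations to threads, so `threadid()` is stable inside the loop
    @threads :static for i in eachindex(u)
        j = (threadid() - 1) * stride + 1  # each thread only ever touches its own slot
        slots[j] = min(slots[j], u[i])
    end
    return minimum(view(slots, 1:stride:lastindex(slots)))
end

threaded_minimum_padded(rand(1000))
```

Without the padding (i.e., with `stride = 1`), neighboring threads would update adjacent entries of `slots` and repeatedly invalidate each other's cache lines, which is exactly the false-sharing effect described above.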
-# ### [16 Differentiable programming](@ref differentiable_programming) +# ### [17 Differentiable programming](@ref differentiable_programming) #- # This part deals with some basic differentiable programming topics. For example, a Jacobian, its # eigenvalues and a curve of total energy (through the simulation) are calculated and plotted for # a few semidiscretizations. Moreover, we calculate an example for propagating errors with Measurement.jl # at the end. -# ### [17 Custom semidiscretization](@ref custom_semidiscretization) +# ### [18 Custom semidiscretization](@ref custom_semidiscretization) #- # This tutorial describes the [semidiscretiations](@ref overview-semidiscretizations) of Trixi.jl # and explains how to extend them for custom tasks. From 5fec7f42121be7d01d8645fc91fe0da8567f3946 Mon Sep 17 00:00:00 2001 From: ArseniyKholod <119304909+ArseniyKholod@users.noreply.github.com> Date: Tue, 6 Feb 2024 20:52:50 +0100 Subject: [PATCH 03/58] Getting started with trixi (#1343) * 0th tutorial v1 * 0th tutorial v2 * 0th tutorial v3 (topic for developers) * 0th tutorial v4 * 0th tutorial v5 * 0th tutorial v6 * 0th tutorial v6.1 * 0th tutorial v6.2 * 0th tutorial v7 (new example) * 0th tutorial v8 * 0th tutorial v8.1 * 0th tutorial v9 New structure + new usage example * 0th tutorial v9.1 * 0th tutorial v9.2 * Revert "0th tutorial v9.2" This reverts commit e2da5c24c71ec5ebb86ead4da9e6dc33f8b2cf7f. * 0th tutorial v9.3 (test) * Revert "0th tutorial v9.3 (test)" This reverts commit 05d3d1c22729fc65f6a85bc224e0376bf7cc79ac. * 0th tutorial v9.3 (test) * 0th tutorial v9.3 (test checks without diff. prog.) * 0th tutorial v9.3 (test new diff. prog.) * 0th tutorial v9.3 (test new diff. prog. v2) * 0th tutorial v9.3 (test update of packages) * 0th tutorial v9.3 (test update of packages v2) * 0th tutorial v9.3 (downgrade Measurements.jl) * 0th tutorial v9.4 * 0th tutorial v9.5 * 0th tutorial review * 0th tutorial review 2 * 0th tutorial v9.6 * delete test files * Revert "rename into getting_started.jl" This reverts commit 6605ece69240d8bc850c6ef0b14b94beaac5eee6. 
* Update docs/make.jl Co-authored-by: Michael Schlottke-Lakemper * Update Project.toml Co-authored-by: Michael Schlottke-Lakemper * Update Project.toml Co-authored-by: Michael Schlottke-Lakemper * Update docs/make.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * rename into getting_started.jl * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started_with_Trixi.jl Co-authored-by: Michael Schlottke-Lakemper * correction of spelling errors * cleaning out directory * Correction according to the comments above * Trixi installation for Linux * Update getting_started.jl * cross-referencing correction * spelling * Update .gitignore Co-authored-by: Michael Schlottke-Lakemper * Update docs/make.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update .gitignore * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update 
docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * new plot in Modifying an existing setup * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * julia/shell-formatting * Update getting_started.jl * Update getting_started.jl * Correction of Modifying part * Usage update * Update getting_started.jl * add Visualize the solution * spell check * correction * correction * add picture paraview * divide in two parts & correction * divide in 3 parts * Update docs/literate/src/files/getting_started.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update create_first_setup.jl * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update getting_started.jl * 
Update getting_started.jl * Update create_first_setup.jl * Update changing_trixi.jl * Update changing_trixi.jl * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/getting_started.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * move files to subfolder * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update create_first_setup.jl * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl 
Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/create_first_setup.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/changing_trixi.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/changing_trixi.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/changing_trixi.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/changing_trixi.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * Update docs/literate/src/files/changing_trixi.jl Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> * add transition between 2nd and 3rd parts * Update docs/make.jl Co-authored-by: Michael Schlottke-Lakemper * clear out folder * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update getting_started.jl * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/getting_started.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update getting_started.jl * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert 
<51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update create_first_setup.jl * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update create_first_setup.jl * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update 
docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * spelling * delete julia, tabs * add juliaup * add intro * rm * Apply suggestions from code review Co-authored-by: Daniel Doehring * Apply suggestions from code review Co-authored-by: Daniel Doehring * Update docs/literate/src/files/first_steps/getting_started.jl * Update docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/first_steps/changing_trixi.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/first_steps/create_first_setup.jl Co-authored-by: Andrew Winters * add save solution dt --------- Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com> Co-authored-by: Benedict <135045760+bgeihe@users.noreply.github.com> Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Co-authored-by: Daniel Doehring Co-authored-by: Andrew Winters --- .../src/files/first_steps/changing_trixi.jl | 77 +++++ .../files/first_steps/create_first_setup.jl | 268 ++++++++++++++++++ .../src/files/first_steps/getting_started.jl | 242 ++++++++++++++++ docs/literate/src/files/index.jl | 43 +-- docs/make.jl | 6 + 5 files changed, 618 insertions(+), 18 deletions(-) create mode 100644 docs/literate/src/files/first_steps/changing_trixi.jl create mode 100644 docs/literate/src/files/first_steps/create_first_setup.jl create mode 100644 docs/literate/src/files/first_steps/getting_started.jl diff --git a/docs/literate/src/files/first_steps/changing_trixi.jl b/docs/literate/src/files/first_steps/changing_trixi.jl new file mode 100644 index 0000000000..551377a6a7 --- /dev/null +++ b/docs/literate/src/files/first_steps/changing_trixi.jl @@ -0,0 +1,77 @@ +#src # Changing Trixi.jl itself + +# If you plan on editing Trixi.jl itself, you can download Trixi.jl locally and run it from +# the cloned directory. + + +# ## Cloning Trixi.jl + + +# ### Windows + +# If you are using Windows, you can clone Trixi.jl by using the GitHub Desktop tool: +# - If you do not have a GitHub account yet, create it on +# the [GitHub website](https://github.com/join). +# - Download and install [GitHub Desktop](https://desktop.github.com/) and then log in to +# your account. +# - Open GitHub Desktop, press `Ctrl+Shift+O`. +# - In the opened window, paste `trixi-framework/Trixi.jl` and choose the path to the folder where +# you want to save Trixi.jl. Then click `Clone` and Trixi.jl will be cloned to your computer. + +# Now you cloned Trixi.jl and only need to tell Julia to use the local clone as the package sources: +# - Open a terminal using `Win+r` and `cmd`. Navigate to the folder with the cloned Trixi.jl using `cd`. +# - Create a new directory `run`, enter it, and start Julia with the `--project=.` flag: +# ```shell +# mkdir run +# cd run +# julia --project=. 
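+# # Note: --project=. makes the current directory (here: run/) the active Julia project,
+# # so the packages installed in the next step are recorded in run/Project.toml.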
+# ``` +# - Now run the following commands to install all relevant packages: +# ```julia +# using Pkg; Pkg.develop(PackageSpec(path="..")) # Tell Julia to use the local Trixi.jl clone +# Pkg.add(["OrdinaryDiffEq", "Plots"]) # Install additional packages +# ``` + +# Now you already installed Trixi.jl from your local clone. Note that if you installed Trixi.jl +# this way, you always have to start Julia with the `--project` flag set to your `run` directory, +# e.g., +# ```shell +# julia --project=. +# ``` +# if already inside the `run` directory. + + +# ### Linux + +# You can clone Trixi.jl to your computer by executing the following commands: +# ```shell +# git clone git@github.com:trixi-framework/Trixi.jl.git +# # If an error occurs, try the following: +# # git clone https://github.com/trixi-framework/Trixi.jl +# cd Trixi.jl +# mkdir run +# cd run +# julia --project=. -e 'using Pkg; Pkg.develop(PackageSpec(path=".."))' # Tell Julia to use the local Trixi.jl clone +# julia --project=. -e 'using Pkg; Pkg.add(["OrdinaryDiffEq", "Plots"])' # Install additional packages +# ``` +# Note that if you installed Trixi.jl this way, +# you always have to start Julia with the `--project` flag set to your `run` directory, e.g., +# ```shell +# julia --project=. +# ``` +# if already inside the `run` directory. + + +# ## Additional reading + +# To further delve into Trixi.jl, you may have a look at the following introductory tutorials. +# - [Introduction to DG methods](@ref scalar_linear_advection_1d) will teach you how to set up a +# simple way to approximate the solution of a hyperbolic partial differential equation. It will +# be especially useful to learn about the +# [Discontinuous Galerkin method](https://en.wikipedia.org/wiki/Discontinuous_Galerkin_method) +# and the way it is implemented in Trixi.jl. +# - [Adding a new scalar conservation law](@ref adding_new_scalar_equations) and +# [Adding a non-conservative equation](@ref adding_nonconservative_equation) +# describe how to add new physics models that are not yet included in Trixi.jl. +# - [Callbacks](@ref callbacks-id) gives an overview of how to regularly execute specific actions +# during a simulation, e.g., to store the solution or adapt the mesh. diff --git a/docs/literate/src/files/first_steps/create_first_setup.jl b/docs/literate/src/files/first_steps/create_first_setup.jl new file mode 100644 index 0000000000..906a6f9346 --- /dev/null +++ b/docs/literate/src/files/first_steps/create_first_setup.jl @@ -0,0 +1,268 @@ +#src # Create first setup + +# In this part of the introductory guide, we will create a first Trixi.jl setup as an extension of +# [`elixir_advection_basic.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_advection_basic.jl). +# Since Trixi.jl has a common basic structure for the setups, you can create your own by extending +# and modifying the following example. + +# Let's consider the linear advection equation for a state ``u = u(x, y, t)`` on the two-dimensional spatial domain +# ``[-1, 1] \times [-1, 1]`` with a source term +# ```math +# \frac{\partial}{\partial t}u + \frac{\partial}{\partial x} (0.2 u) - \frac{\partial}{\partial y} (0.7 u) = - 2 e^{-t} +# \sin\bigl(2 \pi (x - t) \bigr) \sin\bigl(2 \pi (y - t) \bigr), +# ``` +# with the initial condition +# ```math +# u(x, y, 0) = \sin\bigl(\pi x \bigr) \sin\bigl(\pi y \bigr), +# ``` +# and periodic boundary conditions. + +# The first step is to create and open a file with the .jl extension. 
You can do this with your +# favorite text editor (if you do not have one, we recommend [VS Code](https://code.visualstudio.com/)). +# In this file you will create your setup. + +# To be able to use functionalities of Trixi.jl, you always need to load Trixi.jl itself +# and the [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl) package. + +using Trixi +using OrdinaryDiffEq + +# The next thing to do is to choose an equation that is suitable for your problem. To see all the +# currently implemented equations, take a look at +# [`src/equations`](https://github.com/trixi-framework/Trixi.jl/tree/main/src/equations). +# If you are interested in adding a new physics model that has not yet been implemented in +# Trixi.jl, take a look at the tutorials +# [Adding a new scalar conservation law](@ref adding_new_scalar_equations) or +# [Adding a non-conservative equation](@ref adding_nonconservative_equation). + +# The linear scalar advection equation in two spatial dimensions +# ```math +# \frac{\partial}{\partial t}u + \frac{\partial}{\partial x} (a_1 u) + \frac{\partial}{\partial y} (a_2 u) = 0 +# ``` +# is already implemented in Trixi.jl as +# [`LinearScalarAdvectionEquation2D`](@ref), for which we need to define a two-dimensional parameter +# `advection_velocity` describing the parameters ``a_1`` and ``a_2``. Appropriate for our problem is `(0.2, -0.7)`. + +advection_velocity = (0.2, -0.7) +equations = LinearScalarAdvectionEquation2D(advection_velocity) + +# To solve our problem numerically using Trixi.jl, we have to discretize the spatial +# domain, for which we set up a mesh. One of the most used meshes in Trixi.jl is the +# [`TreeMesh`](@ref). The spatial domain used is ``[-1, 1] \times [-1, 1]``. We set an initial number +# of elements in the mesh using `initial_refinement_level`, which describes the initial number of +# hierarchical refinements. In this simple case, the total number of elements is `2^initial_refinement_level` +# throughout the simulation. The variable `n_cells_max` is used to limit the number of elements in the mesh, +# which cannot be exceeded when using [adaptive mesh refinement](@ref Adaptive-mesh-refinement). + +# All minimum and all maximum coordinates must be combined into `Tuples`. + +coordinates_min = (-1.0, -1.0) +coordinates_max = ( 1.0, 1.0) +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 30_000) + +# To approximate the solution of the defined model, we create a [`DGSEM`](@ref) solver. +# The solution in each of the recently defined mesh elements will be approximated by a polynomial +# of degree `polydeg`. For more information about discontinuous Galerkin methods, +# check out the [Introduction to DG methods](@ref scalar_linear_advection_1d) tutorial. + +solver = DGSEM(polydeg=3) + +# Now we need to define an initial condition for our problem. All the already implemented +# initial conditions for [`LinearScalarAdvectionEquation2D`](@ref) can be found in +# [`src/equations/linear_scalar_advection_2d.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/src/equations/linear_scalar_advection_2d.jl). +# If you want to use, for example, a Gaussian pulse, it can be used as follows: +# ```julia +# initial_conditions = initial_condition_gauss +# ``` +# But to show you how an arbitrary initial condition can be implemented in a way suitable for +# Trixi.jl, we define our own initial conditions. +# ```math +# u(x, y, 0) = \sin\bigl(\pi x \bigr) \sin\bigl(\pi y \bigr). 
+# ``` +# The initial conditions function must take spatial coordinates, time and equation as arguments +# and returns an initial condition as a statically sized vector `SVector`. Following the same structure, you +# can define your own initial conditions. The time variable `t` can be unused in the initial +# condition, but might also be used to describe an analytical solution if known. If you use the +# initial condition as analytical solution, you can analyze your numerical solution by computing +# the error, see also the +# [section about analyzing the solution](https://trixi-framework.github.io/Trixi.jl/stable/callbacks/#Analyzing-the-numerical-solution). + +function initial_condition_sinpi(x, t, equations::LinearScalarAdvectionEquation2D) + scalar = sinpi(x[1]) * sinpi(x[2]) + return SVector(scalar) +end +initial_condition = initial_condition_sinpi + +# The next step is to define a function of the source term corresponding to our problem. +# ```math +# f(u, x, y, t) = - 2 e^{-t} \sin\bigl(2 \pi (x - t) \bigr) \sin\bigl(2 \pi (y - t) \bigr) +# ``` +# This function must take the state variable, the spatial coordinates, the time and the +# equation itself as arguments and returns the source term as a static vector `SVector`. + +function source_term_exp_sinpi(u, x, t, equations::LinearScalarAdvectionEquation2D) + scalar = - 2 * exp(-t) * sinpi(2*(x[1] - t)) * sinpi(2*(x[2] - t)) + return SVector(scalar) +end + +# Now we collect all the information that is necessary to define a spatial discretization, +# which leaves us with an ODE problem in time with a span from 0.0 to 1.0. +# This approach is commonly referred to as the method of lines. + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver; + source_terms = source_term_exp_sinpi) +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan); + +# At this point, our problem is defined. We will use the `solve` function defined in +# [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl) to get the solution. +# OrdinaryDiffEq.jl gives us the ability to customize the solver +# using callbacks without actually modifying it. Trixi.jl already has some implemented +# [Callbacks](@ref callbacks-id). The most widely used callbacks in Trixi.jl are +# [step control callbacks](https://docs.sciml.ai/DiffEqCallbacks/stable/step_control/) that are +# activated at the end of each time step to perform some actions, e.g. to print statistics. +# We will show you how to use some of the common callbacks. + +# To print a summary of the simulation setup at the beginning +# and to reset timers we use the [`SummaryCallback`](@ref). +# When the returned callback is executed directly, the current timer values are shown. + +summary_callback = SummaryCallback() + +# We also want to analyze the current state of the solution in regular intervals. +# The [`AnalysisCallback`](@ref) outputs some useful statistical information during the solving process +# every `interval` time steps. + +analysis_callback = AnalysisCallback(semi, interval = 5) + +# It is also possible to control the time step size using the [`StepsizeCallback`](@ref) if the time +# integration method isn't adaptive itself. To get more details, look at +# [CFL based step size control](@ref CFL-based-step-size-control). + +stepsize_callback = StepsizeCallback(cfl = 1.6) + +# To save the current solution in regular intervals we use the [`SaveSolutionCallback`](@ref). +# We would like to save the initial and final solutions as well. 
The data +# will be saved as HDF5 files located in the `out` folder. Afterwards it is possible to visualize +# a solution from saved files using Trixi2Vtk.jl and ParaView, which is described below in the +# section [Visualize the solution](@ref Visualize-the-solution). + +save_solution = SaveSolutionCallback(interval = 5, + save_initial_solution = true, + save_final_solution = true) + +# Alternatively, we have the option to print solution files at fixed time intervals. +# ```julua +# save_solution = SaveSolutionCallback(dt = 0.1, +# save_initial_solution = true, +# save_final_solution = true) +# ``` + +# Another useful callback is the [`SaveRestartCallback`](@ref). It saves information for restarting +# in regular intervals. We are interested in saving a restart file for the final solution as +# well. To perform a restart, you need to configure the restart setup in a special way, which is +# described in the section [Restart simulation](@ref restart). + +save_restart = SaveRestartCallback(interval = 100, save_final_restart = true) + +# Create a `CallbackSet` to collect all callbacks so that they can be passed to the `solve` +# function. + +callbacks = CallbackSet(summary_callback, analysis_callback, stepsize_callback, save_solution, + save_restart) + +# The last step is to choose the time integration method. OrdinaryDiffEq.jl defines a wide range of +# [ODE solvers](https://docs.sciml.ai/DiffEqDocs/latest/solvers/ode_solve/), e.g. +# `CarpenterKennedy2N54(williamson_condition = false)`. We will pass the ODE +# problem, the ODE solver and the callbacks to the `solve` function. Also, to use +# `StepsizeCallback`, we must explicitly specify the initial trial time step `dt`, the selected +# value is not important, because it will be overwritten by the `StepsizeCallback`. And there is no +# need to save every step of the solution, we are only interested in the final result. + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), dt = 1.0, + save_everystep = false, callback = callbacks); + +# Finally, we print the timer summary. + +summary_callback() + +# Now you can plot the solution as shown below, analyze it and improve the stability, accuracy or +# efficiency of your setup. + + +# ## Visualize the solution + +# In the previous part of the tutorial, we calculated the final solution of the given problem, now we want +# to visualize it. A more detailed explanation of visualization methods can be found in the section +# [Visualization](@ref visualization). + + +# ### Using Plots.jl + +# The first option is to use the [Plots.jl](https://github.com/JuliaPlots/Plots.jl) package +# directly after calculations, when the solution is saved in the `sol` variable. We load the +# package and use the `plot` function. + +using Plots +plot(sol) + +# To show the mesh on the plot, we need to extract the visualization data from the solution as +# a [`PlotData2D`](@ref) object. Mesh extraction is possible using the [`getmesh`](@ref) function. +# Plots.jl has the `plot!` function that allows you to modify an already built graph. + +pd = PlotData2D(sol) +plot!(getmesh(pd)) + + +# ### Using Trixi2Vtk.jl + +# Another way to visualize a solution is to extract it from a saved HDF5 file. After we used the +# `solve` function with [`SaveSolutionCallback`](@ref) there is a file with the final solution. +# It is located in the `out` folder and is named as follows: `solution_index.h5`. The `index` +# is the final time step of the solution that is padded to 6 digits with zeros from the beginning. 
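+# If you do not want to hard-code the index, you can also look the newest solution file up
+# programmatically. This is only a small sketch (not part of the elixir above); it assumes the
+# default output directory `out` used here and relies on the zero-padded index, which makes
+# lexicographic and chronological order coincide.
+# ```julia
+# solution_files = filter(file -> startswith(file, "solution_") && endswith(file, ".h5"),
+#                         readdir("out"))
+# latest_solution = joinpath("out", last(sort(solution_files)))
+# ```
+# The resulting path can then be passed to the `trixi2vtk` call shown below.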
+# With [Trixi2Vtk](@ref) you can convert the HDF5 output file generated by Trixi.jl into a VTK file. +# This can be used in visualization tools such as [ParaView](https://www.paraview.org) or +# [VisIt](https://visit.llnl.gov) to plot the solution. The important thing is that currently +# Trixi2Vtk.jl supports conversion only for solutions in 2D and 3D spatial domains. + +# If you haven't added Trixi2Vtk.jl to your project yet, you can add it as follows. +# ```julia +# import Pkg +# Pkg.add(["Trixi2Vtk"]) +# ``` +# Now we load the Trixi2Vtk.jl package and convert the file `out/solution_000018.h5` with +# the final solution using the [`trixi2vtk`](@ref) function saving the resulting file in the +# `out` folder. + +using Trixi2Vtk +trixi2vtk(joinpath("out", "solution_000018.h5"), output_directory="out") + +# Now two files `solution_000018.vtu` and `solution_000018_celldata.vtu` have been generated in the +# `out` folder. The first one contains all the information for visualizing the solution, the +# second one contains all the cell-based or discretization-based information. + +# Now let's visualize the solution from the generated files in ParaView. Follow this short +# instruction to get the visualization. +# - Download, install and open [ParaView](https://www.paraview.org/download/). +# - Press `Ctrl+O` and select the generated files `solution_000018.vtu` and +# `solution_000018_celldata.vtu` from the `out` folder. +# - In the upper-left corner in the Pipeline Browser window, left-click on the eye-icon near +# `solution_000018.vtu`. +# - In the lower-left corner in the Properties window, change the Coloring from Solid Color to +# scalar. This already generates the visualization of the final solution. +# - Now let's add the mesh to the visualization. In the upper-left corner in the +# Pipeline Browser window, left-click on the eye-icon near `solution_000018_celldata.vtu`. +# - In the lower-left corner in the Properties window, change the Representation from Surface +# to Wireframe. Then a white grid should appear on the visualization. +# Now, if you followed the instructions exactly, you should get a similar image as shown in the +# section [Using Plots.jl](@ref Using-Plots.jl): + +# ![paraview_trixi2vtk_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/0c29139b-6c5d-4d5c-86e1-f4ebc95aca7e) + +# After completing this tutorial you are able to set up your own simulations with +# Trixi.jl. If you have an interest in contributing to Trixi.jl as a developer, refer to the third +# part of the introduction titled [Changing Trixi.jl itself](@ref changing_trixi). + +Sys.rm("out"; recursive=true, force=true) #hide #md \ No newline at end of file diff --git a/docs/literate/src/files/first_steps/getting_started.jl b/docs/literate/src/files/first_steps/getting_started.jl new file mode 100644 index 0000000000..2bfaf33b5f --- /dev/null +++ b/docs/literate/src/files/first_steps/getting_started.jl @@ -0,0 +1,242 @@ +#src # Getting started + +# Trixi.jl is a numerical simulation framework for conservation laws and +# is written in the [Julia programming language](https://julialang.org/). +# This tutorial is intended for beginners in Julia and Trixi.jl. +# After reading it, you will know how to install Julia and Trixi.jl on your computer, +# and you will be able to download setup files from our GitHub repository, modify them, +# and run simulations. 
+ +# The contents of this tutorial: +# - [Julia installation](@ref Julia-installation) +# - [Trixi.jl installation](@ref Trixi.jl-installation) +# - [Running a simulation](@ref Running-a-simulation) +# - [Getting an existing setup file](@ref Getting-an-existing-setup-file) +# - [Modifying an existing setup](@ref Modifying-an-existing-setup) + + +# ## Julia installation + +# Trixi.jl is compatible with the latest stable release of Julia. Additional details regarding Julia +# support can be found in the [`README.md`](https://github.com/trixi-framework/Trixi.jl#installation) +# file. The current default Julia installation is managed through `juliaup`. You may follow our +# concise installation guidelines for Windows, Linux, and MacOS provided below. In the event of any +# issues during the installation process, please consult the official +# [Julia installation instruction](https://julialang.org/downloads/). + + +# ### Windows + +# - Open a terminal by pressing `Win+r` and entering `cmd` in the opened window. +# - To install Julia, execute the following command in the terminal: +# ```shell +# winget install julia -s msstore +# ``` +# - Verify the successful installation of Julia by executing the following command in the terminal: +# ```shell +# julia +# ``` +# To exit Julia, execute `exit()` or press `Ctrl+d`. + + +# ### Linux and MacOS + +# - To install Julia, run the following command in a terminal: +# ```shell +# curl -fsSL https://install.julialang.org | sh +# ``` +# Follow the instructions displayed in the terminal during the installation process. +# - If an error occurs during the execution of the previous command, you may need to install +# `curl`. On Ubuntu-type systems, you can use the following command: +# ```shell +# sudo apt install curl +# ``` +# After installing `curl`, repeat the first step once more to proceed with Julia installation. +# - Verify the successful installation of Julia by executing the following command in the terminal: +# ```shell +# julia +# ``` +# To exit Julia, execute `exit()` or press `Ctrl+d`. + + +# ## Trixi.jl installation + +# Trixi.jl and its related tools are registered Julia packages, thus their installation +# happens inside Julia. +# For a smooth workflow experience with Trixi.jl, you need to install +# [Trixi.jl](https://github.com/trixi-framework/Trixi.jl), +# [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl), and +# [Plots.jl](https://github.com/JuliaPlots/Plots.jl). + +# - Open a terminal and start Julia. +# - Execute following commands: +# ```julia +# import Pkg +# Pkg.add(["OrdinaryDiffEq", "Plots", "Trixi"]) +# ``` + +# Now you have installed all these +# packages. [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl) provides time +# integration schemes used by Trixi.jl and [Plots.jl](https://github.com/JuliaPlots/Plots.jl) +# can be used to directly visualize Trixi.jl results from the Julia REPL. + + +# ## Usage + + +# ### Running a simulation + +# To get you started, Trixi.jl has a large set +# of [example setups](https://github.com/trixi-framework/Trixi.jl/tree/main/examples), that can be +# taken as a basis for your future investigations. In Trixi.jl, we call these setup files +# "elixirs", since they contain Julia code that takes parts of Trixi.jl and combines them into +# something new. + +# Any of the examples can be executed using the [`trixi_include`](@ref) +# function. `trixi_include(...)` expects +# a single string argument with a path to a file containing Julia code. 
+# For convenience, the [`examples_dir`](@ref) function returns a path to the +# [`examples`](https://github.com/trixi-framework/Trixi.jl/tree/main/examples) +# folder, which has been locally downloaded while installing Trixi.jl. +# `joinpath(...)` can be used to join path components into a full path. + +# Let's execute a short two-dimensional problem setup. It approximates the solution of +# the compressible Euler equations in 2D for an ideal gas ([`CompressibleEulerEquations2D`](@ref)) +# with a weak blast wave as the initial condition. + +# Start Julia in a terminal and execute the following code: + +# ```julia +# using Trixi, OrdinaryDiffEq +# trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_ec.jl")) +# ``` +using Trixi, OrdinaryDiffEq #hide #md +trixi_include(@__MODULE__,joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_ec.jl")) #hide #md + +# To analyze the result of the computation, we can use the Plots.jl package and the function +# `plot(...)`, which creates a graphical representation of the solution. `sol` is a variable +# defined in the executed example and it contains the solution at the final moment of the simulation. + +using Plots +plot(sol) + +# To obtain a list of all Trixi.jl elixirs execute +# [`get_examples`](@ref). It returns the paths to all example setups. + +get_examples() + +# Editing an existing elixir is the best way to start your first own investigation using Trixi.jl. + + +# ### Getting an existing setup file + +# To edit an existing elixir, you first have to find a suitable one and then copy it to a local +# folder. Let's have a look at how to download the `elixir_euler_ec.jl` elixir used in the previous +# section from the [Trixi.jl GitHub repository](https://github.com/trixi-framework/Trixi.jl). + +# - All examples are located inside +# the [`examples`](https://github.com/trixi-framework/Trixi.jl/tree/main/examples) folder. +# - Navigate to the +# file [`elixir_euler_ec.jl`](https://github.com/trixi-framework/Trixi.jl/blob/main/examples/tree_2d_dgsem/elixir_euler_ec.jl). +# - Right-click the `Raw` button on the right side of the webpage and choose `Save as...` +# (or `Save Link As...`). +# - Choose a folder and save the file. + + +# ### Modifying an existing setup + +# As an example, we will change the initial condition for calculations that occur in +# `elixir_euler_ec.jl`. In this example we consider the compressible Euler equations in two spatial +# dimensions, +# ```math +# \frac{\partial}{\partial t} +# \begin{pmatrix} +# \rho \\ \rho v_1 \\ \rho v_2 \\ \rho e +# \end{pmatrix} +# + +# \frac{\partial}{\partial x} +# \begin{pmatrix} +# \rho v_1 \\ \rho v_1^2 + p \\ \rho v_1 v_2 \\ (\rho e + p) v_1 +# \end{pmatrix} +# + +# \frac{\partial}{\partial y} +# \begin{pmatrix} +# \rho v_2 \\ \rho v_1 v_2 \\ \rho v_2^2 + p \\ (\rho e + p) v_2 +# \end{pmatrix} +# = +# \begin{pmatrix} +# 0 \\ 0 \\ 0 \\ 0 +# \end{pmatrix}, +# ``` +# for an ideal gas with the specific heat ratio ``\gamma``. +# Here, ``\rho`` is the density, ``v_1`` and ``v_2`` are the velocities, ``e`` is the specific +# total energy, and +# ```math +# p = (\gamma - 1) \left( \rho e - \frac{1}{2} \rho (v_1^2 + v_2^2) \right) +# ``` +# is the pressure. +# Initial conditions consist of initial values for ``\rho``, ``\rho v_1``, +# ``\rho v_2`` and ``\rho e``. +# One of the common initial conditions for the compressible Euler equations is a simple density +# wave. Let's implement it. + +# - Open the downloaded file `elixir_euler_ec.jl` with a text editor. 
+# - Go to the line with the following code: +# ```julia +# initial_condition = initial_condition_weak_blast_wave +# ``` +# Here, [`initial_condition_weak_blast_wave`](@ref) is used as the initial condition. +# - Comment out the line using the `#` symbol: +# ```julia +# # initial_condition = initial_condition_weak_blast_wave +# ``` +# - Now you can create your own initial conditions. Add the following code after the +# commented line: + +function initial_condition_density_waves(x, t, equations::CompressibleEulerEquations2D) + v1 = 0.1 # velocity along x-axis + v2 = 0.2 # velocity along y-axis + rho = 1.0 + 0.98 * sinpi(sum(x) - t * (v1 + v2)) # density wave profile + p = 20 # pressure + rho_e = p / (equations.gamma - 1) + 1/2 * rho * (v1^2 + v2^2) + return SVector(rho, rho*v1, rho*v2, rho_e) +end +initial_condition = initial_condition_density_waves + +# - Execute the following code one more time, but instead of `path/to/file` paste the path to the +# `elixir_euler_ec.jl` file that you just edited. +# ```julia +# using Trixi +# trixi_include(path/to/file) +# using Plots +# plot(sol) +# ``` +# Then you will obtain a new solution from running the simulation with a different initial +# condition. + +trixi_include(@__MODULE__,joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_ec.jl"), #hide #md + initial_condition=initial_condition) #hide #md +pd = PlotData2D(sol) #hide #md +p1 = plot(pd["rho"]) #hide #md +p2 = plot(pd["v1"], clim=(0.05, 0.15)) #hide #md +p3 = plot(pd["v2"], clim=(0.15, 0.25)) #hide #md +p4 = plot(pd["p"], clim=(10, 30)) #hide #md +plot(p1, p2, p3, p4) #hide #md + +# To get exactly the same picture execute the following. +# ```julia +# pd = PlotData2D(sol) +# p1 = plot(pd["rho"]) +# p2 = plot(pd["v1"], clim=(0.05, 0.15)) +# p3 = plot(pd["v2"], clim=(0.15, 0.25)) +# p4 = plot(pd["p"], clim=(10, 30)) +# plot(p1, p2, p3, p4) +# ``` + +# Feel free to make further changes to the initial condition to observe different solutions. + +# Now you are able to download, modify and execute simulation setups for Trixi.jl. To explore +# further details on setting up a new simulation with Trixi.jl, refer to the second part of +# the introduction titled [Create first setup](@ref create_first_setup). + +Sys.rm("out"; recursive=true, force=true) #hide #md \ No newline at end of file diff --git a/docs/literate/src/files/index.jl b/docs/literate/src/files/index.jl index 26637e5b24..1fc025d84d 100644 --- a/docs/literate/src/files/index.jl +++ b/docs/literate/src/files/index.jl @@ -14,20 +14,27 @@ # There are tutorials for the following topics: -# ### [1 Introduction to DG methods](@ref scalar_linear_advection_1d) +# ### [1 First steps in Trixi.jl](@ref getting_started) +#- +# This tutorial provides guidance for getting started with Trixi.jl, and Julia as well. It outlines +# the installation procedures for both Julia and Trixi.jl, the execution of Trixi.jl elixirs, the +# fundamental structure of a Trixi.jl setup, the visualization of results, and the development +# process for Trixi.jl. + +# ### [2 Introduction to DG methods](@ref scalar_linear_advection_1d) #- # This tutorial gives an introduction to discontinuous Galerkin (DG) methods with the example of the # scalar linear advection equation in 1D. Starting with some theoretical explanations, we first implement # a raw version of a discontinuous Galerkin spectral element method (DGSEM). Then, we will show how # to use features of Trixi.jl to achieve the same result. 
-# ### [2 DGSEM with flux differencing](@ref DGSEM_FluxDiff) +# ### [3 DGSEM with flux differencing](@ref DGSEM_FluxDiff) #- # To improve stability often the flux differencing formulation of the DGSEM (split form) is used. # We want to present the idea and formulation on a basic 1D level. Then, we show how this formulation # can be implemented in Trixi.jl and analyse entropy conservation for two different flux combinations. -# ### [3 Shock capturing with flux differencing and stage limiter](@ref shock_capturing) +# ### [4 Shock capturing with flux differencing and stage limiter](@ref shock_capturing) #- # Using the flux differencing formulation, a simple procedure to capture shocks is a hybrid blending # of a high-order DG method and a low-order subcell finite volume (FV) method. We present the idea on a @@ -35,20 +42,20 @@ # explained and added to an exemplary simulation of the Sedov blast wave with the 2D compressible Euler # equations. -# ### [4 Non-periodic boundary conditions](@ref non_periodic_boundaries) +# ### [5 Non-periodic boundary conditions](@ref non_periodic_boundaries) #- # Thus far, all examples used periodic boundaries. In Trixi.jl, you can also set up a simulation with # non-periodic boundaries. This tutorial presents the implementation of the classical Dirichlet # boundary condition with a following example. Then, other non-periodic boundaries are mentioned. -# ### [5 DG schemes via `DGMulti` solver](@ref DGMulti_1) +# ### [6 DG schemes via `DGMulti` solver](@ref DGMulti_1) #- # This tutorial is about the more general DG solver [`DGMulti`](@ref), introduced [here](@ref DGMulti). # We are showing some examples for this solver, for instance with discretization nodes by Gauss or # triangular elements. Moreover, we present a simple way to include pre-defined triangulate meshes for # non-Cartesian domains using the package [StartUpDG.jl](https://github.com/jlchan/StartUpDG.jl). -# ### [6 Other SBP schemes (FD, CGSEM) via `DGMulti` solver](@ref DGMulti_2) +# ### [7 Other SBP schemes (FD, CGSEM) via `DGMulti` solver](@ref DGMulti_2) #- # Supplementary to the previous tutorial about DG schemes via the `DGMulti` solver we now present # the possibility for `DGMulti` to use other SBP schemes via the package @@ -56,7 +63,7 @@ # For instance, we show how to set up a finite differences (FD) scheme and a continuous Galerkin # (CGSEM) method. -# ### [7 Upwind FD SBP schemes](@ref upwind_fdsbp) +# ### [8 Upwind FD SBP schemes](@ref upwind_fdsbp) #- # General SBP schemes can not only be used via the [`DGMulti`](@ref) solver but # also with a general `DG` solver. In particular, upwind finite difference SBP @@ -64,42 +71,42 @@ # schemes in the `DGMulti` framework, the interface is based on the package # [SummationByPartsOperators.jl](https://github.com/ranocha/SummationByPartsOperators.jl). -# ### [8 Adding a new scalar conservation law](@ref adding_new_scalar_equations) +# ### [9 Adding a new scalar conservation law](@ref adding_new_scalar_equations) #- # This tutorial explains how to add a new physics model using the example of the cubic conservation # law. First, we define the equation using a `struct` `CubicEquation` and the physical flux. Then, # the corresponding standard setup in Trixi.jl (`mesh`, `solver`, `semi` and `ode`) is implemented # and the ODE problem is solved by OrdinaryDiffEq's `solve` method. 
-# ### [9 Adding a non-conservative equation](@ref adding_nonconservative_equation) +# ### [10 Adding a non-conservative equation](@ref adding_nonconservative_equation) #- # In this part, another physics model is implemented, the nonconservative linear advection equation. # We run two different simulations with different levels of refinement and compare the resulting errors. -# ### [10 Parabolic terms](@ref parabolic_terms) +# ### [11 Parabolic terms](@ref parabolic_terms) #- # This tutorial describes how parabolic terms are implemented in Trixi.jl, e.g., # to solve the advection-diffusion equation. -# ### [11 Adding new parabolic terms](@ref adding_new_parabolic_terms) +# ### [12 Adding new parabolic terms](@ref adding_new_parabolic_terms) #- # This tutorial describes how new parabolic terms can be implemented using Trixi.jl. -# ### [12 Adaptive mesh refinement](@ref adaptive_mesh_refinement) +# ### [13 Adaptive mesh refinement](@ref adaptive_mesh_refinement) #- # Adaptive mesh refinement (AMR) helps to increase the accuracy in sensitive or turbolent regions while # not wasting resources for less interesting parts of the domain. This leads to much more efficient # simulations. This tutorial presents the implementation strategy of AMR in Trixi.jl, including the use of # different indicators and controllers. -# ### [13 Structured mesh with curvilinear mapping](@ref structured_mesh_mapping) +# ### [14 Structured mesh with curvilinear mapping](@ref structured_mesh_mapping) #- # In this tutorial, the use of Trixi.jl's structured curved mesh type [`StructuredMesh`](@ref) is explained. # We present the two basic option to initialize such a mesh. First, the curved domain boundaries # of a circular cylinder are set by explicit boundary functions. Then, a fully curved mesh is # defined by passing the transformation mapping. -# ### [14 Unstructured meshes with HOHQMesh.jl](@ref hohqmesh_tutorial) +# ### [15 Unstructured meshes with HOHQMesh.jl](@ref hohqmesh_tutorial) #- # The purpose of this tutorial is to demonstrate how to use the [`UnstructuredMesh2D`](@ref) # functionality of Trixi.jl. This begins by running and visualizing an available unstructured @@ -108,26 +115,26 @@ # software in the Trixi.jl ecosystem, and then run a simulation using Trixi.jl on said mesh. # In the end, the tutorial briefly explains how to simulate an example using AMR via `P4estMesh`. -# ### [15 P4est mesh from gmsh](@ref p4est_from_gmsh) +# ### [16 P4est mesh from gmsh](@ref p4est_from_gmsh) #- # This tutorial describes how to obtain a [`P4estMesh`](@ref) from an existing mesh generated # by [`gmsh`](https://gmsh.info/) or any other meshing software that can export to the Abaqus # input `.inp` format. The tutorial demonstrates how edges/faces can be associated with boundary conditions based on the physical nodesets. -# ### [16 Explicit time stepping](@ref time_stepping) +# ### [17 Explicit time stepping](@ref time_stepping) #- # This tutorial is about time integration using [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl). # It explains how to use their algorithms and presents two types of time step choices - with error-based # and CFL-based adaptive step size control. -# ### [17 Differentiable programming](@ref differentiable_programming) +# ### [18 Differentiable programming](@ref differentiable_programming) #- # This part deals with some basic differentiable programming topics. 
For example, a Jacobian, its # eigenvalues and a curve of total energy (through the simulation) are calculated and plotted for # a few semidiscretizations. Moreover, we calculate an example for propagating errors with Measurement.jl # at the end. -# ### [18 Custom semidiscretization](@ref custom_semidiscretization) +# ### [19 Custom semidiscretization](@ref custom_semidiscretization) #- # This tutorial describes the [semidiscretiations](@ref overview-semidiscretizations) of Trixi.jl # and explains how to extend them for custom tasks. diff --git a/docs/make.jl b/docs/make.jl index 7fce3b31e2..584f151b5f 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -48,6 +48,12 @@ end # "title" => ["subtitle 1" => ("folder 1", "filename 1.jl"), # "subtitle 2" => ("folder 2", "filename 2.jl")] files = [ + # Topic: introduction + "First steps in Trixi.jl" => [ + "Getting started" => ("first_steps", "getting_started.jl"), + "Create first setup" => ("first_steps", "create_first_setup.jl"), + "Changing Trixi.jl itself" => ("first_steps", "changing_trixi.jl"), + ], # Topic: DG semidiscretizations "Introduction to DG methods" => "scalar_linear_advection_1d.jl", "DGSEM with flux differencing" => "DGSEM_FluxDiff.jl", From fa18c8bab4c6855a989691ede7f8947c5e3ea945 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 7 Feb 2024 07:19:49 +0100 Subject: [PATCH 04/58] fix benchmarks configuration (#1837) --- benchmark/benchmarks.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl index a3f7d1d256..15d1d96c05 100644 --- a/benchmark/benchmarks.jl +++ b/benchmark/benchmarks.jl @@ -55,5 +55,5 @@ let SUITE["latency"]["euler_2d"] = @benchmarkable run( `$(Base.julia_cmd()) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_kelvin_helmholtz_instability.jl"), tspan=(0.0, 1.0e-10), save_restart=TrivialCallback(), save_solution=TrivialCallback())'`) seconds=60 SUITE["latency"]["mhd_2d"] = @benchmarkable run( - `$(Base.julia_cmd()) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_mhd_blast_wave.jl"), tspan=(0.0, 1.0e-10), save_restart=TrivialCallback(), save_solution=TrivialCallback())'`) seconds=60 + `$(Base.julia_cmd()) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_mhd_blast_wave.jl"), tspan=(0.0, 1.0e-10), save_solution=TrivialCallback())'`) seconds=60 end From 8c6d9bc727219e470f56f3317997f17eb5615842 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 7 Feb 2024 07:27:40 +0100 Subject: [PATCH 05/58] Make CI fail if Codecov fails (#1834) --- .github/workflows/ci.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 2e388366fc..86bd3f836e 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -134,7 +134,8 @@ jobs: file: ./lcov.info flags: unittests name: codecov-umbrella - fail_ci_if_error: false + fail_ci_if_error: true + verbose: true token: ${{ secrets.CODECOV_TOKEN }} # The standard setup of Coveralls is just annoying for parallel builds, see, e.g., # https://github.com/trixi-framework/Trixi.jl/issues/691 From 4fb8160c397df922e8fc4906a5c8f92225c21ecd Mon Sep 17 00:00:00 2001 From: Michael Schlottke-Lakemper Date: Wed, 7 Feb 2024 08:43:50 +0100 Subject: [PATCH 06/58] Update compat bounds for Makie, CairoMakie (#1836) --- Project.toml | 2 +- docs/Project.toml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/Project.toml b/Project.toml 
index e99b08e0e8..9b624c7733 100644 --- a/Project.toml +++ b/Project.toml @@ -68,7 +68,7 @@ LinearAlgebra = "1" LinearMaps = "2.7, 3.0" LoopVectorization = "0.12.118" MPI = "0.20" -Makie = "0.19" +Makie = "0.19, 0.20" MuladdMacro = "0.2.2" Octavian = "0.3.5" OffsetArrays = "1.3" diff --git a/docs/Project.toml b/docs/Project.toml index 3a091f5b4f..cc48aeb8ed 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -12,7 +12,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" Trixi2Vtk = "bc1476a1-1ca6-4cc3-950b-c312b255ff95" [compat] -CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10" +CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10, 0.11" Documenter = "1" ForwardDiff = "0.10" HOHQMesh = "0.1, 0.2" From 3e6872bd486a4cfcc94bd2c31b2fe8510670a7a2 Mon Sep 17 00:00:00 2001 From: Michael Schlottke-Lakemper Date: Wed, 7 Feb 2024 09:06:07 +0100 Subject: [PATCH 07/58] Enable CI testing on Apple Silicon (#1830) * Enable CI testing on Apple Silicon * Use `threaded` instead of `threaded_legacy` --- .github/workflows/ci.yml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 86bd3f836e..9d398f187b 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -101,6 +101,10 @@ jobs: os: windows-latest arch: x64 trixi_test: threaded + - version: '1.9' + os: macos-14 + arch: arm64 + trixi_test: threaded steps: - uses: actions/checkout@v4 - uses: julia-actions/setup-julia@v1 From fe6a5276e1ea92a23d4897ca33a057288f277e8f Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 7 Feb 2024 10:37:22 +0100 Subject: [PATCH 08/58] coverallsapp at v2 (#1777) Co-authored-by: Michael Schlottke-Lakemper --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 9d398f187b..a7dfe033a9 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -200,7 +200,7 @@ jobs: coverage = merge_coverage_counts(coverage) @show covered_lines, total_lines = get_summary(coverage) LCOV.writefile("./lcov.info", coverage) - - uses: coverallsapp/github-action@master + - uses: coverallsapp/github-action@v2 with: github-token: ${{ secrets.GITHUB_TOKEN }} path-to-lcov: ./lcov.info From 4369c1c00b3e44de43bba95adeedfb37fd09551c Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 14 Feb 2024 07:56:08 +0100 Subject: [PATCH 09/58] Fix formatter to older version (#1843) * Fix formatter to older version * fix JuliaFormatter version also in utils folder * Update utils/trixi-format-file.jl Co-authored-by: Arpit Babbar * Update utils/trixi-format.jl Co-authored-by: Arpit Babbar * Apply suggestions from code review Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * format --------- Co-authored-by: Arpit Babbar Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> --- .github/workflows/FormatCheck.yml | 2 +- utils/trixi-format-file.jl | 3 ++- utils/trixi-format.jl | 3 ++- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/.github/workflows/FormatCheck.yml b/.github/workflows/FormatCheck.yml index a733cb7cc2..7297f1c3ff 100644 --- a/.github/workflows/FormatCheck.yml +++ b/.github/workflows/FormatCheck.yml @@ -29,7 +29,7 @@ jobs: # TODO: Change the call below to # format(".") run: | - julia -e 'using Pkg; Pkg.add(PackageSpec(name = "JuliaFormatter"))' + julia -e 'using Pkg; Pkg.add(PackageSpec(name = "JuliaFormatter", version="1.0.45"))' julia -e 'using JuliaFormatter; format(["benchmark", "examples", "ext", "src", "test", 
"utils"])' - name: Format check run: | diff --git a/utils/trixi-format-file.jl b/utils/trixi-format-file.jl index c4d8e7c903..9b9a0e4949 100755 --- a/utils/trixi-format-file.jl +++ b/utils/trixi-format-file.jl @@ -2,7 +2,8 @@ using Pkg Pkg.activate(; temp = true, io = devnull) -Pkg.add("JuliaFormatter"; preserve = PRESERVE_ALL, io = devnull) +Pkg.add(PackageSpec(name = "JuliaFormatter", version = "1.0.45"); preserve = PRESERVE_ALL, + io = devnull) using JuliaFormatter: format_file diff --git a/utils/trixi-format.jl b/utils/trixi-format.jl index d1e7efa656..63f1407880 100755 --- a/utils/trixi-format.jl +++ b/utils/trixi-format.jl @@ -2,7 +2,8 @@ using Pkg Pkg.activate(; temp = true, io = devnull) -Pkg.add("JuliaFormatter"; preserve = PRESERVE_ALL, io = devnull) +Pkg.add(PackageSpec(name = "JuliaFormatter", version = "1.0.45"); preserve = PRESERVE_ALL, + io = devnull) using JuliaFormatter: format From 4f33837e3c95b5af21057850c30ef603a9191d86 Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Thu, 15 Feb 2024 08:50:30 +0100 Subject: [PATCH 10/58] Use `trixi_include` from TrixiBase.jl (#1832) * Use `trixi_include` from TrixiBase.jl * Add compat entry for TrixiBase.jl * Remove unused functions * Use `TrixiBase.walkexpr` * Fix docs * Add TrixiBase.jl API reference * Add TrixiBase to docs dependencies * Import `TrixiBase` * Remove `find_assignment` * Add TrixiBase to makedocs modules * Apply suggestions from code review Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> --------- Co-authored-by: Hendrik Ranocha Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> --- Project.toml | 2 + docs/Project.toml | 2 + docs/make.jl | 4 +- docs/src/conventions.md | 10 +- docs/src/reference-trixibase.md | 9 ++ src/Trixi.jl | 3 +- src/auxiliary/precompile.jl | 3 - src/auxiliary/special_elixirs.jl | 158 +------------------------------ src/callbacks_step/trivial.jl | 4 +- test/test_unit.jl | 97 ------------------- 10 files changed, 31 insertions(+), 261 deletions(-) create mode 100644 docs/src/reference-trixibase.md diff --git a/Project.toml b/Project.toml index 9b624c7733..b4a06a7068 100644 --- a/Project.toml +++ b/Project.toml @@ -45,6 +45,7 @@ TimerOutputs = "a759f4b9-e2f1-59dc-863e-4aeb61b1ea8f" Triangulate = "f7e6ffb2-c36d-4f8f-a77e-16e897189344" TriplotBase = "981d1d27-644d-49a2-9326-4793e63143c3" TriplotRecipes = "808ab39a-a642-4abf-81ff-4cb34ebbffa3" +TrixiBase = "9a0f1c46-06d5-4909-a5a3-ce25d3fa3284" [weakdeps] Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a" @@ -95,6 +96,7 @@ TimerOutputs = "0.5.7" Triangulate = "2.0" TriplotBase = "0.1" TriplotRecipes = "0.1" +TrixiBase = "0.1.1" julia = "1.8" [extras] diff --git a/docs/Project.toml b/docs/Project.toml index cc48aeb8ed..3b8d169fdb 100644 --- a/docs/Project.toml +++ b/docs/Project.toml @@ -10,6 +10,7 @@ OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed" Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" Trixi2Vtk = "bc1476a1-1ca6-4cc3-950b-c312b255ff95" +TrixiBase = "9a0f1c46-06d5-4909-a5a3-ce25d3fa3284" [compat] CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10, 0.11" @@ -23,3 +24,4 @@ OrdinaryDiffEq = "6.49.1" Plots = "1.9" Test = "1" Trixi2Vtk = "0.3" +TrixiBase = "0.1.1" diff --git a/docs/make.jl b/docs/make.jl index 584f151b5f..946b803b71 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -8,6 +8,7 @@ end using Trixi using Trixi2Vtk +using TrixiBase # Get Trixi.jl root directory trixi_root_dir = 
dirname(@__DIR__) @@ -82,7 +83,7 @@ tutorials = create_tutorials(files) # Make documentation makedocs( # Specify modules for which docstrings should be shown - modules = [Trixi, Trixi2Vtk], + modules = [Trixi, TrixiBase, Trixi2Vtk], # Set sitename to Trixi.jl sitename = "Trixi.jl", # Provide additional formatting options @@ -128,6 +129,7 @@ makedocs( "Troubleshooting and FAQ" => "troubleshooting.md", "Reference" => [ "Trixi.jl" => "reference-trixi.md", + "TrixiBase.jl" => "reference-trixibase.md", "Trixi2Vtk.jl" => "reference-trixi2vtk.md" ], "Authors" => "authors.md", diff --git a/docs/src/conventions.md b/docs/src/conventions.md index dab1b8533a..4f9e0ec4e6 100644 --- a/docs/src/conventions.md +++ b/docs/src/conventions.md @@ -47,10 +47,12 @@ Trixi.jl is distributed with several examples in the form of elixirs, small Julia scripts containing everything to set up and run a simulation. Working interactively from the Julia REPL with these scripts can be quite convenient while for exploratory research and development of Trixi.jl. For example, you -can use the convenience function [`trixi_include`](@ref) to `include` an elixir -with some modified arguments. To enable this, it is helpful to use a consistent -naming scheme in elixirs, since [`trixi_include`](@ref) can only perform simple -replacements. Some standard variables names are +can use the convenience function +[`trixi_include`](@ref) +to `include` an elixir with some modified arguments. To enable this, it is +helpful to use a consistent naming scheme in elixirs, since +[`trixi_include`](@ref) +can only perform simple replacements. Some standard variables names are - `polydeg` for the polynomial degree of a solver - `surface_flux` for the numerical flux at surfaces diff --git a/docs/src/reference-trixibase.md b/docs/src/reference-trixibase.md new file mode 100644 index 0000000000..c7a970f88e --- /dev/null +++ b/docs/src/reference-trixibase.md @@ -0,0 +1,9 @@ +# TrixiBase.jl API + +```@meta +CurrentModule = TrixiBase +``` + +```@autodocs +Modules = [TrixiBase] +``` diff --git a/src/Trixi.jl b/src/Trixi.jl index 8d74fbc973..ea72fbc915 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -70,6 +70,7 @@ using Triangulate: Triangulate, TriangulateIO, triangulate export TriangulateIO # for type parameter in DGMultiMesh using TriplotBase: TriplotBase using TriplotRecipes: DGTriPseudocolor +@reexport using TrixiBase: TrixiBase, trixi_include @reexport using SimpleUnPack: @unpack using SimpleUnPack: @pack! using DataStructures: BinaryHeap, FasterForward, extract_all! 
@@ -129,7 +130,7 @@ include("callbacks_step/callbacks_step.jl") include("callbacks_stage/callbacks_stage.jl") include("semidiscretization/semidiscretization_euler_gravity.jl") -# `trixi_include` and special elixirs such as `convergence_test` +# Special elixirs such as `convergence_test` include("auxiliary/special_elixirs.jl") # Plot recipes and conversion functions to visualize results with Plots.jl diff --git a/src/auxiliary/precompile.jl b/src/auxiliary/precompile.jl index 9cec502f6c..4d5399b5ba 100644 --- a/src/auxiliary/precompile.jl +++ b/src/auxiliary/precompile.jl @@ -577,9 +577,6 @@ function _precompile_manual_() @assert Base.precompile(Tuple{typeof(show), Base.TTY, lbm_collision_callback_type}) @assert Base.precompile(Tuple{typeof(show), IOContext{Base.TTY}, MIME"text/plain", lbm_collision_callback_type}) - - # infrastructure, special elixirs - @assert Base.precompile(Tuple{typeof(trixi_include), String}) end @assert Base.precompile(Tuple{typeof(init_mpi)}) diff --git a/src/auxiliary/special_elixirs.jl b/src/auxiliary/special_elixirs.jl index 5fdd9aea0c..d71a27aa96 100644 --- a/src/auxiliary/special_elixirs.jl +++ b/src/auxiliary/special_elixirs.jl @@ -5,58 +5,6 @@ @muladd begin #! format: noindent -# Note: We can't call the method below `Trixi.include` since that is created automatically -# inside `module Trixi` to `include` source files and evaluate them within the global scope -# of `Trixi`. However, users will want to evaluate in the global scope of `Main` or something -# similar to manage dependencies on their own. -""" - trixi_include([mod::Module=Main,] elixir::AbstractString; kwargs...) - -`include` the file `elixir` and evaluate its content in the global scope of module `mod`. -You can override specific assignments in `elixir` by supplying keyword arguments. -It's basic purpose is to make it easier to modify some parameters while running Trixi.jl from the -REPL. Additionally, this is used in tests to reduce the computational burden for CI while still -providing examples with sensible default values for users. - -Before replacing assignments in `elixir`, the keyword argument `maxiters` is inserted -into calls to `solve` and `Trixi.solve` with it's default value used in the SciML ecosystem -for ODEs, see the "Miscellaneous" section of the -[documentation](https://docs.sciml.ai/DiffEqDocs/stable/basics/common_solver_opts/). - -# Examples - -```jldoctest -julia> redirect_stdout(devnull) do - trixi_include(@__MODULE__, joinpath(examples_dir(), "tree_1d_dgsem", "elixir_advection_extended.jl"), - tspan=(0.0, 0.1)) - sol.t[end] - end -[ Info: You just called `trixi_include`. Julia may now compile the code, please be patient. -0.1 -``` -""" -function trixi_include(mod::Module, elixir::AbstractString; kwargs...) - # Check that all kwargs exist as assignments - code = read(elixir, String) - expr = Meta.parse("begin \n$code \nend") - expr = insert_maxiters(expr) - - for (key, val) in kwargs - # This will throw an error when `key` is not found - find_assignment(expr, key) - end - - # Print information on potential wait time only in non-parallel case - if !mpi_isparallel() - @info "You just called `trixi_include`. Julia may now compile the code, please be patient." - end - Base.include(ex -> replace_assignments(insert_maxiters(ex); kwargs...), mod, elixir) -end - -function trixi_include(elixir::AbstractString; kwargs...) - trixi_include(Main, elixir; kwargs...) -end - """ convergence_test([mod::Module=Main,] elixir::AbstractString, iterations; kwargs...) 
@@ -177,112 +125,15 @@ end # Helper methods used in the functions defined above -# Apply the function `f` to `expr` and all sub-expressions recursively. -walkexpr(f, expr::Expr) = f(Expr(expr.head, (walkexpr(f, arg) for arg in expr.args)...)) -walkexpr(f, x) = f(x) - -# Insert the keyword argument `maxiters` into calls to `solve` and `Trixi.solve` -# with default value `10^5` if it is not already present. -function insert_maxiters(expr) - maxiters_default = 10^5 - - expr = walkexpr(expr) do x - if x isa Expr - is_plain_solve = x.head === Symbol("call") && x.args[1] === Symbol("solve") - is_trixi_solve = (x.head === Symbol("call") && x.args[1] isa Expr && - x.args[1].head === Symbol(".") && - x.args[1].args[1] === Symbol("Trixi") && - x.args[1].args[2] isa QuoteNode && - x.args[1].args[2].value === Symbol("solve")) - - if is_plain_solve || is_trixi_solve - # Do nothing if `maxiters` is already set as keyword argument... - for arg in x.args - # This detects the case where `maxiters` is set as keyword argument - # without or before a semicolon - if (arg isa Expr && arg.head === Symbol("kw") && - arg.args[1] === Symbol("maxiters")) - return x - end - - # This detects the case where maxiters is set as keyword argument - # after a semicolon - if (arg isa Expr && arg.head === Symbol("parameters")) - # We need to check each keyword argument listed here - for nested_arg in arg.args - if (nested_arg isa Expr && - nested_arg.head === Symbol("kw") && - nested_arg.args[1] === Symbol("maxiters")) - return x - end - end - end - end - - # ...and insert it otherwise. - push!(x.args, Expr(Symbol("kw"), Symbol("maxiters"), maxiters_default)) - end - end - return x - end - - return expr -end - -# Replace assignments to `key` in `expr` by `key = val` for all `(key,val)` in `kwargs`. -function replace_assignments(expr; kwargs...) 
- # replace explicit and keyword assignments - expr = walkexpr(expr) do x - if x isa Expr - for (key, val) in kwargs - if (x.head === Symbol("=") || x.head === :kw) && - x.args[1] === Symbol(key) - x.args[2] = :($val) - # dump(x) - end - end - end - return x - end - - return expr -end - -# find a (keyword or common) assignment to `destination` in `expr` -# and return the assigned value -function find_assignment(expr, destination) - # declare result to be able to assign to it in the closure - local result - found = false - - # find explicit and keyword assignments - walkexpr(expr) do x - if x isa Expr - if (x.head === Symbol("=") || x.head === :kw) && - x.args[1] === Symbol(destination) - result = x.args[2] - found = true - # dump(x) - end - end - return x - end - - if !found - throw(ArgumentError("assignment `$destination` not found in expression")) - end - - result -end - -# searches the parameter that specifies the mesh reslution in the elixir +# Searches for the assignment that specifies the mesh resolution in the elixir function extract_initial_resolution(elixir, kwargs) code = read(elixir, String) expr = Meta.parse("begin \n$code \nend") try # get the initial_refinement_level from the elixir - initial_refinement_level = find_assignment(expr, :initial_refinement_level) + initial_refinement_level = TrixiBase.find_assignment(expr, + :initial_refinement_level) if haskey(kwargs, :initial_refinement_level) return kwargs[:initial_refinement_level] @@ -294,7 +145,8 @@ function extract_initial_resolution(elixir, kwargs) if isa(e, ArgumentError) try # get cells_per_dimension from the elixir - cells_per_dimension = eval(find_assignment(expr, :cells_per_dimension)) + cells_per_dimension = eval(TrixiBase.find_assignment(expr, + :cells_per_dimension)) if haskey(kwargs, :cells_per_dimension) return kwargs[:cells_per_dimension] diff --git a/src/callbacks_step/trivial.jl b/src/callbacks_step/trivial.jl index a55b7d85b1..fb93cf96c0 100644 --- a/src/callbacks_step/trivial.jl +++ b/src/callbacks_step/trivial.jl @@ -8,8 +8,8 @@ """ TrivialCallback() -A callback that does nothing. This can be useful to disable some callbacks -easily via [`trixi_include`](@ref). +A callback that does nothing. This can be useful to disable some callbacks easily via +[`trixi_include`](@ref). """ function TrivialCallback() DiscreteCallback(trivial_callback, trivial_callback, diff --git a/test/test_unit.jl b/test/test_unit.jl index 7943d952f7..3b8dc3c433 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -1529,103 +1529,6 @@ end @test mesh.boundary_faces[:entire_boundary] == [1, 2] end - -@testset "trixi_include" begin - @trixi_testset "Basic" begin - example = """ - x = 4 - """ - - filename = tempname() - try - open(filename, "w") do file - write(file, example) - end - - # Use `@trixi_testset`, which wraps code in a temporary module, and call - # `trixi_include` with `@__MODULE__` in order to isolate this test. - @test_warn "You just called" trixi_include(@__MODULE__, filename) - @test @isdefined x - @test x == 4 - - @test_warn "You just called" trixi_include(@__MODULE__, filename, x = 7) - @test x == 7 - - @test_throws "assignment `y` not found in expression" trixi_include(@__MODULE__, - filename, - y = 3) - finally - rm(filename, force = true) - end - end - - @trixi_testset "With `solve` Without `maxiters`" begin - # `trixi_include` assumes this to be the `solve` function of OrdinaryDiffEq, - # and therefore tries to insert the kwarg `maxiters`, which will fail here. 
- example = """ - solve() = 0 - x = solve() - """ - - filename = tempname() - try - open(filename, "w") do file - write(file, example) - end - - # Use `@trixi_testset`, which wraps code in a temporary module, and call - # `trixi_include` with `@__MODULE__` in order to isolate this test. - @test_throws "no method matching solve(; maxiters::Int64)" trixi_include(@__MODULE__, - filename) - - @test_throws "no method matching solve(; maxiters::Int64)" trixi_include(@__MODULE__, - filename, - maxiters = 3) - finally - rm(filename, force = true) - end - end - - @trixi_testset "With `solve` with `maxiters`" begin - # We need another example file that we include with `Base.include` first, in order to - # define the `solve` method without `trixi_include` trying to insert `maxiters` kwargs. - # Then, we can test that `trixi_include` inserts the kwarg in the `solve()` call. - example1 = """ - solve(; maxiters=0) = maxiters - """ - - example2 = """ - x = solve() - """ - - filename1 = tempname() - filename2 = tempname() - try - open(filename1, "w") do file - write(file, example1) - end - open(filename2, "w") do file - write(file, example2) - end - - # Use `@trixi_testset`, which wraps code in a temporary module, and call - # `Base.include` and `trixi_include` with `@__MODULE__` in order to isolate this test. - Base.include(@__MODULE__, filename1) - @test_warn "You just called" trixi_include(@__MODULE__, filename2) - @test @isdefined x - # This is the default `maxiters` inserted by `trixi_include` - @test x == 10^5 - - @test_warn "You just called" trixi_include(@__MODULE__, filename2, - maxiters = 7) - # Test that `maxiters` got overwritten - @test x == 7 - finally - rm(filename1, force = true) - rm(filename2, force = true) - end - end -end end end #module From 08c6034139451ba03be2abb6748bcd402f151b5d Mon Sep 17 00:00:00 2001 From: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Date: Thu, 15 Feb 2024 21:37:50 +0100 Subject: [PATCH 11/58] Don't export `TrixiBase` (#1846) --- src/Trixi.jl | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/Trixi.jl b/src/Trixi.jl index ea72fbc915..8ab8085d4e 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -70,7 +70,8 @@ using Triangulate: Triangulate, TriangulateIO, triangulate export TriangulateIO # for type parameter in DGMultiMesh using TriplotBase: TriplotBase using TriplotRecipes: DGTriPseudocolor -@reexport using TrixiBase: TrixiBase, trixi_include +@reexport using TrixiBase: trixi_include +using TrixiBase: TrixiBase @reexport using SimpleUnPack: @unpack using SimpleUnPack: @pack! using DataStructures: BinaryHeap, FasterForward, extract_all! From a872bc55e545baf36be91a949940434ae89f3426 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 16 Feb 2024 06:24:33 +0100 Subject: [PATCH 12/58] set version to v0.6.9 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index b4a06a7068..10bf79cf9a 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.6.9-pre" +version = "0.6.9" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 29e173eb5f955771755cc28342e07c9903204327 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 16 Feb 2024 06:24:52 +0100 Subject: [PATCH 13/58] set development version to v0.6.10-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 10bf79cf9a..af3e6b4d07 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.9" +version = "0.6.10-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 3c9374bb6c336be32c56d5c13b213ae3900269a3 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Tue, 20 Feb 2024 16:15:48 +0100 Subject: [PATCH 14/58] Classic LWR traffic flow (#1840) * Add classic LWR traffic flow model to Trixi * fmt * shorten * Update examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl * Update examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl * rm IC const, fmt * add davis wave speed estimate for upcoming change * news and md * Use euler * fmt * Update examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl * Update examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl * Update NEWS.md Co-authored-by: Andrew Winters * Andrew's suggestions * Apply suggestions from code review Co-authored-by: Hendrik Ranocha * add domain of u * back to carpenter kennedy * Update NEWS.md Co-authored-by: Andrew Winters --------- Co-authored-by: Andrew Winters Co-authored-by: Hendrik Ranocha --- NEWS.md | 1 + README.md | 2 +- .../elixir_traffic_flow_lwr_greenlight.jl | 80 ++++++++++++ .../elixir_traffic_flow_lwr_convergence.jl | 54 ++++++++ .../elixir_traffic_flow_lwr_trafficjam.jl | 82 +++++++++++++ src/Trixi.jl | 3 +- src/equations/equations.jl | 5 + src/equations/traffic_flow_lwr_1d.jl | 116 ++++++++++++++++++ test/test_structured_1d.jl | 15 +++ test/test_tree_1d.jl | 3 + test/test_tree_1d_traffic_flow_lwr.jl | 42 +++++++ 11 files changed, 401 insertions(+), 2 deletions(-) create mode 100644 examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl create mode 100644 examples/tree_1d_dgsem/elixir_traffic_flow_lwr_convergence.jl create mode 100644 examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl create mode 100644 src/equations/traffic_flow_lwr_1d.jl create mode 100644 test/test_tree_1d_traffic_flow_lwr.jl diff --git a/NEWS.md b/NEWS.md index 02a723fca4..feccd1f985 100644 --- a/NEWS.md +++ b/NEWS.md @@ -12,6 +12,7 @@ for human readability. - Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, can now be digested by Trixi in 2D and 3D. - Subcell (positivity) limiting support for nonlinear variables in 2D for `TreeMesh` +- Added Lighthill-Whitham-Richards (LWR) traffic model ## Changes when updating to v0.6 from v0.5.x diff --git a/README.md b/README.md index c531ab4d1a..71370d3478 100644 --- a/README.md +++ b/README.md @@ -53,7 +53,7 @@ installation and postprocessing procedures. 
Its features include: * Hyperbolic diffusion equations for elliptic problems * Lattice-Boltzmann equations (D2Q9 and D3Q27 schemes) * Shallow water equations - * Several scalar conservation laws (e.g., linear advection, Burgers' equation) + * Several scalar conservation laws (e.g., linear advection, Burgers' equation, LWR traffic flow) * Multi-physics simulations * [Self-gravitating gas dynamics](https://github.com/trixi-framework/paper-self-gravitating-gas-dynamics) * Shared-memory parallelization via multithreading diff --git a/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl b/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl new file mode 100644 index 0000000000..e5badf1445 --- /dev/null +++ b/examples/structured_1d_dgsem/elixir_traffic_flow_lwr_greenlight.jl @@ -0,0 +1,80 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### + +equations = TrafficFlowLWREquations1D() + +solver = DGSEM(polydeg = 3, surface_flux = FluxHLL(min_max_speed_davis)) + +coordinates_min = (-1.0,) # minimum coordinate +coordinates_max = (1.0,) # maximum coordinate +cells_per_dimension = (64,) + +mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max, + periodicity = false) + +# Example inspired from http://www.clawpack.org/riemann_book/html/Traffic_flow.html#Example:-green-light +# Green light that at x = 0 which switches at t = 0 from red to green. +# To the left there are cars bumper to bumper, to the right there are no cars. +function initial_condition_greenlight(x, t, equation::TrafficFlowLWREquations1D) + scalar = x[1] < 0.0 ? 1.0 : 0.0 + + return SVector(scalar) +end + +############################################################################### +# Specify non-periodic boundary conditions + +# Assume that there are always cars waiting at the left +function inflow(x, t, equations::TrafficFlowLWREquations1D) + return initial_condition_greenlight(coordinates_min, t, equations) +end +boundary_condition_inflow = BoundaryConditionDirichlet(inflow) + +# Cars may leave the modeled domain +function boundary_condition_outflow(u_inner, orientation, normal_direction, x, t, + surface_flux_function, + equations::TrafficFlowLWREquations1D) + # Calculate the boundary flux entirely from the internal solution state + flux = Trixi.flux(u_inner, orientation, equations) + + return flux +end + +boundary_conditions = (x_neg = boundary_condition_inflow, + x_pos = boundary_condition_outflow) + +initial_condition = initial_condition_greenlight + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. 
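+
+# For this green-light Riemann problem, the entropy solution is a rarefaction wave that
+# spreads between x = -v_max * t and x = v_max * t, since the characteristic speed
+# v_max * (1 - 2 * u) equals -v_max at u = 1 and +v_max at u = 0. With the default
+# v_max = 1 and the final time chosen below, the fan stays inside the domain [-1, 1].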
+
+tspan = (0.0, 0.5)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+stepsize_callback = StepsizeCallback(cfl = 1.2)
+
+callbacks = CallbackSet(summary_callback,
+                        analysis_callback, alive_callback,
+                        stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
+            dt = 42, # solve needs some value here but it will be overwritten by the stepsize_callback
+            save_everystep = false, callback = callbacks);
+
+summary_callback() # print the timer summary
diff --git a/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_convergence.jl b/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_convergence.jl
new file mode 100644
index 0000000000..59258018f8
--- /dev/null
+++ b/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_convergence.jl
@@ -0,0 +1,54 @@
+
+using OrdinaryDiffEq
+using Trixi
+
+###############################################################################
+
+equations = TrafficFlowLWREquations1D()
+
+# Use a polynomial degree 3 DGSEM solver with an HLL surface flux (the solution is smooth)
+solver = DGSEM(polydeg = 3, surface_flux = flux_hll)
+
+coordinates_min = 0.0 # minimum coordinate
+coordinates_max = 2.0 # maximum coordinate
+
+# Create a uniformly refined mesh with periodic boundaries
+mesh = TreeMesh(coordinates_min, coordinates_max,
+                initial_refinement_level = 4,
+                n_cells_max = 30_000)
+
+###############################################################################
+# Set up the semidiscretization with the manufactured solution and matching source terms
+
+initial_condition = initial_condition_convergence_test
+semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver,
+                                    source_terms = source_terms_convergence_test)
+
+###############################################################################
+# ODE solvers, callbacks etc.
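+
+# Note: this manufactured-solution setup can be used with `convergence_test`, e.g.,
+#   convergence_test(joinpath(examples_dir(), "tree_1d_dgsem",
+#                    "elixir_traffic_flow_lwr_convergence.jl"), 4)
+# which reruns the elixir with successively increased `initial_refinement_level` and
+# reports experimental orders of convergence (the iteration count 4 is only an example).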
+ +tspan = (0.0, 2.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +stepsize_callback = StepsizeCallback(cfl = 1.6) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback, + stepsize_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, CarpenterKennedy2N54(), + dt = 42, # solve needs some value here but it will be overwritten by the stepsize_callback + save_everystep = false, callback = callbacks); + +summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl b/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl new file mode 100644 index 0000000000..d3a17b513f --- /dev/null +++ b/examples/tree_1d_dgsem/elixir_traffic_flow_lwr_trafficjam.jl @@ -0,0 +1,82 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### + +equations = TrafficFlowLWREquations1D() + +# Use first order finite volume to prevent oscillations at the shock +solver = DGSEM(polydeg = 0, surface_flux = flux_lax_friedrichs) + +coordinates_min = -1.0 # minimum coordinate +coordinates_max = 1.0 # maximum coordinate + +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 9, + n_cells_max = 30_000, + periodicity = false) + +# Example taken from http://www.clawpack.org/riemann_book/html/Traffic_flow.html#Example:-Traffic-jam +# Discontinuous initial condition (Riemann Problem) leading to a shock that moves to the left. +# The shock corresponds to the traffic congestion. +function initial_condition_traffic_jam(x, t, equation::TrafficFlowLWREquations1D) + scalar = x[1] < 0.0 ? 0.5 : 1.0 + + return SVector(scalar) +end + +############################################################################### +# Specify non-periodic boundary conditions + +function outflow(x, t, equations::TrafficFlowLWREquations1D) + return initial_condition_traffic_jam(coordinates_min, t, equations) +end +boundary_condition_outflow = BoundaryConditionDirichlet(outflow) + +function boundary_condition_inflow(u_inner, orientation, normal_direction, x, t, + surface_flux_function, + equations::TrafficFlowLWREquations1D) + # Calculate the boundary flux entirely from the internal solution state + flux = Trixi.flux(u_inner, orientation, equations) + + return flux +end + +boundary_conditions = (x_neg = boundary_condition_outflow, + x_pos = boundary_condition_inflow) + +initial_condition = initial_condition_traffic_jam + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. 
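+
+# For this Riemann data, the Rankine-Hugoniot condition gives the shock speed
+# s = (f(u_R) - f(u_L)) / (u_R - u_L) = v_max * (1 - u_L - u_R) = -0.5,
+# so the end of the traffic jam moves to the left and reaches x = -0.25 at the
+# final time t = 0.5 used below.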
+
+tspan = (0.0, 0.5)
+ode = semidiscretize(semi, tspan)
+
+summary_callback = SummaryCallback()
+
+analysis_interval = 100
+analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
+
+alive_callback = AliveCallback(analysis_interval = analysis_interval)
+
+stepsize_callback = StepsizeCallback(cfl = 1.0)
+
+callbacks = CallbackSet(summary_callback,
+                        analysis_callback, alive_callback,
+                        stepsize_callback)
+
+###############################################################################
+# run the simulation
+
+# Note: Be careful when increasing the polynomial degree and switching from first order finite volume
+# to some actual DG method - in that case, you should also exchange the ODE solver.
+sol = solve(ode, Euler(),
+            dt = 42, # solve needs some value here but it will be overwritten by the stepsize_callback
+            save_everystep = false, callback = callbacks);
+
+summary_callback() # print the timer summary
diff --git a/src/Trixi.jl b/src/Trixi.jl
index 8ab8085d4e..bf0986084a 100644
--- a/src/Trixi.jl
+++ b/src/Trixi.jl
@@ -157,7 +157,8 @@ export AcousticPerturbationEquations2D,
        ShallowWaterTwoLayerEquations1D, ShallowWaterTwoLayerEquations2D,
        ShallowWaterEquationsQuasi1D,
        LinearizedEulerEquations2D,
-       PolytropicEulerEquations2D
+       PolytropicEulerEquations2D,
+       TrafficFlowLWREquations1D
 
 export LaplaceDiffusion1D, LaplaceDiffusion2D, LaplaceDiffusion3D,
        CompressibleNavierStokesDiffusion1D, CompressibleNavierStokesDiffusion2D,
diff --git a/src/equations/equations.jl b/src/equations/equations.jl
index c041bf117b..65875a2a7e 100644
--- a/src/equations/equations.jl
+++ b/src/equations/equations.jl
@@ -507,4 +507,9 @@ include("linearized_euler_2d.jl")
 
 abstract type AbstractEquationsParabolic{NDIMS, NVARS, GradientVariables} <:
               AbstractEquations{NDIMS, NVARS} end
+
+# Lighthill-Whitham-Richards (LWR) traffic flow model
+abstract type AbstractTrafficFlowLWREquations{NDIMS, NVARS} <:
+              AbstractEquations{NDIMS, NVARS} end
+include("traffic_flow_lwr_1d.jl")
 end # @muladd
diff --git a/src/equations/traffic_flow_lwr_1d.jl b/src/equations/traffic_flow_lwr_1d.jl
new file mode 100644
index 0000000000..a4d2613a5c
--- /dev/null
+++ b/src/equations/traffic_flow_lwr_1d.jl
@@ -0,0 +1,116 @@
+# By default, Julia/LLVM does not use fused multiply-add operations (FMAs).
+# Since these FMAs can increase the performance of many numerical algorithms,
+# we need to opt-in explicitly.
+# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details.
+@muladd begin
+#! format: noindent
+
+@doc raw"""
+    TrafficFlowLWREquations1D
+
+The classic Lighthill-Whitham-Richards (LWR) model for 1D traffic flow.
+The car density is denoted by $u \in [0, 1]$ and
+the maximum possible speed (e.g. due to speed limits) is $v_{\text{max}}$.
+```math
+\partial_t u + v_{\text{max}} \partial_1 [u (1 - u)] = 0
+```
+For more details see e.g. Section 11.1 of
+- Randall LeVeque (2002)
+Finite Volume Methods for Hyperbolic Problems
+[DOI: 10.1017/CBO9780511791253](https://doi.org/10.1017/CBO9780511791253)
+"""
+struct TrafficFlowLWREquations1D{RealT <: Real} <: AbstractTrafficFlowLWREquations{1, 1}
+    v_max::RealT
+
+    function TrafficFlowLWREquations1D(v_max = 1.0)
+        new{typeof(v_max)}(v_max)
+    end
+end
+
+varnames(::typeof(cons2cons), ::TrafficFlowLWREquations1D) = ("car-density",)
+varnames(::typeof(cons2prim), ::TrafficFlowLWREquations1D) = ("car-density",)
+
+"""
+    initial_condition_convergence_test(x, t, equations::TrafficFlowLWREquations1D)
+
+A smooth initial condition used for convergence tests.
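+In combination with [`source_terms_convergence_test`](@ref), this initial condition
+yields the manufactured solution `u(x, t) = 2 + sin(2π(x - t))`.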
+""" +function initial_condition_convergence_test(x, t, equations::TrafficFlowLWREquations1D) + c = 2.0 + A = 1.0 + L = 1 + f = 1 / L + omega = 2 * pi * f + scalar = c + A * sin(omega * (x[1] - t)) + + return SVector(scalar) +end + +""" + source_terms_convergence_test(u, x, t, equations::TrafficFlowLWREquations1D) + +Source terms used for convergence tests in combination with +[`initial_condition_convergence_test`](@ref). +""" +@inline function source_terms_convergence_test(u, x, t, + equations::TrafficFlowLWREquations1D) + # Same settings as in `initial_condition` + c = 2.0 + A = 1.0 + L = 1 + f = 1 / L + omega = 2 * pi * f + du = omega * cos(omega * (x[1] - t)) * + (-1 - equations.v_max * (2 * sin(omega * (x[1] - t)) + 3)) + + return SVector(du) +end + +# Calculate 1D flux in for a single point +@inline function flux(u, orientation::Integer, equations::TrafficFlowLWREquations1D) + return SVector(equations.v_max * u[1] * (1.0 - u[1])) +end + +# Calculate maximum wave speed for local Lax-Friedrichs-type dissipation +@inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer, + equations::TrafficFlowLWREquations1D) + λ_max = max(abs(equations.v_max * (1.0 - 2 * u_ll[1])), + abs(equations.v_max * (1.0 - 2 * u_rr[1]))) +end + +# Calculate minimum and maximum wave speeds for HLL-type fluxes +@inline function min_max_speed_naive(u_ll, u_rr, orientation::Integer, + equations::TrafficFlowLWREquations1D) + jac_L = equations.v_max * (1.0 - 2 * u_ll[1]) + jac_R = equations.v_max * (1.0 - 2 * u_rr[1]) + + λ_min = min(jac_L, jac_R) + λ_max = max(jac_L, jac_R) + + return λ_min, λ_max +end + +@inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer, + equations::TrafficFlowLWREquations1D) + min_max_speed_naive(u_ll, u_rr, orientation, equations) +end + +@inline function max_abs_speeds(u, equations::TrafficFlowLWREquations1D) + return (abs(equations.v_max * (1.0 - 2 * u[1])),) +end + +# Convert conservative variables to primitive +@inline cons2prim(u, equations::TrafficFlowLWREquations1D) = u + +# Convert conservative variables to entropy variables +@inline cons2entropy(u, equations::TrafficFlowLWREquations1D) = u + +# Calculate entropy for a conservative state `cons` +@inline entropy(u::Real, ::TrafficFlowLWREquations1D) = 0.5 * u^2 +@inline entropy(u, equations::TrafficFlowLWREquations1D) = entropy(u[1], equations) + +# Calculate total energy for a conservative state `cons` +@inline energy_total(u::Real, ::TrafficFlowLWREquations1D) = 0.5 * u^2 +@inline energy_total(u, equations::TrafficFlowLWREquations1D) = energy_total(u[1], + equations) +end # @muladd diff --git a/test/test_structured_1d.jl b/test/test_structured_1d.jl index f0eecfa9ac..fea06554c5 100644 --- a/test/test_structured_1d.jl +++ b/test/test_structured_1d.jl @@ -138,6 +138,21 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +@trixi_testset "elixir_traffic_flow_lwr_greenlight.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_traffic_flow_lwr_greenlight.jl"), + l2=[0.2005523261652845], + linf=[0.5052827913468407]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end # Clean up afterwards: delete Trixi.jl output directory diff --git a/test/test_tree_1d.jl b/test/test_tree_1d.jl index 4654f6313f..8b470278ff 100644 --- a/test/test_tree_1d.jl +++ b/test/test_tree_1d.jl @@ -47,6 +47,9 @@ 
isdir(outdir) && rm(outdir, recursive = true) # FDSBP methods on the TreeMesh include("test_tree_1d_fdsbp.jl") + + # Traffic flow LWR + include("test_tree_1d_traffic_flow_lwr.jl") end # Coverage test for all initial conditions diff --git a/test/test_tree_1d_traffic_flow_lwr.jl b/test/test_tree_1d_traffic_flow_lwr.jl new file mode 100644 index 0000000000..54412e314b --- /dev/null +++ b/test/test_tree_1d_traffic_flow_lwr.jl @@ -0,0 +1,42 @@ +module TestExamples1DTrafficFlowLWR + +using Test +using Trixi + +include("test_trixi.jl") + +EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") + +@testset "Traffic-flow LWR" begin +#! format: noindent + +@trixi_testset "elixir_traffic_flow_lwr_convergence.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_traffic_flow_lwr_convergence.jl"), + l2=[0.0008455067389588569], + linf=[0.004591951086623913]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "elixir_traffic_flow_lwr_trafficjam.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_traffic_flow_lwr_trafficjam.jl"), + l2=[0.1761758135539748], linf=[0.5]) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end +end + +end # module From c5e743aa1b229562132e3bbfbd936e995404739f Mon Sep 17 00:00:00 2001 From: Simon Candelaresi <10759273+SimonCan@users.noreply.github.com> Date: Tue, 20 Feb 2024 18:38:26 +0000 Subject: [PATCH 15/58] WIP: Sc/polytropic 2d wave speed (#1816) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Added coupling converters. * Added generic converter_function for structured 2d meshes. * Added example elixir for coupling converters. * Cleaned up converter coupling elixir. * Added equations in coupling converters. * Added converter functions. * Added identity converter function. * Autoformat for converter coupling implementation. * Added coupled converter elixir. * Corrected file name of coupled converters test. * Removed redundant doc string. * Added function signature in doc string. * Removed coverage_override in coupled tests. * Removed old commented code. * Update make.jl Added interface coupling docs to the main menu. * Update make.jl Moved converter coupling section. * Create coupling.md * Update coupling.md Added some documentation on coupling converters. * Removed troublesome AnalysisCallbackCoupled from test. * Chenged coupling converter function. * Changed coupling converter function and updated tests. * Sepcialized coupling function call. * Removed volume coupling from documentation to avoit confusion. * Update src/coupling_converters/coupling_converters.jl Co-authored-by: Hendrik Ranocha * Removed redundant converter function for coupling. * Removed redundant coupling converter file mentioned in some files. * Autoreformatted. * Removed old coupled elixir and replaced it with one using converter functions. * Updated errors for coupled tests. * Corrected test results for coupled equations. * Corrected comment. * Removed coupled test from special tests. * Removed coupled test from specials. * Chaned the coupling function to the identity. * Updated coupling tests. * Updated errors for coupled test. 
* Added advice about binary compatability for coupled equations in the documentation. * Typo. * Added numerical fluxes. * Corrected rs copy routine. Now loop over this semi's components. * Reformatted equations source file. * Removed problemating include of time_integration.jl. * Removed export of deleted methods. * Reverted to old version of compressible Euler multicomponent with no support for structured grid. * Renamed documentation file for multi-physics coupling. * Renamed doc reference. * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/src/multi-physics_coupling.md Co-authored-by: Michael Schlottke-Lakemper * Update docs/src/multi-physics_coupling.md Co-authored-by: Michael Schlottke-Lakemper * Update docs/src/multi-physics_coupling.md Co-authored-by: Michael Schlottke-Lakemper * Reinstated structured_2d_dgsem coupled in special tests. * Update examples/structured_2d_dgsem/elixir_advection_coupled.jl Co-authored-by: Michael Schlottke-Lakemper * Renamed CouplingFunction to CouplingConverter. * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Michael Schlottke-Lakemper * Cleaned the copy of coupled boundary values. * Reduced time span for example coupling elixir. * Removed redundant loop. * Applied formatter. * Removed default coupling covnerter function. * Moved coupling converter function into elixir. * Apply suggestions from code review Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/make.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Removed coupling_converters.jl from the include. * Corrected introduced issue with coupling boundary copy. The latest change to clean up the boundary copying introduced a bug related to the determination of the wrong node indices. This is now corrected. * Corrected comment on final simulation time. * Updated errors for coupled test to reflect changed final simulation time. * Added miladd. * Corrected coordinate finding in semidiscretization_coupled. * Fixed issued related to memory allocation. * Corrected loop over semidiscretization. * Removed commented out code. * Fixed type instability with loops over semidiscretizations using lispy tuple programming. * Removed obsolete code. * Fixed another typa instability in coupled semidiscretization. * Cleaning up of the coupled semidiscretization. * Autoformatted coupled semidiscretization. * Fixed last type instability in coupling. * Autoformatter on semidiscretization. * Fixed bug in boundary values copy that arose when coupling multiple systems. * aplpied autoformatter on coupled semidiscretization. * Extended the structured 2d example elixir for the coupled advection to 4 semidiscretizations. This hase two purpuses: 1. Users are given an example fro 2d coupling avoiding common pitfalls. 2. This increases the code coverege for the test. * Updated test results for coupled advection in 2d to reflect the 4 semidiscretizations that are now used. * Added correct errors for tests for the coupled adveciton equations in structured 2d. 
* Update examples/structured_2d_dgsem/elixir_advection_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update examples/structured_2d_dgsem/elixir_advection_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Update src/semidiscretization/semidiscretization_coupled.jl Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> * Corrected foreach_enumerate implementation. * Fix closing parens * Remove unused recursive rhs! * Pass equations to converter function * Apply formatting * Reverted copy_to_coupled_boundary to previou version to avoid type instability. * Corrected computation of coupled semidiscretizations and fixed memory issue. * Removed redundant nelements function, as it is no longer used. * Applied autoformatter. * Added max_abs_speed_naive( and max_abs_speed_naive for PolytropicEulerEquations2D. * Reverted coupling elixir to main branch version. The modified version should be part of a different PR. * Removed some modified coupling code as this should be part of a different PR. * Reverted changes on ooupling semidiscretization as this should be part of a different PR. * Reverted changes partaining the coupling PR. * Removed changes partaining coupling PR. * REverted to version including elixir_euler_warm_bubble.jl tests. * Update src/equations/polytropic_euler_2d.jl Co-authored-by: Andrew Winters * Update src/equations/polytropic_euler_2d.jl Co-authored-by: Andrew Winters * Update src/equations/polytropic_euler_2d.jl Co-authored-by: Andrew Winters * Update src/equations/polytropic_euler_2d.jl Co-authored-by: Andrés Rueda-Ramírez * Update src/equations/polytropic_euler_2d.jl Co-authored-by: Andrés Rueda-Ramírez * Update src/equations/polytropic_euler_2d.jl Co-authored-by: Daniel Doehring * Added consistency and rotation test for LAx-friedrich fluxes for polytropic equations in 2d. * Applied auto-formatter on polytropic 2d equation. 
* Update src/equations/polytropic_euler_2d.jl Co-authored-by: Andrew Winters --------- Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Hendrik Ranocha Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Co-authored-by: Erik Faulhaber <44124897+efaulhaber@users.noreply.github.com> Co-authored-by: Andrew Winters Co-authored-by: Andrés Rueda-Ramírez Co-authored-by: Daniel Doehring Co-authored-by: iomsn --- src/equations/polytropic_euler_2d.jl | 40 ++++++++++++++++++++++++++++ test/test_unit.jl | 24 +++++++++++++++++ 2 files changed, 64 insertions(+) diff --git a/src/equations/polytropic_euler_2d.jl b/src/equations/polytropic_euler_2d.jl index f5d2f7b0ba..e900fd6407 100644 --- a/src/equations/polytropic_euler_2d.jl +++ b/src/equations/polytropic_euler_2d.jl @@ -301,6 +301,46 @@ end return abs(v1) + c, abs(v2) + c end +# Calculate maximum wave speed for local Lax-Friedrichs-type dissipation as the +# maximum velocity magnitude plus the maximum speed of sound +@inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer, + equations::PolytropicEulerEquations2D) + rho_ll, v1_ll, v2_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr = cons2prim(u_rr, equations) + + # Get the velocity value in the appropriate direction + if orientation == 1 + v_ll = v1_ll + v_rr = v1_rr + else # orientation == 2 + v_ll = v2_ll + v_rr = v2_rr + end + # Calculate sound speeds (we have p = kappa * rho^gamma) + c_ll = sqrt(equations.gamma * equations.kappa * rho_ll^(equations.gamma - 1)) + c_rr = sqrt(equations.gamma * equations.kappa * rho_rr^(equations.gamma - 1)) + + λ_max = max(abs(v_ll), abs(v_rr)) + max(c_ll, c_rr) +end + +@inline function max_abs_speed_naive(u_ll, u_rr, normal_direction::AbstractVector, + equations::PolytropicEulerEquations2D) + rho_ll, v1_ll, v2_ll = cons2prim(u_ll, equations) + rho_rr, v1_rr, v2_rr = cons2prim(u_rr, equations) + + # Calculate normal velocities and sound speed (we have p = kappa * rho^gamma) + # left + v_ll = (v1_ll * normal_direction[1] + + v2_ll * normal_direction[2]) + c_ll = sqrt(equations.gamma * equations.kappa * rho_ll^(equations.gamma - 1)) + # right + v_rr = (v1_rr * normal_direction[1] + + v2_rr * normal_direction[2]) + c_rr = sqrt(equations.gamma * equations.kappa * rho_rr^(equations.gamma - 1)) + + return max(abs(v_ll), abs(v_rr)) + max(c_ll, c_rr) * norm(normal_direction) +end + # Convert conservative variables to primitive @inline function cons2prim(u, equations::PolytropicEulerEquations2D) rho, rho_v1, rho_v2 = u diff --git a/test/test_unit.jl b/test/test_unit.jl index 3b8dc3c433..c1379587cc 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -858,6 +858,30 @@ end end end +@timed_testset "Consistency check for Lax-Friedrich flux: Polytropic CEE" begin + for gamma in [1.4, 1.0, 5 / 3] + kappa = 0.5 # Scaling factor for the pressure. 
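+        # For identical left and right states the dissipation term of the local
+        # Lax-Friedrichs flux vanishes, so the numerical flux must reduce to the physical flux.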
+ equations = PolytropicEulerEquations2D(gamma, kappa) + u = SVector(1.1, -0.5, 2.34) + + orientations = [1, 2] + for orientation in orientations + @test flux_lax_friedrichs(u, u, orientation, equations) ≈ + flux(u, orientation, equations) + end + + normal_directions = [SVector(1.0, 0.0), + SVector(0.0, 1.0), + SVector(0.5, -0.5), + SVector(-1.2, 0.3)] + + for normal_direction in normal_directions + @test flux_lax_friedrichs(u, u, normal_direction, equations) ≈ + flux(u, normal_direction, equations) + end + end +end + @timed_testset "Consistency check for HLL flux with Davis wave speed estimates: LEE" begin flux_hll = FluxHLL(min_max_speed_davis) From e98a76b92fa1272b547b2abc997cfb1d885012f4 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 21 Feb 2024 11:02:15 +0100 Subject: [PATCH 16/58] improve benchmarks --- benchmark/benchmarks.jl | 12 ++++++++---- benchmark/run_benchmarks.jl | 4 ++++ 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/benchmark/benchmarks.jl b/benchmark/benchmarks.jl index 15d1d96c05..0d6fabcd4a 100644 --- a/benchmark/benchmarks.jl +++ b/benchmark/benchmarks.jl @@ -2,6 +2,9 @@ # readability #! format: off +using Pkg +Pkg.activate(@__DIR__) + using BenchmarkTools using Trixi @@ -47,13 +50,14 @@ end let SUITE["latency"] = BenchmarkGroup() SUITE["latency"]["default_example"] = @benchmarkable run( - `$(Base.julia_cmd()) -e 'using Trixi; trixi_include(default_example())'`) seconds=60 + `$(Base.julia_cmd()) --project=$(@__DIR__) -e 'using Trixi; trixi_include(default_example())'`) seconds=60 for polydeg in [3, 7] command = "using Trixi; trixi_include(joinpath(examples_dir(), \"tree_2d_dgsem\", \"elixir_advection_extended.jl\"), tspan=(0.0, 1.0e-10), polydeg=$(polydeg), save_restart=TrivialCallback(), save_solution=TrivialCallback())" - SUITE["latency"]["polydeg_$polydeg"] = @benchmarkable run($`$(Base.julia_cmd()) -e $command`) seconds=60 + SUITE["latency"]["polydeg_$polydeg"] = @benchmarkable run( + $`$(Base.julia_cmd()) --project=$(@__DIR__) -e $command`) seconds=60 end SUITE["latency"]["euler_2d"] = @benchmarkable run( - `$(Base.julia_cmd()) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_kelvin_helmholtz_instability.jl"), tspan=(0.0, 1.0e-10), save_restart=TrivialCallback(), save_solution=TrivialCallback())'`) seconds=60 + `$(Base.julia_cmd()) --project=$(@__DIR__) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_euler_kelvin_helmholtz_instability.jl"), tspan=(0.0, 1.0e-10), save_solution=TrivialCallback())'`) seconds=60 SUITE["latency"]["mhd_2d"] = @benchmarkable run( - `$(Base.julia_cmd()) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_mhd_blast_wave.jl"), tspan=(0.0, 1.0e-10), save_solution=TrivialCallback())'`) seconds=60 + `$(Base.julia_cmd()) --project=$(@__DIR__) -e 'using Trixi; trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_mhd_blast_wave.jl"), tspan=(0.0, 1.0e-10), save_solution=TrivialCallback())'`) seconds=60 end diff --git a/benchmark/run_benchmarks.jl b/benchmark/run_benchmarks.jl index 3a92a9ba70..e4e15223ea 100644 --- a/benchmark/run_benchmarks.jl +++ b/benchmark/run_benchmarks.jl @@ -1,3 +1,7 @@ +using Pkg +Pkg.activate(@__DIR__) +Pkg.develop(PackageSpec(path=dirname(@__DIR__))) +Pkg.instantiate() using PkgBenchmark using Trixi From 40b73cfde2a707dfc8d76ea92712a2287fd2e9e2 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 21 Feb 2024 11:12:54 +0100 Subject: [PATCH 17/58] format benchmarks --- 
benchmark/run_benchmarks.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/run_benchmarks.jl b/benchmark/run_benchmarks.jl index e4e15223ea..7b8c25752f 100644 --- a/benchmark/run_benchmarks.jl +++ b/benchmark/run_benchmarks.jl @@ -1,6 +1,6 @@ using Pkg Pkg.activate(@__DIR__) -Pkg.develop(PackageSpec(path=dirname(@__DIR__))) +Pkg.develop(PackageSpec(path = dirname(@__DIR__))) Pkg.instantiate() using PkgBenchmark From c3c0986a2eaf52a145cd59ab591ea3af75b40571 Mon Sep 17 00:00:00 2001 From: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Date: Wed, 21 Feb 2024 11:39:41 +0100 Subject: [PATCH 18/58] Create Downgrade.yml (second try) (#1848) * add downgrade.yml and bump compats * also downgrade test/Project.toml * bump lower compat for ForwardDiff in tests * allow older version of HDF5 * allow newest versions of Makie and CairoMakie * allow newest version of T8code * remove compat 0.11 from CairoMakie --- .github/workflows/Downgrade.yml | 86 +++++++++++++++++++++++++++++++++ Project.toml | 24 ++++----- test/Project.toml | 6 +-- 3 files changed, 101 insertions(+), 15 deletions(-) create mode 100644 .github/workflows/Downgrade.yml diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml new file mode 100644 index 0000000000..c84b1026d1 --- /dev/null +++ b/.github/workflows/Downgrade.yml @@ -0,0 +1,86 @@ +name: Downgrade + +on: + pull_request: + paths-ignore: + - 'AUTHORS.md' + - 'CITATION.bib' + - 'CONTRIBUTING.md' + - 'LICENSE.md' + - 'NEWS.md' + - 'README.md' + - '.zenodo.json' + - '.github/workflows/benchmark.yml' + - '.github/workflows/CompatHelper.yml' + - '.github/workflows/TagBot.yml' + - 'benchmark/**' + - 'docs/**' + - 'utils/**' + workflow_dispatch: + +# Cancel redundant CI tests automatically +concurrency: + group: ${{ github.workflow }}-${{ github.ref }} + cancel-in-progress: true + +jobs: + downgrade_test: + if: "!contains(github.event.head_commit.message, 'skip ci')" + # We could also include the Julia version as in + # name: ${{ matrix.trixi_test }} - ${{ matrix.os }} - Julia ${{ matrix.version }} - ${{ matrix.arch }} - ${{ github.event_name }} + # to be more specific. However, that requires us updating the required CI tests whenever we update Julia. 
+ name: Downgrade ${{ matrix.trixi_test }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} + runs-on: ${{ matrix.os }} + strategy: + fail-fast: false + matrix: + version: + - '1.9' + # - '~1.9.0-0' # including development versions + # - 'nightly' + os: + - ubuntu-latest + arch: + - x64 + trixi_test: + # - tree_part1 + # - tree_part2 + # - tree_part3 + # - tree_part4 + # - tree_part5 + # - tree_part6 + # - structured + # - p4est_part1 + # - p4est_part2 + # - t8code_part1 + # - unstructured_dgmulti + # - parabolic + # - paper_self_gravitating_gas_dynamics + # - misc_part1 + # - misc_part2 + # - performance_specializations_part1 + # - performance_specializations_part2 + # - mpi + - threaded + steps: + - uses: actions/checkout@v4 + - uses: julia-actions/setup-julia@v1 + with: + version: ${{ matrix.version }} + arch: ${{ matrix.arch }} + - run: julia -e 'using InteractiveUtils; versioninfo(verbose=true)' + - uses: julia-actions/cache@v1 + - uses: julia-actions/julia-downgrade-compat@v1 + with: + skip: LinearAlgebra,Printf,SparseArrays,DiffEqBase + projects: ., test + - uses: julia-actions/julia-buildpkg@v1 + env: + PYTHON: "" + - name: Run tests without coverage + uses: julia-actions/julia-runtest@v1 + with: + coverage: false + env: + PYTHON: "" + TRIXI_TEST: ${{ matrix.trixi_test }} diff --git a/Project.toml b/Project.toml index af3e6b4d07..9bed045637 100644 --- a/Project.toml +++ b/Project.toml @@ -62,17 +62,17 @@ DiffEqCallbacks = "2.25" Downloads = "1.6" EllipsisNotation = "1.0" FillArrays = "0.13.2, 1" -ForwardDiff = "0.10.18" -HDF5 = "0.14, 0.15, 0.16, 0.17" +ForwardDiff = "0.10.24" +HDF5 = "0.16.10, 0.17" IfElse = "0.1" LinearAlgebra = "1" LinearMaps = "2.7, 3.0" -LoopVectorization = "0.12.118" +LoopVectorization = "0.12.151" MPI = "0.20" Makie = "0.19, 0.20" MuladdMacro = "0.2.2" -Octavian = "0.3.5" -OffsetArrays = "1.3" +Octavian = "0.3.21" +OffsetArrays = "1.12" P4est = "0.4.9" Polyester = "0.7.5" PrecompileTools = "1.1" @@ -81,19 +81,19 @@ RecipesBase = "1.1" Reexport = "1.0" Requires = "1.1" SciMLBase = "1.90, 2" -Setfield = "0.8, 1" +Setfield = "1" SimpleUnPack = "1.1" SparseArrays = "1" -StartUpDG = "0.17" -Static = "0.3, 0.4, 0.5, 0.6, 0.7, 0.8" +StartUpDG = "0.17.7" +Static = "0.8.7" StaticArrayInterface = "1.4" -StaticArrays = "1" -StrideArrays = "0.1.18" -StructArrays = "0.6" +StaticArrays = "1.5" +StrideArrays = "0.1.26" +StructArrays = "0.6.11" SummationByPartsOperators = "0.5.41" T8code = "0.4.3, 0.5" TimerOutputs = "0.5.7" -Triangulate = "2.0" +Triangulate = "2.2" TriplotBase = "0.1" TriplotRecipes = "0.1" TrixiBase = "0.1.1" diff --git a/test/Project.toml b/test/Project.toml index ecae0ac090..a376c2805e 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -13,13 +13,13 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [compat] Aqua = "0.8" -CairoMakie = "0.6, 0.7, 0.8, 0.9, 0.10" +CairoMakie = "0.10" Downloads = "1" -ForwardDiff = "0.10" +ForwardDiff = "0.10.24" LinearAlgebra = "1" MPI = "0.20" OrdinaryDiffEq = "6.49.1" -Plots = "1.16" +Plots = "1.19" Printf = "1" Random = "1" Test = "1" From 1b2abd00e9b74d2cd185b0d06c5cf3182011e6c4 Mon Sep 17 00:00:00 2001 From: ArseniyKholod <119304909+ArseniyKholod@users.noreply.github.com> Date: Wed, 21 Feb 2024 12:48:03 +0200 Subject: [PATCH 19/58] Doc: Core aspects of the basic setup (#1699) * Doc: Core aspects of the basic setup * update pictures * Update innards_of_the_basic_setup.jl * mention ODE-Solvers * Revert "Revert "Merge branch 'main' into semidiscretization-doc"" This reverts commit 
85d6e8b50261885ad198ae4036e8599189912e56. * Revert "Merge branch 'main' into semidiscretization-doc" This reverts commit b8f8b0bc167a476b3dead39f6aadf4fe43601e7d, reversing changes made to bb518f50053cef054220fd35542f52a47214a997. * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/literate/src/files/innards_of_the_basic_setup.jl Co-authored-by: Michael Schlottke-Lakemper * Update docs/make.jl Co-authored-by: Michael Schlottke-Lakemper * Rename innards_of_the_basic_setup.jl to behind_the_scenes_simulation_setup.jl * add plot scripts * Format_and_last_review_changes * spell+output_directory_figures * spell * N->polydeg * add README for plots * line length <=100 * Update docs/literate/src/files/behind_the_scenes_simulation_setup_plots/README.md * add empty lines * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Andrew Winters * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: 
Andrew Winters * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * mention method of lines * Update behind_the_scenes_simulation_setup.jl * Update behind_the_scenes_simulation_setup.jl * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * simplify rhs description * format * add interpolation to mortars * Update behind_the_scenes_simulation_setup.jl * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * add resizability explanation * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Daniel Doehring * format * add introduction as 2nd tutorial * fix * Update docs/literate/src/files/behind_the_scenes_simulation_setup.jl Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> * add unsafe_wrap explanation --------- Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Andrew Winters Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Co-authored-by: Daniel Doehring --- .../behind_the_scenes_simulation_setup.jl | 253 ++++++++++++++++++ .../Project.toml | 2 + .../README.md | 15 ++ ...scretizationHyperbolic_structure_figure.jl | 64 +++++ .../src/generate_boundary_figure.jl | 190 +++++++++++++ .../src/generate_elements_figure.jl | 117 ++++++++ .../src/generate_interfaces_figure.jl | 157 +++++++++++ .../src/generate_mortars_figure.jl | 166 ++++++++++++ .../src/generate_nodes_figure.jl | 6 + .../src/generate_treemesh_figure.jl | 26 ++ .../src/rhs_structure_figure.jl | 43 +++ .../src/semidiscretize_structure_figure.jl | 51 ++++ docs/literate/src/files/index.jl | 45 ++-- docs/make.jl | 3 +- 14 files changed, 1119 insertions(+), 19 deletions(-) create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/Project.toml create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/README.md create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/SemidiscretizationHyperbolic_structure_figure.jl create mode 100644 
docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_boundary_figure.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_elements_figure.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_interfaces_figure.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_mortars_figure.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_nodes_figure.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_treemesh_figure.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/rhs_structure_figure.jl create mode 100644 docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/semidiscretize_structure_figure.jl diff --git a/docs/literate/src/files/behind_the_scenes_simulation_setup.jl b/docs/literate/src/files/behind_the_scenes_simulation_setup.jl new file mode 100644 index 0000000000..c93660e9bc --- /dev/null +++ b/docs/literate/src/files/behind_the_scenes_simulation_setup.jl @@ -0,0 +1,253 @@ +#src # Behind the scenes of a simulation setup + +# This tutorial will guide you through a simple Trixi.jl setup ("elixir"), giving an overview of +# what happens in the background during the initialization of a simulation. While the setup +# described herein does not cover all details, it involves relatively stable parts of Trixi.jl that +# are unlikely to undergo significant changes in the near future. The goal is to clarify some of +# the more fundamental, *technical* concepts that are applicable to a variety of +# (also more complex) configurations. + +# Trixi.jl follows the [method of lines](http://www.scholarpedia.org/article/Method_of_lines) concept for solving partial differential equations (PDEs). +# Firstly, the PDEs are reduced to a (potentially huge) system of +# ordinary differential equations (ODEs) by discretizing the spatial derivatives. Subsequently, +# these generated ODEs may be solved with methods available in OrdinaryDiffEq.jl or those specifically +# implemented in Trixi.jl. The following steps elucidate the process of transitioning from PDEs to +# ODEs within the framework of Trixi.jl. + +# ## Basic setup + +# Import essential libraries and specify an equation. + +using Trixi, OrdinaryDiffEq +equations = LinearScalarAdvectionEquation2D((-0.2, 0.7)) + +# Generate a spatial discretization using a [`TreeMesh`](@ref) with a pre-coarsened set of cells. + +coordinates_min = (-2.0, -2.0) +coordinates_max = (2.0, 2.0) + +coarsening_patches = ((type = "box", coordinates_min = [0.0, -2.0], + coordinates_max = [2.0, 0.0]),) + +mesh = TreeMesh(coordinates_min, coordinates_max, initial_refinement_level = 2, + n_cells_max = 30_000, + coarsening_patches = coarsening_patches) + +# The created `TreeMesh` looks like the following: + +# ![TreeMesh_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/d5ef76ee-8246-4730-a692-b472c06063a3) + +# Instantiate a [`DGSEM`](@ref) solver with a user-specified polynomial degree. The solver +# will define `polydeg + 1` [Gauss-Lobatto nodes](https://en.wikipedia.org/wiki/Gaussian_quadrature#Gauss%E2%80%93Lobatto_rules) and their associated weights within +# the reference interval ``[-1, 1]`` in each spatial direction. These nodes will be subsequently +# used to approximate solutions on each leaf cell of the `TreeMesh`. 
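+# As a brief illustration (a minimal check, assuming the `nodes` and `weights` fields of the
+# exported `LobattoLegendreBasis`), the four Gauss-Lobatto nodes for `polydeg = 3` are
+# ``\pm 1`` and ``\pm\sqrt{1/5} \approx \pm 0.447``, with quadrature weights
+# ``1/6, 5/6, 5/6, 1/6``:
+
+basis = LobattoLegendreBasis(3)
+basis.nodes   # ≈ [-1.0, -0.447, 0.447, 1.0]
+basis.weights # ≈ [0.167, 0.833, 0.833, 0.167]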
+ +solver = DGSEM(polydeg = 3) + +# Gauss-Lobatto nodes with `polydeg = 3`: + +# ![Gauss-Lobatto_nodes_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/1d894611-801e-4f75-bff0-d77ca1c672e5) + +# ## Overview of the [`SemidiscretizationHyperbolic`](@ref) type + +# At this stage, all necessary components for configuring the spatial discretization are in place. +# The remaining task is to combine these components into a single structure that will be used +# throughout the entire simulation process. This is where [`SemidiscretizationHyperbolic`](@ref) +# comes into play. + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition_convergence_test, + solver) + +# The constructor for the `SemidiscretizationHyperbolic` object calls numerous sub-functions to +# perform the necessary initialization steps. A brief description of the key sub-functions is +# provided below. + + +# - `init_elements(leaf_cell_ids, mesh, equations, dg.basis, RealT, uEltype)` + +# The fundamental elements for approximating the solution are the leaf +# cells. The solution is constructed as a polynomial of the degree specified in the `DGSEM` +# solver in each spatial direction on each leaf cell. This polynomial approximation is evaluated +# at the Gauss-Lobatto nodes mentioned earlier. The `init_elements` function extracts +# these leaf cells from the `TreeMesh`, assigns them the label "elements", records their +# coordinates, and maps the Gauss-Lobatto nodes from the 1D interval ``[-1, 1]`` onto each coordinate axis +# of every element. + + +# ![elements_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/9f486670-b579-4e42-8697-439540c8bbb4) + +# The visualization of elements with nodes shown here includes spaces between elements, which do +# not exist in reality. This spacing is included only for illustrative purposes to underscore the +# separation of elements and the independent projection of nodes onto each element. + + +# - `init_interfaces(leaf_cell_ids, mesh, elements)` + +# At this point, the elements with nodes have been defined; however, they lack the necessary +# communication functionality. This is crucial because the local solution polynomials on the +# elements are not independent of each other. Furthermore, nodes on the boundary of adjacent +# elements share the same spatial location, which requires a method to combine this into a +# meaningful solution. +# Here [Riemann solvers](https://en.wikipedia.org/wiki/Riemann_solver#Approximate_solvers) +# come into play which can handle the principal ambiguity of a multi-valued solution at the +# same spatial location. + +# As demonstrated earlier, the elements can have varying sizes. Let us initially consider +# neighbors with equal size. For these elements, the `init_interfaces` function generates +# interfaces that store information about adjacent elements, their relative positions, and +# allocate containers for sharing solution data between neighbors during the solution process. + +# In our visualization, these interfaces would conceptually resemble tubes connecting the +# corresponding elements. + +# ![interfaces_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/bc3b6b02-afbc-4371-aaf7-c7bdc5a6c540) + + +# - `init_mortars(leaf_cell_ids, mesh, elements, dg.mortar)` + +# Returning to the consideration of different sizes among adjacent elements, within the +# `TreeMesh`, adjacent leaf cells can vary in side length by a maximum factor of two. 
This +# implies that a large element has one neighbor of +# equal size with a connection through an interface, or two neighbors at half the size, +# requiring a connection through so called "mortars". In 3D, a large element would have +# four small neighbor elements. + +# Mortars store information about the connected elements, their relative positions, and allocate +# containers for storing the solutions along the boundaries between these elements. + +# Due to the differing sizes of adjacent elements, it is not feasible to directly map boundary +# nodes of adjacent elements. Therefore, the concept of mortars employs a mass-conserving +# interpolation function to map boundary nodes from a larger element to a smaller one. + +# In our visualization, mortars are represented as branched tubes. + +# ![mortars_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/43a95a60-3a31-4b1f-8724-14049e7a0481) + + +# - `init_boundaries(leaf_cell_ids, mesh, elements)` + +# In order to apply boundary conditions, it is necessary to identify the locations of the +# boundaries. Therefore, we initialize a "boundaries" object, which records the elements that +# contain boundaries, specifies which side of an element is a boundary, stores the coordinates +# of boundary nodes, and allocates containers for managing solutions at these boundaries. + +# In our visualization, boundaries and their corresponding nodes are highlighted with green, +# semi-transparent lines. + +# ![boundaries_example](https://github.com/trixi-framework/Trixi.jl/assets/119304909/21996b20-4a22-4dfb-b16a-e2c22c2f29fe) + +# All the structures mentioned earlier are collected as a cache of type `NamedTuple`. Subsequently, +# an object of type `SemidiscretizationHyperbolic` is initialized using this cache, initial and +# boundary conditions, equations, mesh and solver. + +# In conclusion, the primary purpose of a `SemidiscretizationHyperbolic` is to collect equations, +# the geometric representation of the domain, and approximation instructions, creating specialized +# structures to interconnect these components in a manner that enables their utilization for +# the numerical solution of partial differential equations (PDEs). + +# As evident from the earlier description of `SemidiscretizationHyperbolic`, it comprises numerous +# functions called subsequently. Without delving into details, the structure of the primary calls +# are illustrated as follows: + +# ![SemidiscretizationHyperbolic_structure](https://github.com/trixi-framework/Trixi.jl/assets/119304909/8bf59422-0537-4d7a-9f13-d9b2253c19d7) + +# ## Overview of the [`semidiscretize`](@ref) function + +# At this stage, we have defined the equations and configured the domain's discretization. The +# final step before solving is to select a suitable time span and apply the corresponding initial +# conditions, which are already stored in the initialized `SemidiscretizationHyperbolic` object. + +# The purpose of the [`semidiscretize`](@ref) function is to wrap the semidiscretization as an +# `ODEProblem` within the specified time interval. During this procedure the approximate solution +# is created at the given initial time via the specified `initial_condition` function from the +# `SemidiscretizationHyperbolic` object. This `ODEProblem` can be subsequently passed to the +# `solve` function from the [OrdinaryDiffEq.jl](https://github.com/SciML/OrdinaryDiffEq.jl) package +# or to [`Trixi.solve`](@ref). 
+ +ode = semidiscretize(semi, (0.0, 1.0)); + +# The `semidiscretize` function involves a deep tree of subsequent calls, with the primary ones +# explained below. + + +# - `allocate_coefficients(mesh, equations, solver, cache)` + +# To apply initial conditions, a data structure ("container") needs to be generated to store the +# initial values of the target variables for each node within each element. + +# Since only one-dimensional `Array`s are `resize!`able in Julia, we use `Vector`s as an internal +# storage for the target variables and `resize!` them whenever needed, e.g. to change the number +# of elements. Then, during the solving process the same memory is reused by `unsafe_wrap`ping +# multi-dimensional `Array`s around the internal storage. + +# - `wrap_array(u_ode, semi)` + +# As previously noted, `u_ode` is constructed as a 1D vector to ensure compatibility with +# OrdinaryDiffEq.jl. However, for internal use within Trixi.jl, identifying which part of the +# vector relates to specific variables, elements, or nodes can be challenging. + +# This is why the `u_ode` vector is wrapped by the `wrap_array` function using `unsafe_wrap` +# to form a multidimensional array `u`. In this array, the first dimension corresponds to +# variables, followed by N dimensions corresponding to nodes for each of N space dimensions. +# The last dimension corresponds to the elements. +# Consequently, navigation within this multidimensional array becomes noticeably easier. + +# "Wrapping" in this context involves the creation of a reference to the same storage location +# but with an alternative structural representation. This approach enables the use of both +# instances `u` and `u_ode` as needed, so that changes are simultaneously reflected in both. +# This is possible because, from a storage perspective, they share the same stored data, while +# access to this data is provided in different ways. + + +# - `compute_coefficients!(u, initial_conditions, t, mesh::DG, equations, solver, cache)` + +# Now the variable `u`, intended to store solutions, has been allocated and wrapped, it is time +# to apply the initial conditions. The `compute_coefficients!` function calculates the initial +# conditions for each variable at every node within each element and properly stores them in the +# `u` array. + +# At this stage, the `semidiscretize` function has all the necessary components to initialize and +# return an `ODEProblem` object, which will be used by the `solve` function to compute the +# solution. + +# In summary, the internal workings of `semidiscretize` with brief descriptions can be presented +# as follows. + +# ![semidiscretize_structure](https://github.com/trixi-framework/Trixi.jl/assets/119304909/491eddc4-aadb-4e29-8c76-a7c821d0674e) + +# ## Functions `solve` and `rhs!` + +# Once the `ODEProblem` object is initialized, the `solve` function and one of the ODE solvers from +# the OrdinaryDiffEq.jl package can be utilized to compute an approximated solution using the +# instructions contained in the `ODEProblem` object. + +sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), dt = 0.01, + save_everystep = false); + +# Since the `solve` function and the ODE solver have no knowledge +# of a particular spatial discretization, it is necessary to define a +# "right-hand-side function", `rhs!`, within Trixi.jl. + +# Trixi.jl includes a set of `rhs!` functions designed to compute `du`, i.e., +# ``\frac{\partial u}{\partial t}`` according to the structure +# of the setup. 
These `rhs!` functions calculate interface, mortars, and boundary fluxes, in +# addition to surface and volume integrals, in order to construct the `du` vector. This `du` vector +# is then used by the time integration method to obtain the solution at the subsequent time step. +# The `rhs!` function is called by time integration methods in each iteration of the solve loop +# within OrdinaryDiffEq.jl, with arguments `du`, `u`, `semidiscretization`, and the current time. + +# Trixi.jl uses a two-levels approach for `rhs!` functions. The first level is limited to a +# single function for each `semidiscretization` type, and its role is to redirect data to the +# target `rhs!` for specific solver and mesh types. This target `rhs!` function is responsible +# for calculating `du`. + +# Path from the `solve` function call to the appropriate `rhs!` function call: + +# ![rhs_structure](https://github.com/trixi-framework/Trixi.jl/assets/119304909/dbea9a0e-25a4-4afa-855e-01f1ad619982) + +# Computed solution: + +using Plots +plot(sol) +pd = PlotData2D(sol) +plot!(getmesh(pd)) diff --git a/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/Project.toml b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/Project.toml new file mode 100644 index 0000000000..43aec5b7f5 --- /dev/null +++ b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/Project.toml @@ -0,0 +1,2 @@ +[deps] +Plots = "91a5bcdd-55d7-5caf-9e0b-520d859cae80" diff --git a/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/README.md b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/README.md new file mode 100644 index 0000000000..011b5c7586 --- /dev/null +++ b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/README.md @@ -0,0 +1,15 @@ +# Plots for the tutorial "Behind the scenes of a simulation setup" + +To create all the images for the tutorial, execute the following command from the directory of this `README.md`: +```julia +pkg> activate . +julia> include.(readdir("src"; join=true)) +``` +To create all images from a different directory, substitute `"src"` with the path to the `src` +folder. The resulting images will be generated in your current directory as PNG files. + +To generate a specific image, run the following command while replacing `"path/to/src"` and `"file_name"` with the appropriate values: +```julia +pkg> activate . 
+julia> include(joinpath("path/to/src", "file_name")) +``` \ No newline at end of file diff --git a/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/SemidiscretizationHyperbolic_structure_figure.jl b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/SemidiscretizationHyperbolic_structure_figure.jl new file mode 100644 index 0000000000..cae7b19d47 --- /dev/null +++ b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/SemidiscretizationHyperbolic_structure_figure.jl @@ -0,0 +1,64 @@ +using Plots +plot(Shape([(-2.3,4.5), (2.35,4.5), (2.35,2.5), (-2.3,2.5)]), linecolor="black", fillcolor="white", label=false,linewidth=2, size=(800,600), showaxis=false, grid=false, xlim=(-2.4,2.8), ylim=(-25,5.5)) +annotate!(2.3, 3.5, ("SemidiscretizationHyperbolic(mesh, equations, initial_conditions, solver; source_terms, +boundary_conditions, RealT, uEltype, initial_cache) ", 10, :black, :right)) +annotate!(-2.3, 1.5, ("creates and returns SemidiscretizationHyperbolic object, initialized using a mesh, equations, +initial_conditions, boundary_conditions, source_terms, solver and cache", 9, :black, :left)) +plot!([-1.2,-1.2],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +plot!([-1.2,-1.4],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +plot!([-1.2,-1.],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +annotate!(-1, -0.7, ("specialized for mesh +and solver types", 9, :black, :left)) +plot!([1.25,1.25],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +plot!([1.25,1.05],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +plot!([1.25,1.45],[0.6,-2],arrow=true,color=:black,linewidth=2,label="") +annotate!(1.48, -0.7, ("specialized for mesh +and boundary_conditions +types", 9, :black, :left)) + +plot!(Shape([(-2.3,-2), (-0.1,-2), (-0.1,-4), (-2.3,-4)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.2, -3, ("create_cache(mesh::TreeMesh, equations, + solver::Dg, RealT, uEltype)", 10, :black, :center)) +plot!([-2.22,-2.22],[-4,-22],arrow=false,color=:black,linewidth=2,label="") + +plot!(Shape([(-0.05,-2), (2.6,-2), (2.6,-4), (-0.05,-4)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(1.27, -3, ("digest_boundary_conditions(boundary_conditions, + mesh, solver, cache)", 10, :black, :center)) +annotate!(2.6, -5, ("if necessary, converts passed boundary_conditions + into a suitable form for processing by Trixi.jl", 9, :black, :right)) + +plot!(Shape([(-2,-6), (-0.55,-6), (-0.55,-7.1), (-2,-7.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.95, -6.5, ("local_leaf_cells(mesh.tree)", 10, :black, :left)) +annotate!(-2, -7.5, ("returns cells for which an element needs to be created (i.e. 
all leaf cells)", 9, :black, :left)) +plot!([-2.22,-2],[-6.5,-6.5],arrow=true,color=:black,linewidth=2,label="") + +plot!(Shape([(-2,-9), (1.73,-9), (1.73,-10.1), (-2,-10.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.95, -9.5, ("init_elements(leaf_cell_ids, mesh, equations, dg.basis, RealT, uEltype)", 10, :black, :left)) +annotate!(-2, -10.5, ("creates and initializes elements, projects Gauss-Lobatto basis onto each of them", 9, :black, :left)) +plot!([-2.22,-2],[-9.5,-9.5],arrow=true,color=:black,linewidth=2,label="") + +plot!(Shape([(-2,-12), (0.4,-12), (0.4,-13.1), (-2,-13.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.95, -12.5, ("init_interfaces(leaf_cell_ids, mesh, elements)", 10, :black, :left)) +annotate!(-2, -13.5, ("creates and initializes interfaces between each pair of adjacent elements of the same size", 9, :black, :left)) +plot!([-2.22,-2],[-12.5,-12.5],arrow=true,color=:black,linewidth=2,label="") + +plot!(Shape([(-2,-15), (0.5,-15), (0.5,-16.1), (-2,-16.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.95, -15.5, ("init_boundaries(leaf_cell_ids, mesh, elements)", 10, :black, :left)) +annotate!(-2, -17, ("creates and initializes boundaries, remembers each boundary element, as well as the coordinates of +each boundary node", 9, :black, :left)) +plot!([-2.22,-2],[-15.5,-15.5],arrow=true,color=:black,linewidth=2,label="") + +plot!(Shape([(-1.6,-18), (1.3,-18), (1.3,-19.1), (-1.6,-19.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.55, -18.5, ("init_mortars(leaf_cell_ids, mesh, elements, dg.mortar)", 10, :black, :left)) +annotate!(-1.6, -20, ("creates and initializes mortars (type of interfaces) between each triple of adjacent coarsened +and corresponding small elements", 9, :black, :left)) +plot!([-2.22,-1.6],[-18.5,-18.5],arrow=true,color=:black,linewidth=2,label="") +annotate!(-2.15, -19, ("2D and 3D", 8, :black, :left)) + +plot!(Shape([(-2,-21), (1.5,-21), (1.5,-23.1), (-2,-23.1)]), linecolor="black", fillcolor="white", label=false,linewidth=2) +annotate!(-1.95, -22, ("create_cache(mesh, equations, dg.volume_integral, dg, uEltype) +for 2D and 3D create_cache(mesh, equations, dg.mortar, uEltype)", 10, :black, :left)) +annotate!(-2, -23.5, ("add specialized parts of the cache required to compute the volume integral, etc.", 9, :black, :left)) +plot!([-2.22,-2],[-22,-22],arrow=true,color=:black,linewidth=2,label="") + +savefig("./SemidiscretizationHyperbolic") \ No newline at end of file diff --git a/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_boundary_figure.jl b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_boundary_figure.jl new file mode 100644 index 0000000000..14475d2133 --- /dev/null +++ b/docs/literate/src/files/behind_the_scenes_simulation_setup_plots/src/generate_boundary_figure.jl @@ -0,0 +1,190 @@ +using Plots + +function min(coordinates::Vector{Tuple{Float64, Float64}}, i) + min=coordinates[1][i] + for j in coordinates + if min>j[i] + min=j[i] + end + end + return min +end + +function max(coordinates::Vector{Tuple{Float64, Float64}}, i) + max=coordinates[1][i] + for j in coordinates + if maxj[i] + min=j[i] + end + end + return min +end + +function max(coordinates::Vector{Tuple{Float64, Float64}}, i) + max=coordinates[1][i] + for j in coordinates + if maxj[i] + min=j[i] + end + end + return min +end + +function max(coordinates::Vector{Tuple{Float64, Float64}}, i) + 
max=coordinates[1][i] + for j in coordinates + if maxj[i] + min=j[i] + end + end + return min +end + +function max(coordinates::Vector{Tuple{Float64, Float64}}, i) + max=coordinates[1][i] + for j in coordinates + if max ("first_steps", "create_first_setup.jl"), "Changing Trixi.jl itself" => ("first_steps", "changing_trixi.jl"), ], + "Behind the scenes of a simulation setup" => "behind_the_scenes_simulation_setup.jl", # Topic: DG semidiscretizations "Introduction to DG methods" => "scalar_linear_advection_1d.jl", "DGSEM with flux differencing" => "DGSEM_FluxDiff.jl", @@ -76,7 +77,7 @@ files = [ # Topic: other stuff "Explicit time stepping" => "time_stepping.jl", "Differentiable programming" => "differentiable_programming.jl", - "Custom semidiscretizations" => "custom_semidiscretization.jl" + "Custom semidiscretizations" => "custom_semidiscretization.jl", ] tutorials = create_tutorials(files) From 88e4a09b919548ad308e94dfe1b515947b581558 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 22 Feb 2024 13:49:15 +0100 Subject: [PATCH 20/58] Update benchmarking docs (#1849) * Update benchmarking docs * Update performance.md * Update docs/src/performance.md Co-authored-by: Daniel Doehring --------- Co-authored-by: Daniel Doehring --- docs/src/performance.md | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/docs/src/performance.md b/docs/src/performance.md index 82d7f501f6..40970e58c5 100644 --- a/docs/src/performance.md +++ b/docs/src/performance.md @@ -106,7 +106,22 @@ resulting performance improvements of Trixi.jl are given in the following blog p We use [PkgBenchmark.jl](https://github.com/JuliaCI/PkgBenchmark.jl) to provide a standard set of benchmarks for Trixi.jl. The relevant benchmark script is [benchmark/benchmarks.jl](https://github.com/trixi-framework/Trixi.jl/blob/main/benchmark/benchmarks.jl). -You can run a standard set of benchmarks via +To benchmark the changes made in a PR, please proceed as follows: + +1. Check out the latest `main` branch of your Trixi.jl development repository. +2. Check out the latest development branch of your PR. +3. Change your working directory to the `benchmark` directory of Trixi.jl. +4. Execute `julia run_benchmarks.jl`. + +This will take some hours to complete and requires at least 8 GiB of RAM. When everything is finished, some +output files will be created in the `benchmark` directory of Trixi.jl. + +!!! warning + Please note that the benchmark scripts use `--check-bounds=no` at the moment. + Thus, they will not work in any useful way for Julia v1.10 (and newer?), see + [Julia issue #50985](https://github.com/JuliaLang/julia/issues/50985). + +You can also run a standard set of benchmarks manually via ```julia julia> using PkgBenchmark, Trixi From 9f7eadb3c056ee63af051c1397bfe50fa9ba9a47 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 22 Feb 2024 14:13:39 +0100 Subject: [PATCH 21/58] set version to v0.6.10 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 9bed045637..221e96643a 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.6.10-pre" +version = "0.6.10" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 5185abd96a0319599d01d57d196ecabdaa083477 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 22 Feb 2024 14:13:52 +0100 Subject: [PATCH 22/58] set development version to v0.6.11-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 221e96643a..551e069b93 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.10" +version = "0.6.11-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 029ddea44cedc076251a5e6c832af7f0bec4ed90 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Fri, 23 Feb 2024 05:25:00 +0100 Subject: [PATCH 23/58] Own `sqrt` and `log` returning `NaN` for "correct" multi-thread behaviour (#1781) * Introduce NaNMath for unsafe sqrt and log * performance measurements * implement log myself * Try out different log implementation * remove NaNMath, own implementation * remove unrelated * Update src/equations/compressible_euler_2d.jl * NaNSqrt for quasi 1d CEE * fmt * Update src/auxiliary/math.jl * Update src/auxiliary/math.jl * Update src/auxiliary/math.jl * for comparison * Update src/auxiliary/math.jl Co-authored-by: Hendrik Ranocha * Update src/auxiliary/math.jl Co-authored-by: Hendrik Ranocha * llvm version log * Catch ints in sqrt_ * Use sqrt_ log_ everywhere * docu * fmt * replace in comment * try exporting nan funcs * enable SIMD again * Bring back SIMD * doc * Update src/auxiliary/math.jl Co-authored-by: Hendrik Ranocha * Update src/auxiliary/math.jl Co-authored-by: Hendrik Ranocha * docstring fmt * remove redundant docstrings * no own names * fmt * revert unintended * revert * remove unintended * revert * fmt * comments * update test vals * test vals * test vals * Preferences * Update Project.toml * Update src/Trixi.jl * fmt * docstrings * docstrings * docstrings * compat info * Apply suggestions from code review Co-authored-by: Hendrik Ranocha * escape " * fmt * fix benchmarks configuration * skip UUIDs in downgrade CI job * Update src/auxiliary/math.jl Co-authored-by: Hendrik Ranocha * Update Project.toml Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> --------- Co-authored-by: Hendrik Ranocha Co-authored-by: Hendrik Ranocha Co-authored-by: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> --- .github/workflows/Downgrade.yml | 2 +- Project.toml | 4 ++ src/Trixi.jl | 6 ++ src/auxiliary/math.jl | 97 +++++++++++++++++++++++++++++++++ test/test_parabolic_1d.jl | 12 ++-- test/test_unstructured_2d.jl | 6 +- 6 files changed, 117 insertions(+), 10 deletions(-) diff --git a/.github/workflows/Downgrade.yml b/.github/workflows/Downgrade.yml index c84b1026d1..dd5d8ee7e3 100644 --- a/.github/workflows/Downgrade.yml +++ b/.github/workflows/Downgrade.yml @@ -72,7 +72,7 @@ jobs: - uses: julia-actions/cache@v1 - uses: julia-actions/julia-downgrade-compat@v1 with: - skip: LinearAlgebra,Printf,SparseArrays,DiffEqBase + skip: LinearAlgebra,Printf,SparseArrays,UUIDs,DiffEqBase projects: ., test - uses: julia-actions/julia-buildpkg@v1 env: diff --git a/Project.toml b/Project.toml index 551e069b93..6b27e6e999 100644 --- a/Project.toml +++ b/Project.toml @@ -25,6 +25,7 @@ OffsetArrays = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" 
P4est = "7d669430-f675-4ae7-b43e-fab78ec5a902" Polyester = "f517fe37-dbe3-4b94-8317-1923a5111588" PrecompileTools = "aea7be01-6a6a-4083-8856-8a6e6704d82a" +Preferences = "21216c6a-2e73-6563-6e65-726566657250" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" @@ -46,6 +47,7 @@ Triangulate = "f7e6ffb2-c36d-4f8f-a77e-16e897189344" TriplotBase = "981d1d27-644d-49a2-9326-4793e63143c3" TriplotRecipes = "808ab39a-a642-4abf-81ff-4cb34ebbffa3" TrixiBase = "9a0f1c46-06d5-4909-a5a3-ce25d3fa3284" +UUIDs = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" [weakdeps] Makie = "ee78f7c6-11fb-53f2-987a-cfe4a2b5a57a" @@ -76,6 +78,7 @@ OffsetArrays = "1.12" P4est = "0.4.9" Polyester = "0.7.5" PrecompileTools = "1.1" +Preferences = "1.3" Printf = "1" RecipesBase = "1.1" Reexport = "1.0" @@ -97,6 +100,7 @@ Triangulate = "2.2" TriplotBase = "0.1" TriplotRecipes = "0.1" TrixiBase = "0.1.1" +UUIDs = "1.6" julia = "1.8" [extras] diff --git a/src/Trixi.jl b/src/Trixi.jl index bf0986084a..b7f7767a9d 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -76,6 +76,12 @@ using TrixiBase: TrixiBase using SimpleUnPack: @pack! using DataStructures: BinaryHeap, FasterForward, extract_all! +using UUIDs: UUID +using Preferences: @load_preference, set_preferences! + +const _PREFERENCE_SQRT = @load_preference("sqrt", "sqrt_Trixi_NaN") +const _PREFERENCE_LOG = @load_preference("log", "log_Trixi_NaN") + # finite difference SBP operators using SummationByPartsOperators: AbstractDerivativeOperator, AbstractNonperiodicDerivativeOperator, DerivativeOperator, diff --git a/src/auxiliary/math.jl b/src/auxiliary/math.jl index 38ea0bda8c..9e3aaa181b 100644 --- a/src/auxiliary/math.jl +++ b/src/auxiliary/math.jl @@ -5,6 +5,103 @@ @muladd begin #! format: noindent +const TRIXI_UUID = UUID("a7f1ee26-1774-49b1-8366-f1abc58fbfcb") + +""" + Trixi.set_sqrt_type(type; force = true) + +Set the `type` of the square root function to be used in Trixi.jl. +The default is `"sqrt_Trixi_NaN"` which returns `NaN` for negative arguments +instead of throwing an error. +Alternatively, you can set `type` to `"sqrt_Base"` to use the Julia built-in `sqrt` function +which provides a stack-trace of the error which might come in handy when debugging code. +""" +function set_sqrt_type(type; force = true) + @assert type == "sqrt_Trixi_NaN"||type == "sqrt_Base" "Only allowed `sqrt` function types are `\"sqrt_Trixi_NaN\"` and `\"sqrt_Base\"`" + set_preferences!(TRIXI_UUID, "sqrt" => type, force = force) + @info "Please restart Julia and reload Trixi.jl for the `sqrt` computation change to take effect" +end + +@static if _PREFERENCE_SQRT == "sqrt_Trixi_NaN" + """ + Trixi.sqrt(x::Real) + + Custom square root function which returns `NaN` for negative arguments instead of throwing an error. + This is required to ensure [correct results for multithreaded computations](https://github.com/trixi-framework/Trixi.jl/issues/1766) + when using the [`Polyester` package](https://github.com/JuliaSIMD/Polyester.jl), + i.e., using the `@batch` macro instead of the Julia built-in `@threads` macro, see [`@threaded`](@ref). + + We dispatch this function for `Float64, Float32, Float16` to the LLVM intrinsics + `llvm.sqrt.f64`, `llvm.sqrt.f32`, `llvm.sqrt.f16` as for these the LLVM functions can be used out-of the box, + i.e., they return `NaN` for negative arguments. 
+ In principle, one could also use the `sqrt_llvm` call, but for transparency and consistency with [`log`](@ref) we + spell out the datatype-dependent functions here. + For other types, such as integers or dual numbers required for algorithmic differentiation, we + fall back to the Julia built-in `sqrt` function after a check for negative arguments. + Since these cases are not performance critical, the check for negativity does not hurt here + and can (as of now) even be optimized away by the compiler due to the implementation of `sqrt` in Julia. + + When debugging code, it might be useful to change the implementation of this function to redirect to + the Julia built-in `sqrt` function, as this reports the exact place in code where the domain is violated + in the stacktrace. + + See also [`Trixi.set_sqrt_type`](@ref). + """ + @inline sqrt(x::Real) = x < zero(x) ? oftype(x, NaN) : Base.sqrt(x) + + # For `sqrt` we could use the `sqrt_llvm` call, ... + #@inline sqrt(x::Union{Float64, Float32, Float16}) = Base.sqrt_llvm(x) + + # ... but for transparency and consistency we use the direct LLVM calls here. + @inline sqrt(x::Float64) = ccall("llvm.sqrt.f64", llvmcall, Float64, (Float64,), x) + @inline sqrt(x::Float32) = ccall("llvm.sqrt.f32", llvmcall, Float32, (Float32,), x) + @inline sqrt(x::Float16) = ccall("llvm.sqrt.f16", llvmcall, Float16, (Float16,), x) +end + +""" + Trixi.set_log_type(type; force = true) + +Set the `type` of the (natural) `log` function to be used in Trixi.jl. +The default is `"sqrt_Trixi_NaN"` which returns `NaN` for negative arguments +instead of throwing an error. +Alternatively, you can set `type` to `"sqrt_Base"` to use the Julia built-in `sqrt` function +which provides a stack-trace of the error which might come in handy when debugging code. +""" +function set_log_type(type; force = true) + @assert type == "log_Trixi_NaN"||type == "log_Base" "Only allowed log function types are `\"log_Trixi_NaN\"` and `\"log_Base\"`." + set_preferences!(TRIXI_UUID, "log" => type, force = force) + @info "Please restart Julia and reload Trixi.jl for the `log` computation change to take effect" +end + +@static if _PREFERENCE_LOG == "log_Trixi_NaN" + """ + Trixi.log(x::Real) + + Custom natural logarithm function which returns `NaN` for negative arguments instead of throwing an error. + This is required to ensure [correct results for multithreaded computations](https://github.com/trixi-framework/Trixi.jl/issues/1766) + when using the [`Polyester` package](https://github.com/JuliaSIMD/Polyester.jl), + i.e., using the `@batch` macro instead of the Julia built-in `@threads` macro, see [`@threaded`](@ref). + + We dispatch this function for `Float64, Float32, Float16` to the respective LLVM intrinsics + `llvm.log.f64`, `llvm.log.f32`, `llvm.log.f16` as for this the LLVM functions can be used out-of the box, i.e., + they return `NaN` for negative arguments. + For other types, such as integers or dual numbers required for algorithmic differentiation, we + fall back to the Julia built-in `log` function after a check for negative arguments. + Since these cases are not performance critical, the check for negativity does not hurt here. + + When debugging code, it might be useful to change the implementation of this function to redirect to + the Julia built-in `log` function, as this reports the exact place in code where the domain is violated + in the stacktrace. + + See also [`Trixi.set_log_type`](@ref). + """ + @inline log(x::Real) = x < zero(x) ? 
oftype(x, NaN) : Base.log(x) + + @inline log(x::Float64) = ccall("llvm.log.f64", llvmcall, Float64, (Float64,), x) + @inline log(x::Float32) = ccall("llvm.log.f32", llvmcall, Float32, (Float32,), x) + @inline log(x::Float16) = ccall("llvm.log.f16", llvmcall, Float16, (Float16,), x) +end + """ ln_mean(x, y) diff --git a/test/test_parabolic_1d.jl b/test/test_parabolic_1d.jl index c1cfec052f..41d375e2e3 100644 --- a/test/test_parabolic_1d.jl +++ b/test/test_parabolic_1d.jl @@ -195,14 +195,14 @@ end Prandtl = prandtl_number(), gradient_variables = GradientVariablesEntropy()), l2=[ - 2.459359632523962e-5, - 2.3928390718460263e-5, - 0.00011252414117082376, + 2.4593501090944024e-5, + 2.3928163240907908e-5, + 0.00011252309905552921, ], linf=[ - 0.0001185052018830568, - 0.00018987717854305393, - 0.0009597503607920999, + 0.0001185048754512863, + 0.0001898766501935486, + 0.0009597450028770993, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index 139b423ead..83b8318c92 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -710,9 +710,9 @@ end 1.0066867437607972e-13, 6.889210012578449e-14, 1.568290814572709e-13], - linf=[5.963762816918461e-10, - 5.08869890669672e-11, - 1.1581377523661729e-10, + linf=[2.353373051988683e-10, + 2.801543719233024e-11, + 3.930469838486772e-11, 4.61017890529547e-11], tspan=(0.0, 0.1), atol=1.0e-11) From 62610bb6a858eedfaf8fe61f074aa757441a08d1 Mon Sep 17 00:00:00 2001 From: Patrick Ersing <114223904+patrickersing@users.noreply.github.com> Date: Fri, 23 Feb 2024 05:30:08 +0100 Subject: [PATCH 24/58] Separation of TrixiShallowWater.jl (#1809) * remove wet_dry functionality for SWE-1D * remove wet_dry functionality for SWE-2D * remove twolayer equations * remove limiters from swe_quasi_1d * remove export of min_max_speed_chen_noelle * remove elixirs * update news.md * add unit tests to increase coverage * Change news.md according to code review Co-authored-by: Hendrik Ranocha --------- Co-authored-by: Andrew Winters Co-authored-by: Hendrik Ranocha --- NEWS.md | 13 + .../elixir_shallowwater_conical_island.jl | 114 --- .../elixir_shallowwater_parabolic_bowl.jl | 119 --- ...ixir_shallowwater_well_balanced_wet_dry.jl | 207 ----- .../elixir_shallowwater_beach.jl | 123 --- .../elixir_shallowwater_parabolic_bowl.jl | 119 --- ...lixir_shallowwater_twolayer_convergence.jl | 60 -- .../elixir_shallowwater_twolayer_dam_break.jl | 94 -- ...xir_shallowwater_twolayer_well_balanced.jl | 86 -- ...ixir_shallowwater_well_balanced_wet_dry.jl | 172 ---- .../elixir_shallowwater_conical_island.jl | 117 --- .../elixir_shallowwater_parabolic_bowl.jl | 121 --- ...lixir_shallowwater_twolayer_convergence.jl | 60 -- ...xir_shallowwater_twolayer_well_balanced.jl | 81 -- ...ixir_shallowwater_well_balanced_wet_dry.jl | 206 ----- ...ixir_shallowwater_three_mound_dam_break.jl | 133 --- ...lixir_shallowwater_twolayer_convergence.jl | 63 -- .../elixir_shallowwater_twolayer_dam_break.jl | 147 ---- ...xir_shallowwater_twolayer_well_balanced.jl | 81 -- src/Trixi.jl | 12 +- src/callbacks_stage/callbacks_stage.jl | 2 - .../positivity_shallow_water.jl | 89 -- .../positivity_shallow_water_dg1d.jl | 89 -- .../positivity_shallow_water_dg2d.jl | 90 -- src/equations/equations.jl | 2 - src/equations/numerical_fluxes.jl | 23 - src/equations/shallow_water_1d.jl | 187 +--- src/equations/shallow_water_2d.jl | 266 +----- src/equations/shallow_water_quasi_1d.jl | 37 +- 
src/equations/shallow_water_two_layer_1d.jl | 511 ----------- src/equations/shallow_water_two_layer_2d.jl | 805 ------------------ src/solvers/dgsem_tree/indicators.jl | 76 -- src/solvers/dgsem_tree/indicators_1d.jl | 109 --- src/solvers/dgsem_tree/indicators_2d.jl | 110 --- test/test_structured_2d.jl | 78 -- test/test_tree_1d.jl | 2 - test/test_tree_1d_shallowwater.jl | 75 -- test/test_tree_1d_shallowwater_twolayer.jl | 74 -- test/test_tree_2d_part3.jl | 3 - test/test_tree_2d_shallowwater.jl | 79 -- test/test_tree_2d_shallowwater_twolayer.jl | 88 -- test/test_unit.jl | 34 +- test/test_unstructured_2d.jl | 101 --- 43 files changed, 56 insertions(+), 5002 deletions(-) delete mode 100644 examples/structured_2d_dgsem/elixir_shallowwater_conical_island.jl delete mode 100644 examples/structured_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl delete mode 100644 examples/structured_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl delete mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_beach.jl delete mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_parabolic_bowl.jl delete mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_twolayer_convergence.jl delete mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_twolayer_dam_break.jl delete mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl delete mode 100644 examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl delete mode 100644 examples/tree_2d_dgsem/elixir_shallowwater_conical_island.jl delete mode 100644 examples/tree_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl delete mode 100644 examples/tree_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl delete mode 100644 examples/tree_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl delete mode 100644 examples/tree_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl delete mode 100644 examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl delete mode 100644 examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl delete mode 100644 examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl delete mode 100644 examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl delete mode 100644 src/callbacks_stage/positivity_shallow_water.jl delete mode 100644 src/callbacks_stage/positivity_shallow_water_dg1d.jl delete mode 100644 src/callbacks_stage/positivity_shallow_water_dg2d.jl delete mode 100644 src/equations/shallow_water_two_layer_1d.jl delete mode 100644 src/equations/shallow_water_two_layer_2d.jl delete mode 100644 test/test_tree_1d_shallowwater_twolayer.jl delete mode 100644 test/test_tree_2d_shallowwater_twolayer.jl diff --git a/NEWS.md b/NEWS.md index feccd1f985..ecc91581e9 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,6 +4,19 @@ Trixi.jl follows the interpretation of [semantic versioning (semver)](https://ju used in the Julia ecosystem. Notable changes will be documented in this file for human readability. +## Changes when updating to v0.7 from v0.6.x + +#### Added + +#### Changed + +#### Deprecated + +#### Removed +- Some specialized shallow water specific features are no longer available directly in + Trixi.jl, but are moved to a dedicated repository: [TrixiShallowWater.jl](https://github.com/trixi-framework/TrixiShallowWater.jl). This includes all features related to wetting and drying, as well as the `ShallowWaterTwoLayerEquations1D` and `ShallowWaterTwoLayerEquations2D`. + However, the basic shallow water equations are still part of Trixi.jl. 
We'll also be updating the TrixiShallowWater.jl documentation with instructions on how to use these relocated features in the future. + ## Changes in the v0.6 lifecycle #### Added diff --git a/examples/structured_2d_dgsem/elixir_shallowwater_conical_island.jl b/examples/structured_2d_dgsem/elixir_shallowwater_conical_island.jl deleted file mode 100644 index e65ed19221..0000000000 --- a/examples/structured_2d_dgsem/elixir_shallowwater_conical_island.jl +++ /dev/null @@ -1,114 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations2D(gravity_constant = 9.81, H0 = 1.4) - -""" - initial_condition_conical_island(x, t, equations::ShallowWaterEquations2D) - -Initial condition for the [`ShallowWaterEquations2D`](@ref) to test the [`hydrostatic_reconstruction_chen_noelle`](@ref) -and its handling of discontinuous water heights at the start in combination with wetting and -drying. The bottom topography is given by a conical island in the middle of the domain. Around that -island, there is a cylindrical water column at t=0 and the rest of the domain is dry. This -discontinuous water height is smoothed by a logistic function. This simulation uses periodic -boundary conditions. -""" -function initial_condition_conical_island(x, t, equations::ShallowWaterEquations2D) - # Set the background values - - v1 = 0.0 - v2 = 0.0 - - x1, x2 = x - b = max(0.1, 1.0 - 4.0 * sqrt(x1^2 + x2^2)) - - # use a logistic function to transfer water height value smoothly - L = equations.H0 # maximum of function - x0 = 0.3 # center point of function - k = -25.0 # sharpness of transfer - - H = max(b, L / (1.0 + exp(-k * (sqrt(x1^2 + x2^2) - x0)))) - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. 
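# Editor's aside (hedged sketch, not part of the original elixir): the "500*eps() ≈ 1e-13"
# default quoted above can be checked directly; `eps()` is eps(Float64).
@assert isapprox(500 * eps(), 1.1e-13; atol = 1.0e-14)
# Overriding it "within the constructor call" presumably means passing a keyword such as
# `threshold_limiter` (name inferred from the `equations.threshold_limiter` field used just
# below); consult the ShallowWaterEquations2D docstring for the exact constructor signature.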
- H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v1, v2, b), equations) -end - -initial_condition = initial_condition_conical_island - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(4) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Get the StructuredMesh and setup a periodic mesh - -coordinates_min = (-1.0, -1.0) -coordinates_max = (1.0, 1.0) - -cells_per_dimension = (16, 16) - -mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max) - -# Create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solver - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -############################################################################### -# Callbacks - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -############################################################################### -# run the simulation - -stage_limiter! = PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -sol = solve(ode, SSPRK43(stage_limiter!); - ode_default_options()..., callback = callbacks); - -summary_callback() # print the timer summary diff --git a/examples/structured_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl b/examples/structured_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl deleted file mode 100644 index bc198f1883..0000000000 --- a/examples/structured_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl +++ /dev/null @@ -1,119 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations2D(gravity_constant = 9.81) - -""" - initial_condition_parabolic_bowl(x, t, equations:: ShallowWaterEquations2D) - -Well-known initial condition to test the [`hydrostatic_reconstruction_chen_noelle`](@ref) and its -wet-dry mechanics. This test has an analytical solution. The initial condition is defined by the -analytical solution at time t=0. The bottom topography defines a bowl and the water level is given -by an oscillating lake. - -The original test and its analytical solution were first presented in -- William C. 
Thacker (1981) - Some exact solutions to the nonlinear shallow-water wave equations - [DOI: 10.1017/S0022112081001882](https://doi.org/10.1017/S0022112081001882). - -The particular setup below is taken from Section 6.2 of -- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and Timothy Warburton (2018) - An entropy stable discontinuous Galerkin method for the shallow water equations on - curvilinear meshes with wet/dry fronts accelerated by GPUs - [DOI: 10.1016/j.jcp.2018.08.038](https://doi.org/10.1016/j.jcp.2018.08.038). -""" -function initial_condition_parabolic_bowl(x, t, equations::ShallowWaterEquations2D) - a = 1.0 - h_0 = 0.1 - sigma = 0.5 - ω = sqrt(2 * equations.gravity * h_0) / a - - v1 = -sigma * ω * sin(ω * t) - v2 = sigma * ω * cos(ω * t) - - b = h_0 * ((x[1])^2 + (x[2])^2) / a^2 - - H = sigma * h_0 / a^2 * (2 * x[1] * cos(ω * t) + 2 * x[2] * sin(ω * t) - sigma) + h_0 - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. - H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v1, v2, b), equations) -end - -initial_condition = initial_condition_parabolic_bowl - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(4) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.6, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### - -coordinates_min = (-2.0, -2.0) -coordinates_max = (2.0, 2.0) - -cells_per_dimension = (150, 150) - -mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 1.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false, - extra_analysis_integrals = (energy_kinetic, - energy_internal)) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -stage_limiter! 
= PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -############################################################################### -# run the simulation - -sol = solve(ode, SSPRK43(stage_limiter!); - ode_default_options()..., callback = callbacks); - -summary_callback() # print the timer summary diff --git a/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl b/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl deleted file mode 100644 index 8e492b1ba0..0000000000 --- a/examples/structured_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl +++ /dev/null @@ -1,207 +0,0 @@ - -using OrdinaryDiffEq -using Trixi -using Printf: @printf, @sprintf - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations2D(gravity_constant = 9.812) - -""" - initial_condition_well_balanced_chen_noelle(x, t, equations:: ShallowWaterEquations2D) - -Initial condition with a complex (discontinuous) bottom topography to test the well-balanced -property for the [`hydrostatic_reconstruction_chen_noelle`](@ref) including dry areas within the -domain. The errors from the analysis callback are not important but the error for this -lake-at-rest test case `∑|H0-(h+b)|` should be around machine roundoff. - -The initial condition is taken from Section 5.2 of the paper: -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -function initial_condition_complex_bottom_well_balanced(x, t, - equations::ShallowWaterEquations2D) - v1 = 0 - v2 = 0 - b = sin(4 * pi * x[1]) + 3 - - if x[1] >= 0.5 - b = sin(4 * pi * x[1]) + 1 - end - - H = max(b, 2.5) - - if x[1] >= 0.5 - H = max(b, 1.5) - end - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. 
- H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v1, v2, b), equations) -end - -initial_condition = initial_condition_complex_bottom_well_balanced - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) - -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(3) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Create the StructuredMesh for the domain [0, 1]^2 - -coordinates_min = (0.0, 0.0) -coordinates_max = (1.0, 1.0) - -cells_per_dimension = (16, 16) - -mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -############################################################################### -# Workaround to set a discontinuous water and bottom topography for -# debugging and testing. Essentially, this is a slight augmentation of the -# `compute_coefficients` where the `x` node value passed here is slightly -# perturbed to the left / right in order to set a true discontinuity that avoids -# the doubled value of the LGL nodes at a particular element interface. -# -# Note! The errors from the analysis callback are not important but the error -# for this lake at rest test case `∑|H0-(h+b)|` should be near machine roundoff. - -# point to the data we want to augment -u = Trixi.wrap_array(ode.u0, semi) -# reset the initial condition -for element in eachelement(semi.solver, semi.cache) - for j in eachnode(semi.solver), i in eachnode(semi.solver) - x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, - semi.solver, i, j, element) - # We know that the discontinuity is a vertical line. Slightly augment the x value by a factor - # of unit roundoff to avoid the repeted value from the LGL nodes at at interface. 
- if i == 1 - x_node = SVector(nextfloat(x_node[1]), x_node[2]) - elseif i == nnodes(semi.solver) - x_node = SVector(prevfloat(x_node[1]), x_node[2]) - end - u_node = initial_condition_complex_bottom_well_balanced(x_node, first(tspan), - equations) - Trixi.set_node_vars!(u, u_node, equations, semi.solver, i, j, element) - end -end - -############################################################################### -# Callbacks - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 1000, - save_initial_solution = true, - save_final_solution = true) - -stepsize_callback = StepsizeCallback(cfl = 1.0) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -stage_limiter! = PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -############################################################################### -# run the simulation - -sol = solve(ode, SSPRK43(stage_limiter!); dt = 1.0, - ode_default_options()..., callback = callbacks, adaptive = false); - -summary_callback() # print the timer summary - -############################################################################### -# Workaround to compute the well-balancedness error for this particular problem -# that has two reference water heights. One for a lake to the left of the -# discontinuous bottom topography `H0_upper = 2.5` and another for a lake to the -# right of the discontinuous bottom topography `H0_lower = 1.5`. - -# Declare a special version of the function to compute the lake-at-rest error -# OBS! The reference water height values are hardcoded for convenience. -function lake_at_rest_error_two_level(u, x, equations::ShallowWaterEquations2D) - h, _, _, b = u - - # For well-balancedness testing with possible wet/dry regions the reference - # water height `H0` accounts for the possibility that the bottom topography - # can emerge out of the water as well as for the threshold offset to avoid - # division by a "hard" zero water heights as well. - if x[1] < 0.5 - H0_wet_dry = max(2.5, b + equations.threshold_limiter) - else - H0_wet_dry = max(1.5, b + equations.threshold_limiter) - end - - return abs(H0_wet_dry - (h + b)) -end - -# point to the data we want to analyze -u = Trixi.wrap_array(sol[end], semi) -# Perform the actual integration of the well-balancedness error over the domain -l1_well_balance_error = Trixi.integrate_via_indices(u, mesh, equations, semi.solver, - semi.cache; - normalize = true) do u, i, j, element, - equations, solver - x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, solver, - i, j, element) - # We know that the discontinuity is a vertical line. Slightly augment the x value by a factor - # of unit roundoff to avoid the repeted value from the LGL nodes at at interface. 
- if i == 1 - x_node = SVector(nextfloat(x_node[1]), x_node[2]) - elseif i == nnodes(semi.solver) - x_node = SVector(prevfloat(x_node[1]), x_node[2]) - end - u_local = Trixi.get_node_vars(u, equations, solver, i, j, element) - return lake_at_rest_error_two_level(u_local, x_node, equations) -end - -# report the well-balancedness lake-at-rest error to the screen -println("─"^100) -println(" Lake-at-rest error for '", Trixi.get_name(equations), "' with ", summary(solver), - " at final time " * @sprintf("%10.8e", tspan[end])) - -@printf(" %-12s:", Trixi.pretty_form_utf(lake_at_rest_error)) -@printf(" % 10.8e", l1_well_balance_error) -println() -println("─"^100) diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_beach.jl b/examples/tree_1d_dgsem/elixir_shallowwater_beach.jl deleted file mode 100644 index 378079ca33..0000000000 --- a/examples/tree_1d_dgsem/elixir_shallowwater_beach.jl +++ /dev/null @@ -1,123 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations1D(gravity_constant = 9.812) - -""" - initial_condition_beach(x, t, equations:: ShallowWaterEquations1D) -Initial condition to simulate a wave running towards a beach and crashing. Difficult test -including both wetting and drying in the domain using slip wall boundary conditions. -The bottom topography is altered to be differentiable on the domain [0,8] and -differs from the reference below. - -The water height and speed functions used here, are adapted from the initial condition -found in section 5.2 of the paper: - - Andreas Bollermann, Sebastian Noelle, Maria Lukáčová-Medvid’ová (2011) - Finite volume evolution Galerkin methods for the shallow water equations with dry beds\n - [DOI: 10.4208/cicp.220210.020710a](https://dx.doi.org/10.4208/cicp.220210.020710a) -""" -function initial_condition_beach(x, t, equations::ShallowWaterEquations1D) - D = 1 - delta = 0.02 - gamma = sqrt((3 * delta) / (4 * D)) - x_a = sqrt((4 * D) / (3 * delta)) * acosh(sqrt(20)) - - f = D + 40 * delta * sech(gamma * (8 * x[1] - x_a))^2 - - # steep curved beach - b = 0.01 + 99 / 409600 * 4^x[1] - - if x[1] >= 6 - H = b - v = 0.0 - else - H = f - v = sqrt(equations.gravity / D) * H - end - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. 
- H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v, b), equations) -end - -initial_condition = initial_condition_beach -boundary_condition = boundary_condition_slip_wall - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(3) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Create the TreeMesh for the domain [0, 8] - -coordinates_min = 0.0 -coordinates_max = 8.0 - -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 7, - n_cells_max = 10_000, - periodicity = false) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - boundary_conditions = boundary_condition) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false, - extra_analysis_integrals = (energy_kinetic, - energy_internal)) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(dt = 0.5, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -stage_limiter! = PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -############################################################################### -# run the simulation - -sol = solve(ode, SSPRK43(stage_limiter!); - ode_default_options()..., callback = callbacks); - -summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_parabolic_bowl.jl b/examples/tree_1d_dgsem/elixir_shallowwater_parabolic_bowl.jl deleted file mode 100644 index a586562af7..0000000000 --- a/examples/tree_1d_dgsem/elixir_shallowwater_parabolic_bowl.jl +++ /dev/null @@ -1,119 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations1D(gravity_constant = 9.81) - -""" - initial_condition_parabolic_bowl(x, t, equations:: ShallowWaterEquations1D) - -Well-known initial condition to test the [`hydrostatic_reconstruction_chen_noelle`](@ref) and its -wet-dry mechanics. This test has analytical solutions. The initial condition is defined by the -analytical solution at time t=0. The bottom topography defines a bowl and the water level is given -by an oscillating lake. - -The original test and its analytical solution in two dimensions were first presented in -- William C. 
Thacker (1981) - Some exact solutions to the nonlinear shallow-water wave equations - [DOI: 10.1017/S0022112081001882](https://doi.org/10.1017/S0022112081001882). - -The particular setup below is taken from Section 6.2 of -- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and Timothy Warburton (2018) - An entropy stable discontinuous Galerkin method for the shallow water equations on - curvilinear meshes with wet/dry fronts accelerated by GPUs - [DOI: 10.1016/j.jcp.2018.08.038](https://doi.org/10.1016/j.jcp.2018.08.038). -""" -function initial_condition_parabolic_bowl(x, t, equations::ShallowWaterEquations1D) - a = 1 - h_0 = 0.1 - sigma = 0.5 - ω = sqrt(2 * equations.gravity * h_0) / a - - v = -sigma * ω * sin(ω * t) - - b = h_0 * x[1]^2 / a^2 - - H = sigma * h_0 / a^2 * (2 * x[1] * cos(ω * t) - sigma) + h_0 - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. - H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v, b), equations) -end - -initial_condition = initial_condition_parabolic_bowl - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(5) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Create the TreeMesh for the domain [-2, 2] - -coordinates_min = -2.0 -coordinates_max = 2.0 - -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 6, - n_cells_max = 10_000) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false, - extra_analysis_integrals = (energy_kinetic, - energy_internal)) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 1000, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -stage_limiter! 
= PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -############################################################################### -# run the simulation - -sol = solve(ode, SSPRK43(stage_limiter!); - ode_default_options()..., callback = callbacks); - -summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_convergence.jl b/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_convergence.jl deleted file mode 100644 index e6a0184985..0000000000 --- a/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_convergence.jl +++ /dev/null @@ -1,60 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the two-layer shallow water equations - -equations = ShallowWaterTwoLayerEquations1D(gravity_constant = 10.0, rho_upper = 0.9, - rho_lower = 1.0) - -initial_condition = initial_condition_convergence_test - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal), - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# Get the TreeMesh and setup a periodic mesh - -coordinates_min = 0.0 -coordinates_max = sqrt(2.0) -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 10_000, - periodicity = true) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - source_terms = source_terms_convergence_test) - -############################################################################### -# ODE solvers, callbacks etc. 
-
-tspan = (0.0, 1.0)
-ode = semidiscretize(semi, tspan)
-
-summary_callback = SummaryCallback()
-
-analysis_interval = 500
-analysis_callback = AnalysisCallback(semi, interval = analysis_interval)
-
-alive_callback = AliveCallback(analysis_interval = analysis_interval)
-
-save_solution = SaveSolutionCallback(interval = 500,
-                                     save_initial_solution = true,
-                                     save_final_solution = true)
-
-callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution)
-
-###############################################################################
-# run the simulation
-
-# use a Runge-Kutta method with automatic (error based) time step size control
-sol = solve(ode, RDPK3SpFSAL49(), abstol = 1.0e-8, reltol = 1.0e-8,
-            save_everystep = false, callback = callbacks);
-summary_callback() # print the timer summary
diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_dam_break.jl b/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_dam_break.jl
deleted file mode 100644
index 03b93754d0..0000000000
--- a/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_dam_break.jl
+++ /dev/null
@@ -1,94 +0,0 @@
-
-using OrdinaryDiffEq
-using Trixi
-
-###############################################################################
-# Semidiscretization of the two-layer shallow water equations for a dam break
-# test with a discontinuous bottom topography function to test entropy conservation
-
-equations = ShallowWaterTwoLayerEquations1D(gravity_constant = 9.81, H0 = 2.0,
-                                            rho_upper = 0.9, rho_lower = 1.0)
-
-# Initial condition of a dam break with discontinuous water heights and bottom topography.
-# Works as intended for TreeMesh1D with `initial_refinement_level=5`. If the mesh
-# refinement level is changed, the initial condition below may need to be changed as well to
-# ensure that the discontinuities lie on an element interface.
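# Editor's aside (hedged sketch, not part of the original elixir): `jump_on_interface` is a
# hypothetical helper, added only to illustrate the comment above. It checks whether a jump
# location coincides with an element interface of the uniform TreeMesh1D set up further below
# (domain [0, 20] split into 2^initial_refinement_level equally sized elements).
function jump_on_interface(x_jump, x_min, x_max, refinement_level)
    dx = (x_max - x_min) / 2^refinement_level  # uniform element width of the initial mesh
    return isinteger((x_jump - x_min) / dx)    # true if the jump lies on an interface
end
@assert jump_on_interface(10.0, 0.0, 20.0, 5)  # the dam at x = 10.0 with refinement level 5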
-function initial_condition_dam_break(x, t, equations::ShallowWaterTwoLayerEquations1D) - v1_upper = 0.0 - v1_lower = 0.0 - - # Set the discontinuity - if x[1] <= 10.0 - H_lower = 2.0 - H_upper = 4.0 - b = 0.0 - else - H_lower = 1.5 - H_upper = 3.0 - b = 0.5 - end - - return prim2cons(SVector(H_upper, v1_upper, H_lower, v1_lower, b), equations) -end - -initial_condition = initial_condition_dam_break - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal), - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# Get the TreeMesh and setup a non-periodic mesh - -coordinates_min = 0.0 -coordinates_max = 20.0 -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 5, - n_cells_max = 10000, - periodicity = false) - -boundary_condition = boundary_condition_slip_wall - -# create the semidiscretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - boundary_conditions = boundary_condition) - -############################################################################### -# ODE solvers - -tspan = (0.0, 0.4) -ode = semidiscretize(semi, tspan) - -############################################################################### -# Callbacks - -summary_callback = SummaryCallback() - -analysis_interval = 500 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false, - extra_analysis_integrals = (energy_total, - energy_kinetic, - energy_internal)) - -stepsize_callback = StepsizeCallback(cfl = 1.0) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 500, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -############################################################################### -# run the simulation - -# use a Runge-Kutta method with automatic (error based) time step size control -sol = solve(ode, RDPK3SpFSAL49(), abstol = 1.0e-8, reltol = 1.0e-8, - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl b/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl deleted file mode 100644 index 098e3aaf60..0000000000 --- a/examples/tree_1d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl +++ /dev/null @@ -1,86 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the two-layer shallow water equations to test well-balancedness - -equations = ShallowWaterTwoLayerEquations1D(gravity_constant = 1.0, H0 = 0.6, - rho_upper = 0.9, rho_lower = 1.0) - -""" - initial_condition_fjordholm_well_balanced(x, t, equations::ShallowWaterTwoLayerEquations1D) - -Initial condition to test well balanced with a bottom topography from Fjordholm -- Ulrik Skre Fjordholm (2012) - Energy conservative and stable schemes for the two-layer shallow water equations. 
- [DOI: 10.1142/9789814417099_0039](https://doi.org/10.1142/9789814417099_0039) -""" -function initial_condition_fjordholm_well_balanced(x, t, - equations::ShallowWaterTwoLayerEquations1D) - inicenter = 0.5 - x_norm = x[1] - inicenter - r = abs(x_norm) - - H_lower = 0.5 - H_upper = 0.6 - v1_upper = 0.0 - v1_lower = 0.0 - b = r <= 0.1 ? 0.2 * (cos(10 * pi * (x[1] - 0.5)) + 1) : 0.0 - return prim2cons(SVector(H_upper, v1_upper, H_lower, v1_lower, b), equations) -end - -initial_condition = initial_condition_fjordholm_well_balanced - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_es_ersing_etal, flux_nonconservative_ersing_etal), - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# Get the TreeMesh and setup a periodic mesh - -coordinates_min = 0.0 -coordinates_max = 1.0 -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 4, - n_cells_max = 10_000, - periodicity = true) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false, - extra_analysis_integrals = (lake_at_rest_error,)) - -stepsize_callback = StepsizeCallback(cfl = 1.0) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 1000, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl b/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl deleted file mode 100644 index 26a8960ab4..0000000000 --- a/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl +++ /dev/null @@ -1,172 +0,0 @@ - -using OrdinaryDiffEq -using Trixi -using Printf: @printf, @sprintf - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations1D(gravity_constant = 9.812) - -""" - initial_condition_complex_bottom_well_balanced(x, t, equations:: ShallowWaterEquations1D) - -Initial condition with a complex (discontinuous) bottom topography to test the well-balanced -property for the [`hydrostatic_reconstruction_chen_noelle`](@ref) including dry areas within the -domain. The errors from the analysis callback are not important but the error for this -lake-at-rest test case `∑|H0-(h+b)|` should be around machine roundoff. 
- -The initial condition is taken from Section 5.2 of the paper: -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -function initial_condition_complex_bottom_well_balanced(x, t, - equations::ShallowWaterEquations1D) - v = 0.0 - b = sin(4 * pi * x[1]) + 3 - - if x[1] >= 0.5 - b = sin(4 * pi * x[1]) + 1 - end - - H = max(b, 2.5) - - if x[1] >= 0.5 - H = max(b, 1.5) - end - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. - H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v, b), equations) -end - -initial_condition = initial_condition_complex_bottom_well_balanced - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(3) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Create the TreeMesh for the domain [0, 1] - -coordinates_min = 0.0 -coordinates_max = 1.0 - -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 6, - n_cells_max = 10_000) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 25.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 5000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 5000, - save_initial_solution = true, - save_final_solution = true) - -stepsize_callback = StepsizeCallback(cfl = 1.5) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -stage_limiter! 
= PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -############################################################################### -# run the simulation - -sol = solve(ode, SSPRK43(stage_limiter!); dt = 1.0, - ode_default_options()..., callback = callbacks, adaptive = false); - -summary_callback() # print the timer summary - -############################################################################### -# Workaround to compute the well-balancedness error for this particular problem -# that has two reference water heights. One for a lake to the left of the -# discontinuous bottom topography `H0_upper = 2.5` and another for a lake to the -# right of the discontinuous bottom topography `H0_lower = 1.5`. - -# Declare a special version of the function to compute the lake-at-rest error -# OBS! The reference water height values are hardcoded for convenience. -function lake_at_rest_error_two_level(u, x, equations::ShallowWaterEquations1D) - h, _, b = u - - # For well-balancedness testing with possible wet/dry regions the reference - # water height `H0` accounts for the possibility that the bottom topography - # can emerge out of the water as well as for the threshold offset to avoid - # division by a "hard" zero water heights as well. - if x[1] < 0.5 - H0_wet_dry = max(2.5, b + equations.threshold_limiter) - else - H0_wet_dry = max(1.5, b + equations.threshold_limiter) - end - - return abs(H0_wet_dry - (h + b)) -end - -# point to the data we want to analyze -u = Trixi.wrap_array(sol[end], semi) -# Perform the actual integration of the well-balancedness error over the domain -l1_well_balance_error = Trixi.integrate_via_indices(u, mesh, equations, semi.solver, - semi.cache; - normalize = true) do u, i, element, - equations, solver - x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, solver, - i, element) - # We know that the discontinuity is a vertical line. Slightly augment the x value by a factor - # of unit roundoff to avoid the repeted value from the LGL nodes at at interface. 
- if i == 1 - x_node = SVector(nextfloat(x_node[1])) - elseif i == nnodes(semi.solver) - x_node = SVector(prevfloat(x_node[1])) - end - u_local = Trixi.get_node_vars(u, equations, solver, i, element) - return lake_at_rest_error_two_level(u_local, x_node, equations) -end - -# report the well-balancedness lake-at-rest error to the screen -println("─"^100) -println(" Lake-at-rest error for '", Trixi.get_name(equations), "' with ", summary(solver), - " at final time " * @sprintf("%10.8e", tspan[end])) - -@printf(" %-12s:", Trixi.pretty_form_utf(lake_at_rest_error)) -@printf(" % 10.8e", l1_well_balance_error) -println() -println("─"^100) diff --git a/examples/tree_2d_dgsem/elixir_shallowwater_conical_island.jl b/examples/tree_2d_dgsem/elixir_shallowwater_conical_island.jl deleted file mode 100644 index 349b374186..0000000000 --- a/examples/tree_2d_dgsem/elixir_shallowwater_conical_island.jl +++ /dev/null @@ -1,117 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations2D(gravity_constant = 9.81, H0 = 1.4) - -""" - initial_condition_conical_island(x, t, equations::ShallowWaterEquations2D) - -Initial condition for the [`ShallowWaterEquations2D`](@ref) to test the [`hydrostatic_reconstruction_chen_noelle`](@ref) -and its handling of discontinuous water heights at the start in combination with wetting and -drying. The bottom topography is given by a conical island in the middle of the domain. Around that -island, there is a cylindrical water column at t=0 and the rest of the domain is dry. This -discontinuous water height is smoothed by a logistic function. This simulation uses a Dirichlet -boundary condition with the initial values. Due to the dry cells at the boundary, this has the -effect of an outflow which can be seen in the simulation. -""" -function initial_condition_conical_island(x, t, equations::ShallowWaterEquations2D) - # Set the background values - - v1 = 0.0 - v2 = 0.0 - - x1, x2 = x - b = max(0.1, 1.0 - 4.0 * sqrt(x1^2 + x2^2)) - - # use a logistic function to transfer water height value smoothly - L = equations.H0 # maximum of function - x0 = 0.3 # center point of function - k = -25.0 # sharpness of transfer - - H = max(b, L / (1.0 + exp(-k * (sqrt(x1^2 + x2^2) - x0)))) - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. 
- H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v1, v2, b), equations) -end - -initial_condition = initial_condition_conical_island -boundary_conditions = BoundaryConditionDirichlet(initial_condition) - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(4) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Get the TreeMesh and setup a mesh - -coordinates_min = (-1.0, -1.0) -coordinates_max = (1.0, 1.0) -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 4, - n_cells_max = 10_000, - periodicity = false) - -# Create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - boundary_conditions = boundary_conditions) - -############################################################################### -# ODE solver - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -############################################################################### -# Callbacks - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -############################################################################### -# run the simulation - -stage_limiter! = PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -sol = solve(ode, SSPRK43(stage_limiter!); - ode_default_options()..., callback = callbacks); - -summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl b/examples/tree_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl deleted file mode 100644 index 2008019cc3..0000000000 --- a/examples/tree_2d_dgsem/elixir_shallowwater_parabolic_bowl.jl +++ /dev/null @@ -1,121 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations2D(gravity_constant = 9.81) - -""" - initial_condition_parabolic_bowl(x, t, equations:: ShallowWaterEquations2D) - -Well-known initial condition to test the [`hydrostatic_reconstruction_chen_noelle`](@ref) and its -wet-dry mechanics. This test has an analytical solution. The initial condition is defined by the -analytical solution at time t=0. The bottom topography defines a bowl and the water level is given -by an oscillating lake. - -The original test and its analytical solution were first presented in -- William C. 
Thacker (1981) - Some exact solutions to the nonlinear shallow-water wave equations - [DOI: 10.1017/S0022112081001882](https://doi.org/10.1017/S0022112081001882). - -The particular setup below is taken from Section 6.2 of -- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and Timothy Warburton (2018) - An entropy stable discontinuous Galerkin method for the shallow water equations on - curvilinear meshes with wet/dry fronts accelerated by GPUs - [DOI: 10.1016/j.jcp.2018.08.038](https://doi.org/10.1016/j.jcp.2018.08.038). -""" -function initial_condition_parabolic_bowl(x, t, equations::ShallowWaterEquations2D) - a = 1.0 - h_0 = 0.1 - sigma = 0.5 - ω = sqrt(2 * equations.gravity * h_0) / a - - v1 = -sigma * ω * sin(ω * t) - v2 = sigma * ω * cos(ω * t) - - b = h_0 * ((x[1])^2 + (x[2])^2) / a^2 - - H = sigma * h_0 / a^2 * (2 * x[1] * cos(ω * t) + 2 * x[2] * sin(ω * t) - sigma) + h_0 - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. - H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v1, v2, b), equations) -end - -initial_condition = initial_condition_parabolic_bowl -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(7) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.6, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Create the TreeMesh for the domain [-2, 2]^2 - -coordinates_min = (-2.0, -2.0) -coordinates_max = (2.0, 2.0) - -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 5, - n_cells_max = 10_000) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 1.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false, - extra_analysis_integrals = (energy_kinetic, - energy_internal)) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -stage_limiter! 
= PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -############################################################################### -# run the simulation - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -sol = solve(ode, SSPRK43(stage_limiter!); - ode_default_options()..., callback = callbacks); - -summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl b/examples/tree_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl deleted file mode 100644 index 790916e446..0000000000 --- a/examples/tree_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl +++ /dev/null @@ -1,60 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the two-layer shallow water equations - -equations = ShallowWaterTwoLayerEquations2D(gravity_constant = 10.0, rho_upper = 0.9, - rho_lower = 1.0) - -initial_condition = initial_condition_convergence_test - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 3, - surface_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal), - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# Get the TreeMesh and setup a periodic mesh - -coordinates_min = (0.0, 0.0) -coordinates_max = (sqrt(2.0), sqrt(2.0)) -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 20_000, - periodicity = true) - -# Create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - source_terms = source_terms_convergence_test) - -############################################################################### -# ODE solvers, callbacks etc. 
- -tspan = (0.0, 1.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 500 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 500, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -############################################################################### -# run the simulation - -# use a Runge-Kutta method with automatic (error based) time step size control -sol = solve(ode, RDPK3SpFSAL49(), abstol = 1.0e-8, reltol = 1.0e-8, - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl b/examples/tree_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl deleted file mode 100644 index 264c26390f..0000000000 --- a/examples/tree_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl +++ /dev/null @@ -1,81 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the two-layer shallow water equations with a bottom topography function -# to test well-balancedness - -equations = ShallowWaterTwoLayerEquations2D(gravity_constant = 9.81, H0 = 0.6, - rho_upper = 0.9, rho_lower = 1.0) - -# An initial condition with constant total water height, zero velocities and a bottom topography to -# test well-balancedness -function initial_condition_well_balanced(x, t, equations::ShallowWaterTwoLayerEquations2D) - H_lower = 0.5 - H_upper = 0.6 - v1_upper = 0.0 - v2_upper = 0.0 - v1_lower = 0.0 - v2_lower = 0.0 - b = (((x[1] - 0.5)^2 + (x[2] - 0.5)^2) < 0.04 ? 
- 0.2 * (cos(4 * pi * sqrt((x[1] - 0.5)^2 + (x[2] + - -0.5)^2)) + 1) : 0.0) - - return prim2cons(SVector(H_upper, v1_upper, v2_upper, H_lower, v1_lower, v2_lower, b), - equations) -end - -initial_condition = initial_condition_well_balanced - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -surface_flux = (flux_es_ersing_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 3, surface_flux = surface_flux, - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# Get the TreeMesh and setup a periodic mesh - -coordinates_min = (0.0, 0.0) -coordinates_max = (1.0, 1.0) -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 10_000, - periodicity = true) - -# Create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solver - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - extra_analysis_integrals = (lake_at_rest_error,)) - -stepsize_callback = StepsizeCallback(cfl = 1.0) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 1000, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/tree_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl b/examples/tree_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl deleted file mode 100644 index 034411c2b5..0000000000 --- a/examples/tree_2d_dgsem/elixir_shallowwater_well_balanced_wet_dry.jl +++ /dev/null @@ -1,206 +0,0 @@ - -using OrdinaryDiffEq -using Trixi -using Printf: @printf, @sprintf - -############################################################################### -# Semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations2D(gravity_constant = 9.812) - -""" - initial_condition_well_balanced_chen_noelle(x, t, equations:: ShallowWaterEquations2D) - -Initial condition with a complex (discontinuous) bottom topography to test the well-balanced -property for the [`hydrostatic_reconstruction_chen_noelle`](@ref) including dry areas within the -domain. The errors from the analysis callback are not important but the error for this -lake-at-rest test case `∑|H0-(h+b)|` should be around machine roundoff. 
- -The initial condition is taken from Section 5.2 of the paper: -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -function initial_condition_complex_bottom_well_balanced(x, t, - equations::ShallowWaterEquations2D) - v1 = 0 - v2 = 0 - b = sin(4 * pi * x[1]) + 3 - - if x[1] >= 0.5 - b = sin(4 * pi * x[1]) + 1 - end - - H = max(b, 2.5) - if x[1] >= 0.5 - H = max(b, 1.5) - end - - # It is mandatory to shift the water level at dry areas to make sure the water height h - # stays positive. The system would not be stable for h set to a hard 0 due to division by h in - # the computation of velocity, e.g., (h v1) / h. Therefore, a small dry state threshold - # with a default value of 500*eps() ≈ 1e-13 in double precision, is set in the constructor above - # for the ShallowWaterEquations and added to the initial condition if h = 0. - # This default value can be changed within the constructor call depending on the simulation setup. - H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v1, v2, b), equations) -end - -initial_condition = initial_condition_complex_bottom_well_balanced - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(3) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Create the TreeMesh for the domain [0, 1]^2 - -coordinates_min = (0.0, 0.0) -coordinates_max = (1.0, 1.0) - -mesh = TreeMesh(coordinates_min, coordinates_max, - initial_refinement_level = 3, - n_cells_max = 10_000) - -# create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solvers, callbacks etc. - -tspan = (0.0, 50.0) -ode = semidiscretize(semi, tspan) - -############################################################################### -# Workaround to set a discontinuous water and bottom topography for -# debugging and testing. Essentially, this is a slight augmentation of the -# `compute_coefficients` where the `x` node value passed here is slightly -# perturbed to the left / right in order to set a true discontinuity that avoids -# the doubled value of the LGL nodes at a particular element interface. -# -# Note! The errors from the analysis callback are not important but the error -# for this lake at rest test case `∑|H0-(h+b)|` should be near machine roundoff. 
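# A minimal standalone sketch (illustrative only; `jump_ic` is a made-up name, not part of the
# original elixir) of why the `nextfloat`/`prevfloat` perturbation described above picks the
# correct side of a jump located exactly at the repeated LGL interface node:
jump_ic(x) = x < 0.5 ? 1.0 : 2.0              # discontinuity exactly at an element interface
x_interface = 0.5
value_left  = jump_ic(prevfloat(x_interface))  # 1.0, the state just left of the jump
value_right = jump_ic(nextfloat(x_interface))  # 2.0, the state just right of the jump
# The two coincident interface nodes thus receive genuinely different values instead of the
# doubled nodal value, which is what the loop below does with the full initial condition.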
- -# point to the data we want to augment -u = Trixi.wrap_array(ode.u0, semi) -# reset the initial condition -for element in eachelement(semi.solver, semi.cache) - for j in eachnode(semi.solver), i in eachnode(semi.solver) - x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, - semi.solver, i, j, element) - # We know that the discontinuity is a vertical line. Slightly augment the x value by a factor - # of unit roundoff to avoid the repeted value from the LGL nodes at at interface. - if i == 1 - x_node = SVector(nextfloat(x_node[1]), x_node[2]) - elseif i == nnodes(semi.solver) - x_node = SVector(prevfloat(x_node[1]), x_node[2]) - end - u_node = initial_condition_complex_bottom_well_balanced(x_node, first(tspan), - equations) - Trixi.set_node_vars!(u, u_node, equations, semi.solver, i, j, element) - end -end - -############################################################################### -# Callbacks - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 1000, - save_initial_solution = true, - save_final_solution = true) - -stepsize_callback = StepsizeCallback(cfl = 2.0) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -stage_limiter! = PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -############################################################################### -# run the simulation - -sol = solve(ode, SSPRK43(stage_limiter!); dt = 1.0, - ode_default_options()..., callback = callbacks, adaptive = false); - -summary_callback() # print the timer summary - -############################################################################### -# Workaround to compute the well-balancedness error for this particular problem -# that has two reference water heights. One for a lake to the left of the -# discontinuous bottom topography `H0_upper = 2.5` and another for a lake to the -# right of the discontinuous bottom topography `H0_lower = 1.5`. - -# Declare a special version of the function to compute the lake-at-rest error -# OBS! The reference water height values are hardcoded for convenience. -function lake_at_rest_error_two_level(u, x, equations::ShallowWaterEquations2D) - h, _, _, b = u - - # For well-balancedness testing with possible wet/dry regions the reference - # water height `H0` accounts for the possibility that the bottom topography - # can emerge out of the water as well as for the threshold offset to avoid - # division by a "hard" zero water heights as well. - - if x[1] < 0.5 - H0_wet_dry = max(2.5, b + equations.threshold_limiter) - else - H0_wet_dry = max(1.5, b + equations.threshold_limiter) - end - - return abs(H0_wet_dry - (h + b)) -end - -# point to the data we want to analyze -u = Trixi.wrap_array(sol[end], semi) -# Perform the actual integration of the well-balancedness error over the domain -l1_well_balance_error = Trixi.integrate_via_indices(u, mesh, equations, semi.solver, - semi.cache; - normalize = true) do u, i, j, element, - equations, solver - x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, solver, - i, j, element) - # We know that the discontinuity is a vertical line. 
Slightly augment the x value by a factor - # of unit roundoff to avoid the repeted value from the LGL nodes at at interface. - if i == 1 - x_node = SVector(nextfloat(x_node[1]), x_node[2]) - elseif i == nnodes(semi.solver) - x_node = SVector(prevfloat(x_node[1]), x_node[2]) - end - u_local = Trixi.get_node_vars(u, equations, solver, i, j, element) - return lake_at_rest_error_two_level(u_local, x_node, equations) -end - -# report the well-balancedness lake-at-rest error to the screen -println("─"^100) -println(" Lake-at-rest error for '", Trixi.get_name(equations), "' with ", summary(solver), - " at final time " * @sprintf("%10.8e", tspan[end])) - -@printf(" %-12s:", Trixi.pretty_form_utf(lake_at_rest_error)) -@printf(" % 10.8e", l1_well_balance_error) -println() -println("─"^100) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl deleted file mode 100644 index df321aad26..0000000000 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_three_mound_dam_break.jl +++ /dev/null @@ -1,133 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# semidiscretization of the shallow water equations -# -# TODO: TrixiShallowWater: wet/dry example elixir - -equations = ShallowWaterEquations2D(gravity_constant = 9.81, H0 = 1.875, - threshold_limiter = 1e-12, threshold_wet = 1e-14) - -""" - initial_condition_three_mounds(x, t, equations::ShallowWaterEquations2D) - -Initial condition simulating a dam break. The bottom topography is given by one large and two smaller -mounds. The mounds are flooded by the water for t > 0. To smooth the discontinuity, a logistic function -is applied. - -The initial conditions is taken from Section 6.3 of the paper: -- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and Timothy Warburton (2018) - An entropy stable discontinuous Galerkin method for the shallow water equations on - curvilinear meshes with wet/dry fronts accelerated by GPUs\n - [DOI: 10.1016/j.jcp.2018.08.038](https://doi.org/10.1016/j.jcp.2018.08.038) -""" -function initial_condition_three_mounds(x, t, equations::ShallowWaterEquations2D) - - # Set the background values - v1 = 0.0 - v2 = 0.0 - - x1, x2 = x - M_1 = 1 - 0.1 * sqrt((x1 - 30.0)^2 + (x2 - 22.5)^2) - M_2 = 1 - 0.1 * sqrt((x1 - 30.0)^2 + (x2 - 7.5)^2) - M_3 = 2.8 - 0.28 * sqrt((x1 - 47.5)^2 + (x2 - 15.0)^2) - - b = max(0.0, M_1, M_2, M_3) - - # use a logistic function to transfer water height value smoothly - L = equations.H0 # maximum of function - x0 = 8 # center point of function - k = -75.0 # sharpness of transfer - - H = max(b, L / (1.0 + exp(-k * (x1 - x0)))) - - # Avoid division by zero by adjusting the initial condition with a small dry state threshold - # that defaults to 500*eps() ≈ 1e-13 in double precision and is set in the constructor above - # for the ShallowWaterEquations struct. 
- H = max(H, b + equations.threshold_limiter) - return prim2cons(SVector(H, v1, v2, b), equations) -end - -initial_condition = initial_condition_three_mounds - -function boundary_condition_outflow(u_inner, normal_direction::AbstractVector, x, t, - surface_flux_function, - equations::ShallowWaterEquations2D) - # Impulse and bottom from inside, height from external state - u_outer = SVector(equations.threshold_wet, u_inner[2], u_inner[3], u_inner[4]) - - # calculate the boundary flux - flux = surface_flux_function(u_inner, u_outer, normal_direction, equations) - - return flux -end - -boundary_conditions = Dict(:Bottom => boundary_condition_slip_wall, - :Top => boundary_condition_slip_wall, - :Right => boundary_condition_outflow, - :Left => boundary_condition_slip_wall) - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -surface_flux = (FluxHydrostaticReconstruction(flux_hll_chen_noelle, - hydrostatic_reconstruction_chen_noelle), - flux_nonconservative_chen_noelle) - -basis = LobattoLegendreBasis(4) - -indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis, - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable = waterheight_pressure) -volume_integral = VolumeIntegralShockCapturingHG(indicator_sc; - volume_flux_dg = volume_flux, - volume_flux_fv = surface_flux) - -solver = DGSEM(basis, surface_flux, volume_integral) - -############################################################################### -# Get the unstructured quad mesh from a file (downloads the file if not available locally) -mesh_file = Trixi.download("https://gist.githubusercontent.com/svengoldberg/c3c87fecb3fc6e46be7f0d1c7cb35f83/raw/e817ecd9e6c4686581d63c46128f9b6468d396d3/mesh_three_mound.mesh", - joinpath(@__DIR__, "mesh_three_mound.mesh")) - -mesh = UnstructuredMesh2D(mesh_file) - -# Create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver; - boundary_conditions = boundary_conditions) - -############################################################################### -# ODE solver - -tspan = (0.0, 20.0) -ode = semidiscretize(semi, tspan) - -############################################################################### -# Callbacks - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 100, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution) - -############################################################################### -# run the simulation - -stage_limiter! 
= PositivityPreservingLimiterShallowWater(variables = (Trixi.waterheight,)) - -sol = solve(ode, SSPRK43(stage_limiter!); - ode_default_options()..., callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl deleted file mode 100644 index fcc08b6f99..0000000000 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_convergence.jl +++ /dev/null @@ -1,63 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the two-layer shallow water equations with a periodic -# bottom topography function (set in the initial conditions) - -equations = ShallowWaterTwoLayerEquations2D(gravity_constant = 10.0, rho_upper = 0.9, - rho_lower = 1.0) - -initial_condition = initial_condition_convergence_test - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -surface_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 6, surface_flux = surface_flux, - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# This setup is for the curved, split form convergence test on a periodic domain - -# Get the unstructured quad mesh from a file (downloads the file if not available locally) -mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) - -mesh = UnstructuredMesh2D(mesh_file, periodicity = true) - -# Create the semidiscretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, - source_terms = source_terms_convergence_test) - -############################################################################### -# ODE solvers, callbacks etc. 
- -tspan = (0.0, 1.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 500 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 500, - save_initial_solution = true, - save_final_solution = true) - -stepsize_callback = StepsizeCallback(cfl = 1.0) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl deleted file mode 100644 index 821f31c52a..0000000000 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_dam_break.jl +++ /dev/null @@ -1,147 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the two-layer shallow water equations for a dam break test with a -# discontinuous bottom topography function to test energy conservation - -equations = ShallowWaterTwoLayerEquations2D(gravity_constant = 1.0, rho_upper = 0.9, - rho_lower = 1.0) - -# This test case uses a special work around to setup a truly discontinuous bottom topography -# function and initial condition for this academic testcase of entropy conservation. First, a -# dummy initial_condition_dam_break is introduced to create the semidiscretization. Then the initial -# condition is reset with the true discontinuous values from initial_condition_discontinuous_dam_break. 
- -function initial_condition_dam_break(x, t, equations::ShallowWaterTwoLayerEquations2D) - if x[1] < sqrt(2) / 2 - H_upper = 1.0 - H_lower = 0.6 - b = 0.1 - else - H_upper = 0.9 - H_lower = 0.5 - b = 0.0 - end - - v1_upper = 0.0 - v2_upper = 0.0 - v1_lower = 0.0 - v2_lower = 0.0 - return prim2cons(SVector(H_upper, v1_upper, v2_upper, H_lower, v1_lower, v2_lower, b), - equations) -end - -initial_condition = initial_condition_dam_break - -boundary_condition_constant = BoundaryConditionDirichlet(initial_condition_dam_break) - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -surface_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 6, surface_flux = surface_flux, - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# Get the unstructured quad mesh from a file (downloads the file if not available locally) -mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) - -mesh = UnstructuredMesh2D(mesh_file, periodicity = false) - -# Boundary conditions -boundary_condition = Dict(:Top => boundary_condition_slip_wall, - :Left => boundary_condition_slip_wall, - :Right => boundary_condition_slip_wall, - :Bottom => boundary_condition_slip_wall) - -# Create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, - solver, boundary_conditions = boundary_condition) - -############################################################################### -# ODE solver - -tspan = (0.0, 0.5) -ode = semidiscretize(semi, tspan) - -############################################################################### -# Workaround to set a discontinuous bottom topography and initial condition for debugging and testing. - -# alternative version of the initial conditinon used to setup a truly discontinuous -# test case and initial condition. -# In contrast to the usual signature of initial conditions, this one get passed the -# `element_id` explicitly. In particular, this initial conditions works as intended -# only for the specific mesh loaded above! 
- -function initial_condition_discontinuous_dam_break(x, t, element_id, - equations::ShallowWaterTwoLayerEquations2D) - # Constant values - v1_upper = 0.0 - v2_upper = 0.0 - v1_lower = 0.0 - v2_lower = 0.0 - - # Left side of discontinuity - IDs = [1, 2, 5, 6, 9, 10, 13, 14] - if element_id in IDs - H_upper = 1.0 - H_lower = 0.6 - b = 0.0 - # Right side of discontinuity - else - H_upper = 0.9 - H_lower = 0.5 - b = 0.1 - end - - return prim2cons(SVector(H_upper, v1_upper, v2_upper, H_lower, v1_lower, v2_lower, b), - equations) -end - -# point to the data we want to augment -u = Trixi.wrap_array(ode.u0, semi) -# reset the initial condition -for element in eachelement(semi.solver, semi.cache) - for j in eachnode(semi.solver), i in eachnode(semi.solver) - x_node = Trixi.get_node_coords(semi.cache.elements.node_coordinates, equations, - semi.solver, i, j, element) - u_node = initial_condition_discontinuous_dam_break(x_node, first(tspan), element, - equations) - Trixi.set_node_vars!(u, u_node, equations, semi.solver, i, j, element) - end -end - -############################################################################### -# Callbacks - -summary_callback = SummaryCallback() - -analysis_interval = 500 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - save_analysis = false, - extra_analysis_integrals = (energy_total, - energy_kinetic, - energy_internal)) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 500, - save_initial_solution = true, - save_final_solution = true) - -stepsize_callback = StepsizeCallback(cfl = 1.0) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl deleted file mode 100644 index ca1f54595b..0000000000 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_twolayer_well_balanced.jl +++ /dev/null @@ -1,81 +0,0 @@ - -using OrdinaryDiffEq -using Trixi - -############################################################################### -# Semidiscretization of the two-layer shallow water equations with a discontinuous bottom -# topography to test well-balancedness - -equations = ShallowWaterTwoLayerEquations2D(gravity_constant = 1.0, H0 = 0.6, - rho_upper = 0.9, rho_lower = 1.0) - -# An initial condition with constant total water height, zero velocities and a bottom topography to -# test well-balancedness -function initial_condition_well_balanced(x, t, equations::ShallowWaterTwoLayerEquations2D) - H_lower = 0.5 - H_upper = 0.6 - v1_upper = 0.0 - v2_upper = 0.0 - v1_lower = 0.0 - v2_lower = 0.0 - - # Bottom Topography - b = (((x[1] - 0.5)^2 + (x[2] - 0.5)^2) < 0.04 ? 
- 0.2 * (cos(4 * pi * sqrt((x[1] - 0.5)^2 + (x[2] + - -0.5)^2)) + 1) : 0.0) - return prim2cons(SVector(H_upper, v1_upper, v2_upper, H_lower, v1_lower, v2_lower, b), - equations) -end - -initial_condition = initial_condition_well_balanced - -############################################################################### -# Get the DG approximation space - -volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal) -surface_flux = (flux_es_ersing_etal, flux_nonconservative_ersing_etal) -solver = DGSEM(polydeg = 6, surface_flux = surface_flux, - volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) - -############################################################################### -# This setup is for the curved, split form well-balancedness testing - -# Get the unstructured quad mesh from a file (downloads the file if not available locally) -mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/8f8cd23df27fcd494553f2a89f3c1ba4/raw/85e3c8d976bbe57ca3d559d653087b0889535295/mesh_alfven_wave_with_twist_and_flip.mesh", - joinpath(@__DIR__, "mesh_alfven_wave_with_twist_and_flip.mesh")) - -mesh = UnstructuredMesh2D(mesh_file, periodicity = true) - -# Create the semi discretization object -semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver) - -############################################################################### -# ODE solver - -tspan = (0.0, 10.0) -ode = semidiscretize(semi, tspan) - -summary_callback = SummaryCallback() - -analysis_interval = 1000 -analysis_callback = AnalysisCallback(semi, interval = analysis_interval, - extra_analysis_integrals = (lake_at_rest_error,)) - -stepsize_callback = StepsizeCallback(cfl = 1.0) - -alive_callback = AliveCallback(analysis_interval = analysis_interval) - -save_solution = SaveSolutionCallback(interval = 1000, - save_initial_solution = true, - save_final_solution = true) - -callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, - stepsize_callback) - -############################################################################### -# run the simulation - -sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false), - dt = 1.0, # solve needs some value here but it will be overwritten by the stepsize_callback - save_everystep = false, callback = callbacks); -summary_callback() # print the timer summary diff --git a/src/Trixi.jl b/src/Trixi.jl index b7f7767a9d..5f8cd9cae8 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -160,7 +160,6 @@ export AcousticPerturbationEquations2D, InviscidBurgersEquation1D, LatticeBoltzmannEquations2D, LatticeBoltzmannEquations3D, ShallowWaterEquations1D, ShallowWaterEquations2D, - ShallowWaterTwoLayerEquations1D, ShallowWaterTwoLayerEquations2D, ShallowWaterEquationsQuasi1D, LinearizedEulerEquations2D, PolytropicEulerEquations2D, @@ -179,16 +178,12 @@ export flux, flux_central, flux_lax_friedrichs, flux_hll, flux_hllc, flux_hlle, flux_kennedy_gruber, flux_shima_etal, flux_ec, flux_fjordholm_etal, flux_nonconservative_fjordholm_etal, flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal, - flux_es_ersing_etal, flux_nonconservative_ersing_etal, + flux_nonconservative_ersing_etal, flux_chan_etal, flux_nonconservative_chan_etal, flux_winters_etal, hydrostatic_reconstruction_audusse_etal, flux_nonconservative_audusse_etal, -# TODO: TrixiShallowWater: move anything with "chen_noelle" to new file - hydrostatic_reconstruction_chen_noelle, flux_nonconservative_chen_noelle, - flux_hll_chen_noelle, 
FluxPlusDissipation, DissipationGlobalLaxFriedrichs, DissipationLocalLaxFriedrichs, FluxLaxFriedrichs, max_abs_speed_naive, FluxHLL, min_max_speed_naive, min_max_speed_davis, min_max_speed_einfeldt, - min_max_speed_chen_noelle, FluxLMARS, FluxRotated, flux_shima_etal_turbo, flux_ranocha_turbo, @@ -239,8 +234,6 @@ export DG, VolumeIntegralFluxDifferencing, VolumeIntegralPureLGLFiniteVolume, VolumeIntegralShockCapturingHG, IndicatorHennemannGassner, -# TODO: TrixiShallowWater: move new indicator - IndicatorHennemannGassnerShallowWater, VolumeIntegralUpwind, SurfaceIntegralWeakForm, SurfaceIntegralStrongForm, SurfaceIntegralUpwind, @@ -276,8 +269,7 @@ export load_mesh, load_time, load_timestep, load_timestep!, load_dt, export ControllerThreeLevel, ControllerThreeLevelCombined, IndicatorLöhner, IndicatorLoehner, IndicatorMax -# TODO: TrixiShallowWater: move new limiter -export PositivityPreservingLimiterZhangShu, PositivityPreservingLimiterShallowWater +export PositivityPreservingLimiterZhangShu export trixi_include, examples_dir, get_examples, default_example, default_example_unstructured, ode_default_options diff --git a/src/callbacks_stage/callbacks_stage.jl b/src/callbacks_stage/callbacks_stage.jl index 70d60de791..d5abc1d227 100644 --- a/src/callbacks_stage/callbacks_stage.jl +++ b/src/callbacks_stage/callbacks_stage.jl @@ -8,6 +8,4 @@ include("positivity_zhang_shu.jl") include("subcell_limiter_idp_correction.jl") include("subcell_bounds_check.jl") -# TODO: TrixiShallowWater: move specific limiter file -include("positivity_shallow_water.jl") end # @muladd diff --git a/src/callbacks_stage/positivity_shallow_water.jl b/src/callbacks_stage/positivity_shallow_water.jl deleted file mode 100644 index 36276026fe..0000000000 --- a/src/callbacks_stage/positivity_shallow_water.jl +++ /dev/null @@ -1,89 +0,0 @@ -# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). -# Since these FMAs can increase the performance of many numerical algorithms, -# we need to opt-in explicitly. -# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. -@muladd begin -#! format: noindent - -# TODO: TrixiShallowWater: generic wet/dry limiter - -""" - PositivityPreservingLimiterShallowWater(; variables) - -The limiter is specifically designed for the shallow water equations. -It is applied to all scalar `variables` in their given order -using the defined `threshold_limiter` from the [`ShallowWaterEquations1D`](@ref) struct -or the [`ShallowWaterEquations2D`](@ref) struct to determine the minimal acceptable values. -The order of the `variables` is important and might have a strong influence -on the robustness. - -As opposed to the standard version of the [`PositivityPreservingLimiterZhangShu`](@ref), -nodes with a water height below the `threshold_limiter` are treated in a special way. -To avoid numerical problems caused by velocities close to zero, -the velocity is cut off, such that the node can be identified as "dry". The special feature of the -`ShallowWaterEquations` used here is that the bottom topography is stored as an additional -quantity in the solution vector `u`. However, the value of the bottom topography -should not be changed. That is why, it is not limited. - -After the limiting process is applied to all degrees of freedom, for safety reasons, -the `threshold_limiter` is applied again on all the DG nodes in order to avoid water height below. 
-In the case where the cell mean value is below the threshold before applying the limiter, -there could still be dry nodes afterwards due to the logic of the limiter. - -This fully-discrete positivity-preserving limiter is based on the work of -- Zhang, Shu (2011) - Maximum-principle-satisfying and positivity-preserving high-order schemes - for conservation laws: survey and new developments - [doi: 10.1098/rspa.2011.0153](https://doi.org/10.1098/rspa.2011.0153) -""" -struct PositivityPreservingLimiterShallowWater{N, Variables <: NTuple{N, Any}} - variables::Variables -end - -function PositivityPreservingLimiterShallowWater(; variables) - PositivityPreservingLimiterShallowWater(variables) -end - -function (limiter!::PositivityPreservingLimiterShallowWater)(u_ode, integrator, - semi::AbstractSemidiscretization, - t) - u = wrap_array(u_ode, semi) - @trixi_timeit timer() "positivity-preserving limiter" limiter_shallow_water!(u, - limiter!.variables, - mesh_equations_solver_cache(semi)...) -end - -# Iterate over tuples in a type-stable way using "lispy tuple programming", -# similar to https://stackoverflow.com/a/55849398: -# Iterating over tuples of different functions isn't type-stable in general -# but accessing the first element of a tuple is type-stable. Hence, it's good -# to process one element at a time and replace iteration by recursion here. -# Note that you shouldn't use this with too many elements per tuple since the -# compile times can increase otherwise - but a handful of elements per tuple -# is definitely fine. -function limiter_shallow_water!(u, variables::NTuple{N, Any}, - mesh, - equations::Union{ShallowWaterEquations1D, - ShallowWaterEquations2D}, - solver, cache) where {N} - variable = first(variables) - remaining_variables = Base.tail(variables) - - limiter_shallow_water!(u, equations.threshold_limiter, variable, mesh, equations, - solver, cache) - limiter_shallow_water!(u, remaining_variables, mesh, equations, solver, cache) - return nothing -end - -# terminate the type-stable iteration over tuples -function limiter_shallow_water!(u, variables::Tuple{}, - mesh, - equations::Union{ShallowWaterEquations1D, - ShallowWaterEquations2D}, - solver, cache) - nothing -end - -include("positivity_shallow_water_dg1d.jl") -include("positivity_shallow_water_dg2d.jl") -end # @muladd diff --git a/src/callbacks_stage/positivity_shallow_water_dg1d.jl b/src/callbacks_stage/positivity_shallow_water_dg1d.jl deleted file mode 100644 index 13c6866e89..0000000000 --- a/src/callbacks_stage/positivity_shallow_water_dg1d.jl +++ /dev/null @@ -1,89 +0,0 @@ -# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). -# Since these FMAs can increase the performance of many numerical algorithms, -# we need to opt-in explicitly. -# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. -@muladd begin -#! 
format: noindent - -# TODO: TrixiShallowWater: 1D wet/dry limiter should move - -function limiter_shallow_water!(u, threshold::Real, variable, - mesh::AbstractMesh{1}, - equations::ShallowWaterEquations1D, - dg::DGSEM, cache) - @unpack weights = dg.basis - - @threaded for element in eachelement(dg, cache) - # determine minimum value - value_min = typemax(eltype(u)) - for i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, element) - value_min = min(value_min, variable(u_node, equations)) - end - - # detect if limiting is necessary - value_min < threshold || continue - - # compute mean value - u_mean = zero(get_node_vars(u, equations, dg, 1, element)) - for i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, element) - u_mean += u_node * weights[i] - end - # note that the reference element is [-1,1]^ndims(dg), thus the weights sum to 2 - u_mean = u_mean / 2^ndims(mesh) - - # We compute the value directly with the mean values, as we assume that - # Jensen's inequality holds (e.g. pressure for compressible Euler equations). - value_mean = variable(u_mean, equations) - theta = (value_mean - threshold) / (value_mean - value_min) - for i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, element) - - # Cut off velocity in case that the waterheight is smaller than the threshold - - h_node, h_v_node, b_node = u_node - h_mean, h_v_mean, _ = u_mean # b_mean is not used as b_node must not be overwritten - - # Set them both to zero to apply linear combination correctly - if h_node <= threshold - h_v_node = zero(eltype(u)) - h_v_mean = zero(eltype(u)) - end - - u_node = SVector(h_node, h_v_node, b_node) - u_mean = SVector(h_mean, h_v_mean, b_node) - - # When velocity is cut off, the only averaged value is the waterheight, - # because the velocity is set to zero and this value is passed. - # Otherwise, the velocity is averaged, as well. - # Note that the auxiliary bottom topography variable `b` is never limited. - set_node_vars!(u, theta * u_node + (1 - theta) * u_mean, - equations, dg, i, element) - end - end - - # "Safety" application of the wet/dry thresholds over all the DG nodes - # on the current `element` after the limiting above in order to avoid dry nodes. - # If the value_mean < threshold before applying limiter, there - # could still be dry nodes afterwards due to logic of the limiting - @threaded for element in eachelement(dg, cache) - for i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, element) - - h, hv, b = u_node - - if h <= threshold - h = threshold - hv = zero(eltype(u)) - end - - u_node = SVector(h, hv, b) - - set_node_vars!(u, u_node, equations, dg, i, element) - end - end - - return nothing -end -end # @muladd diff --git a/src/callbacks_stage/positivity_shallow_water_dg2d.jl b/src/callbacks_stage/positivity_shallow_water_dg2d.jl deleted file mode 100644 index da3a25fdcf..0000000000 --- a/src/callbacks_stage/positivity_shallow_water_dg2d.jl +++ /dev/null @@ -1,90 +0,0 @@ -# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). -# Since these FMAs can increase the performance of many numerical algorithms, -# we need to opt-in explicitly. -# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. -@muladd begin -#! 
format: noindent - -# TODO: TrixiShallowWater: 2D wet/dry limiter should move - -function limiter_shallow_water!(u, threshold::Real, variable, - mesh::AbstractMesh{2}, - equations::ShallowWaterEquations2D, dg::DGSEM, cache) - @unpack weights = dg.basis - - @threaded for element in eachelement(dg, cache) - # determine minimum value - value_min = typemax(eltype(u)) - for j in eachnode(dg), i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, j, element) - value_min = min(value_min, variable(u_node, equations)) - end - - # detect if limiting is necessary - value_min < threshold || continue - - # compute mean value - u_mean = zero(get_node_vars(u, equations, dg, 1, 1, element)) - for j in eachnode(dg), i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, j, element) - u_mean += u_node * weights[i] * weights[j] - end - # note that the reference element is [-1,1]^ndims(dg), thus the weights sum to 2 - u_mean = u_mean / 2^ndims(mesh) - - # We compute the value directly with the mean values, as we assume that - # Jensen's inequality holds (e.g. pressure for compressible Euler equations). - value_mean = variable(u_mean, equations) - theta = (value_mean - threshold) / (value_mean - value_min) - for j in eachnode(dg), i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, j, element) - - # Cut off velocity in case that the water height is smaller than the threshold - - h_node, h_v1_node, h_v2_node, b_node = u_node - h_mean, h_v1_mean, h_v2_mean, _ = u_mean # b_mean is not used as it must not be overwritten - - if h_node <= threshold - h_v1_node = zero(eltype(u)) - h_v2_node = zero(eltype(u)) - h_v1_mean = zero(eltype(u)) - h_v2_mean = zero(eltype(u)) - end - - u_node = SVector(h_node, h_v1_node, h_v2_node, b_node) - u_mean = SVector(h_mean, h_v1_mean, h_v2_mean, b_node) - - # When velocities are cut off, the only averaged value is the water height, - # because the velocities are set to zero and this value is passed. - # Otherwise, the velocities are averaged, as well. - # Note that the auxiliary bottom topography variable `b` is never limited. - set_node_vars!(u, theta * u_node + (1 - theta) * u_mean, - equations, dg, i, j, element) - end - end - - # "Safety" application of the wet/dry thresholds over all the DG nodes - # on the current `element` after the limiting above in order to avoid dry nodes. 
- # If the value_mean < threshold before applying limiter, there - # could still be dry nodes afterwards due to logic of the limiting - @threaded for element in eachelement(dg, cache) - for j in eachnode(dg), i in eachnode(dg) - u_node = get_node_vars(u, equations, dg, i, j, element) - - h, h_v1, h_v2, b = u_node - - if h <= threshold - h = threshold - h_v1 = zero(eltype(u)) - h_v2 = zero(eltype(u)) - end - - u_node = SVector(h, h_v1, h_v2, b) - - set_node_vars!(u, u_node, equations, dg, i, j, element) - end - end - - return nothing -end -end # @muladd diff --git a/src/equations/equations.jl b/src/equations/equations.jl index 65875a2a7e..8f476cf6f1 100644 --- a/src/equations/equations.jl +++ b/src/equations/equations.jl @@ -405,8 +405,6 @@ abstract type AbstractShallowWaterEquations{NDIMS, NVARS} <: AbstractEquations{NDIMS, NVARS} end include("shallow_water_1d.jl") include("shallow_water_2d.jl") -include("shallow_water_two_layer_1d.jl") -include("shallow_water_two_layer_2d.jl") include("shallow_water_quasi_1d.jl") # CompressibleEulerEquations diff --git a/src/equations/numerical_fluxes.jl b/src/equations/numerical_fluxes.jl index 44d523b6e8..87fcb41224 100644 --- a/src/equations/numerical_fluxes.jl +++ b/src/equations/numerical_fluxes.jl @@ -326,29 +326,6 @@ This is a [`FluxHLL`](@ref)-type two-wave solver with special estimates of the w """ const flux_hlle = FluxHLL(min_max_speed_einfeldt) -# TODO: TrixiShallowWater: move the chen_noelle flux structure to the new package - -# An empty version of the `min_max_speed_chen_noelle` function is declared here -# in order to create a dimension agnostic version of `flux_hll_chen_noelle`. -# The full description of this wave speed estimate can be found in the docstrings -# for `min_max_speed_chen_noelle` in `shallow_water_1d.jl` or `shallow_water_2d.jl`. -function min_max_speed_chen_noelle end - -""" - flux_hll_chen_noelle = FluxHLL(min_max_speed_chen_noelle) - -An instance of [`FluxHLL`](@ref) specific to the shallow water equations that -uses the wave speed estimates from [`min_max_speed_chen_noelle`](@ref). -This HLL flux is guaranteed to have zero numerical mass flux out of a "dry" element, -maintain positivity of the water height, and satisfy an entropy inequality. - -For complete details see Section 2.4 of the following reference -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI: 10.1137/15M1053074](https://doi.org/10.1137/15M1053074) -""" -const flux_hll_chen_noelle = FluxHLL(min_max_speed_chen_noelle) - """ flux_shima_etal_turbo(u_ll, u_rr, orientation_or_normal_direction, equations) diff --git a/src/equations/shallow_water_1d.jl b/src/equations/shallow_water_1d.jl index 25ce0fa79f..e348ef946b 100644 --- a/src/equations/shallow_water_1d.jl +++ b/src/equations/shallow_water_1d.jl @@ -6,7 +6,7 @@ #! format: noindent @doc raw""" - ShallowWaterEquations1D(; gravity, H0 = 0, threshold_limiter = nothing threshold_wet = nothing) + ShallowWaterEquations1D(; gravity, H0 = 0) Shallow water equations (SWE) in one space dimension. The equations are given by ```math @@ -24,12 +24,6 @@ also defines the total water height as ``H = h + b``. The additional quantity ``H_0`` is also available to store a reference value for the total water height that is useful to set initial conditions or test the "lake-at-rest" well-balancedness. -Also, there are two thresholds which prevent numerical problems as well as instabilities. 
Both of them do not -have to be passed, as default values are defined within the struct. The first one, `threshold_limiter`, is -used in [`PositivityPreservingLimiterShallowWater`](@ref) on the water height, as a (small) shift on the initial -condition and cutoff before the next time step. The second one, `threshold_wet`, is applied on the water height to -define when the flow is "wet" before calculating the numerical flux. - The bottom topography function ``b(x)`` is set inside the initial condition routine for a particular problem setup. To test the conservative form of the SWE one can set the bottom topography variable `b` to zero. @@ -51,35 +45,16 @@ References for the SWE are many but a good introduction is available in Chapter [DOI: 10.1017/CBO9780511791253](https://doi.org/10.1017/CBO9780511791253) """ struct ShallowWaterEquations1D{RealT <: Real} <: AbstractShallowWaterEquations{1, 3} - # TODO: TrixiShallowWater: where should the `threshold_limiter` and `threshold_wet` live? - # how to "properly" export these constants across the two packages? gravity::RealT # gravitational constant H0::RealT # constant "lake-at-rest" total water height - # `threshold_limiter` used in `PositivityPreservingLimiterShallowWater` on water height, - # as a (small) shift on the initial condition and cutoff before the next time step. - # Default is 500*eps() which in double precision is ≈1e-13. - threshold_limiter::RealT - # `threshold_wet` applied on water height to define when the flow is "wet" - # before calculating the numerical flux. - # Default is 5*eps() which in double precision is ≈1e-15. - threshold_wet::RealT end # Allow for flexibility to set the gravitational constant within an elixir depending on the # application where `gravity_constant=1.0` or `gravity_constant=9.81` are common values. # The reference total water height H0 defaults to 0.0 but is used for the "lake-at-rest" # well-balancedness test cases. -# Strict default values for thresholds that performed well in many numerical experiments -function ShallowWaterEquations1D(; gravity_constant, H0 = zero(gravity_constant), - threshold_limiter = nothing, threshold_wet = nothing) - T = promote_type(typeof(gravity_constant), typeof(H0)) - if threshold_limiter === nothing - threshold_limiter = 500 * eps(T) - end - if threshold_wet === nothing - threshold_wet = 5 * eps(T) - end - ShallowWaterEquations1D(gravity_constant, H0, threshold_limiter, threshold_wet) +function ShallowWaterEquations1D(; gravity_constant, H0 = zero(gravity_constant)) + ShallowWaterEquations1D(gravity_constant, H0) end have_nonconservative_terms(::ShallowWaterEquations1D) = True() @@ -332,54 +307,6 @@ Further details on the hydrostatic reconstruction and its motivation can be foun z) end -# TODO: TrixiShallowWater: move wet/dry specific routine -""" - flux_nonconservative_chen_noelle(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterEquations1D) - -Non-symmetric two-point surface flux that discretizes the nonconservative (source) term. -The discretization uses the `hydrostatic_reconstruction_chen_noelle` on the conservative -variables. - -Should be used together with [`FluxHydrostaticReconstruction`](@ref) and -[`hydrostatic_reconstruction_chen_noelle`](@ref) in the surface flux to ensure consistency. 
- -Further details on the hydrostatic reconstruction and its motivation can be found in -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -@inline function flux_nonconservative_chen_noelle(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterEquations1D) - - # Pull the water height and bottom topography on the left - h_ll, _, b_ll = u_ll - h_rr, _, b_rr = u_rr - - H_ll = h_ll + b_ll - H_rr = h_rr + b_rr - - b_star = min(max(b_ll, b_rr), min(H_ll, H_rr)) - - # Create the hydrostatic reconstruction for the left solution state - u_ll_star, _ = hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, equations) - - # Copy the reconstructed water height for easier to read code - h_ll_star = u_ll_star[1] - - z = zero(eltype(u_ll)) - # Includes two parts: - # (i) Diagonal (consistent) term from the volume flux that uses `b_ll` to avoid - # cross-averaging across a discontinuous bottom topography - # (ii) True surface part that uses `h_ll` and `h_ll_star` to handle discontinuous bathymetry - return SVector(z, - equations.gravity * h_ll * b_ll - - equations.gravity * (h_ll_star + h_ll) * (b_ll - b_star), - z) -end - """ flux_nonconservative_ersing_etal(u_ll, u_rr, orientation::Integer, equations::ShallowWaterEquations1D) @@ -521,67 +448,6 @@ Further details on this hydrostatic reconstruction and its motivation can be fou return u_ll_star, u_rr_star end -# TODO: TrixiShallowWater: move wet/dry specific routine -""" - hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterEquations1D) - -A particular type of hydrostatic reconstruction of the water height to guarantee well-balancedness -for a general bottom topography of the [`ShallowWaterEquations1D`](@ref). The reconstructed solution states -`u_ll_star` and `u_rr_star` variables are used to evaluate the surface numerical flux at the interface. -The key idea is a linear reconstruction of the bottom and water height at the interfaces using subcells. -Use in combination with the generic numerical flux routine [`FluxHydrostaticReconstruction`](@ref). - -Further details on this hydrostatic reconstruction and its motivation can be found in -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -@inline function hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, - equations::ShallowWaterEquations1D) - # Unpack left and right water heights and bottom topographies - h_ll, _, b_ll = u_ll - h_rr, _, b_rr = u_rr - - # Get the velocities on either side - v_ll = velocity(u_ll, equations) - v_rr = velocity(u_rr, equations) - - H_ll = b_ll + h_ll - H_rr = b_rr + h_rr - - b_star = min(max(b_ll, b_rr), min(H_ll, H_rr)) - - # Compute the reconstructed water heights - h_ll_star = min(H_ll - b_star, h_ll) - h_rr_star = min(H_rr - b_star, h_rr) - - # Set the water height to be at least the value stored in the variable threshold after - # the hydrostatic reconstruction is applied and before the numerical flux is calculated - # to avoid numerical problem with arbitrary small values. Interfaces with a water height - # lower or equal to the threshold can be declared as dry. - # The default value for `threshold_wet` is ≈ 5*eps(), or 1e-15 in double precision, is set - # in the `ShallowWaterEquations1D` struct. 
This threshold value can be changed in the constructor - # call of this equation struct in an elixir. - threshold = equations.threshold_wet - - if (h_ll_star <= threshold) - h_ll_star = threshold - v_ll = zero(v_ll) - end - - if (h_rr_star <= threshold) - h_rr_star = threshold - v_rr = zero(v_rr) - end - - # Create the conservative variables using the reconstruted water heights - u_ll_star = SVector(h_ll_star, h_ll_star * v_ll, b_ll) - u_rr_star = SVector(h_rr_star, h_rr_star * v_rr, b_rr) - - return u_ll_star, u_rr_star -end - # Calculate maximum wave speed for local Lax-Friedrichs-type dissipation as the # maximum velocity magnitude plus the maximum speed of sound @inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer, @@ -646,39 +512,6 @@ end return λ_min, λ_max end -# TODO: TrixiShallowWater: move wet/dry specific routine -""" - min_max_speed_chen_noelle(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterEquations1D) - -The approximated speeds for the HLL type numerical flux used by Chen and Noelle for their -hydrostatic reconstruction. As they state in the paper, these speeds are chosen for the numerical -flux to ensure positivity and to satisfy an entropy inequality. - -Further details on this hydrostatic reconstruction and its motivation can be found in -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -@inline function min_max_speed_chen_noelle(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterEquations1D) - # Get the velocity quantities - v_ll = velocity(u_ll, equations) - v_rr = velocity(u_rr, equations) - - # Calculate the wave celerity on the left and right - h_ll = waterheight(u_ll, equations) - h_rr = waterheight(u_rr, equations) - - a_ll = sqrt(equations.gravity * h_ll) - a_rr = sqrt(equations.gravity * h_rr) - - λ_min = min(v_ll - a_ll, v_rr - a_rr, zero(eltype(u_ll))) - λ_max = max(v_ll + a_ll, v_rr + a_rr, zero(eltype(u_ll))) - - return λ_min, λ_max -end - # More refined estimates for minimum and maximum wave speeds for HLL-type fluxes @inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer, equations::ShallowWaterEquations1D) @@ -841,20 +674,10 @@ end end # Calculate the error for the "lake-at-rest" test case where H = h+b should -# be a constant value over time. Note, assumes there is a single reference -# water height `H0` with which to compare. -# -# TODO: TrixiShallowWater: where should `threshold_limiter` live? May need -# to modify or have different versions of the `lake_at_rest_error` function +# be a constant value over time. @inline function lake_at_rest_error(u, equations::ShallowWaterEquations1D) h, _, b = u - # For well-balancedness testing with possible wet/dry regions the reference - # water height `H0` accounts for the possibility that the bottom topography - # can emerge out of the water as well as for the threshold offset to avoid - # division by a "hard" zero water heights as well. - H0_wet_dry = max(equations.H0, b + equations.threshold_limiter) - - return abs(H0_wet_dry - (h + b)) + return abs(equations.H0 - (h + b)) end end # @muladd diff --git a/src/equations/shallow_water_2d.jl b/src/equations/shallow_water_2d.jl index 6728d7d555..74a299a51e 100644 --- a/src/equations/shallow_water_2d.jl +++ b/src/equations/shallow_water_2d.jl @@ -6,7 +6,7 @@ #! 
format: noindent @doc raw""" - ShallowWaterEquations2D(; gravity, H0 = 0, threshold_limiter = nothing, threshold_wet = nothing) + ShallowWaterEquations2D(; gravity, H0 = 0) Shallow water equations (SWE) in two space dimensions. The equations are given by ```math @@ -27,12 +27,6 @@ also defines the total water height as ``H = h + b``. The additional quantity ``H_0`` is also available to store a reference value for the total water height that is useful to set initial conditions or test the "lake-at-rest" well-balancedness. -Also, there are two thresholds which prevent numerical problems as well as instabilities. Both of them do not -have to be passed, as default values are defined within the struct. The first one, `threshold_limiter`, is -used in [`PositivityPreservingLimiterShallowWater`](@ref) on the water height, as a (small) shift on the initial -condition and cutoff before the next time step. The second one, `threshold_wet`, is applied on the water height to -define when the flow is "wet" before calculating the numerical flux. - The bottom topography function ``b(x,y)`` is set inside the initial condition routine for a particular problem setup. To test the conservative form of the SWE one can set the bottom topography variable `b` to zero. @@ -54,18 +48,8 @@ References for the SWE are many but a good introduction is available in Chapter [DOI: 10.1017/CBO9780511791253](https://doi.org/10.1017/CBO9780511791253) """ struct ShallowWaterEquations2D{RealT <: Real} <: AbstractShallowWaterEquations{2, 4} - # TODO: TrixiShallowWater: where should the `threshold_limiter` and `threshold_wet` live? - # how to "properly" export these constants across the two packages? gravity::RealT # gravitational constant H0::RealT # constant "lake-at-rest" total water height - # `threshold_limiter` used in `PositivityPreservingLimiterShallowWater` on water height, - # as a (small) shift on the initial condition and cutoff before the next time step. - # Default is 500*eps() which in double precision is ≈1e-13. - threshold_limiter::RealT - # `threshold_wet` applied on water height to define when the flow is "wet" - # before calculating the numerical flux. - # Default is 5*eps() which in double precision is ≈1e-15. - threshold_wet::RealT end # Allow for flexibility to set the gravitational constant within an elixir depending on the @@ -73,16 +57,8 @@ end # The reference total water height H0 defaults to 0.0 but is used for the "lake-at-rest" # well-balancedness test cases. 
# Strict default values for thresholds that performed well in many numerical experiments -function ShallowWaterEquations2D(; gravity_constant, H0 = zero(gravity_constant), - threshold_limiter = nothing, threshold_wet = nothing) - T = promote_type(typeof(gravity_constant), typeof(H0)) - if threshold_limiter === nothing - threshold_limiter = 500 * eps(T) - end - if threshold_wet === nothing - threshold_wet = 5 * eps(T) - end - ShallowWaterEquations2D(gravity_constant, H0, threshold_limiter, threshold_wet) +function ShallowWaterEquations2D(; gravity_constant, H0 = zero(gravity_constant)) + ShallowWaterEquations2D(gravity_constant, H0) end have_nonconservative_terms(::ShallowWaterEquations2D) = True() @@ -460,69 +436,6 @@ Further details for the hydrostatic reconstruction and its motivation can be fou return u_ll_star, u_rr_star end -# TODO: TrixiShallowWater: move wet/dry specific routine -""" - hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterEquations2D) - -A particular type of hydrostatic reconstruction of the water height to guarantee well-balancedness -for a general bottom topography of the [`ShallowWaterEquations2D`](@ref). The reconstructed solution states -`u_ll_star` and `u_rr_star` variables are then used to evaluate the surface numerical flux at the interface. -The key idea is a linear reconstruction of the bottom and water height at the interfaces using subcells. -Use in combination with the generic numerical flux routine [`FluxHydrostaticReconstruction`](@ref). - -Further details on this hydrostatic reconstruction and its motivation can be found in -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -@inline function hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, - equations::ShallowWaterEquations2D) - # Unpack left and right water heights and bottom topographies - h_ll, _, _, b_ll = u_ll - h_rr, _, _, b_rr = u_rr - - # Get the velocities on either side - v1_ll, v2_ll = velocity(u_ll, equations) - v1_rr, v2_rr = velocity(u_rr, equations) - - H_ll = b_ll + h_ll - H_rr = b_rr + h_rr - - b_star = min(max(b_ll, b_rr), min(H_ll, H_rr)) - - # Compute the reconstructed water heights - h_ll_star = min(H_ll - b_star, h_ll) - h_rr_star = min(H_rr - b_star, h_rr) - - # Set the water height to be at least the value stored in the variable threshold after - # the hydrostatic reconstruction is applied and before the numerical flux is calculated - # to avoid numerical problem with arbitrary small values. Interfaces with a water height - # lower or equal to the threshold can be declared as dry. - # The default value for `threshold_wet` is ≈5*eps(), or 1e-15 in double precision, is set - # in the `ShallowWaterEquations2D` struct. This threshold value can be changed in the constructor - # call of this equation struct in an elixir. 
- threshold = equations.threshold_wet - - if (h_ll_star <= threshold) - h_ll_star = threshold - v1_ll = zero(v1_ll) - v2_ll = zero(v2_ll) - end - - if (h_rr_star <= threshold) - h_rr_star = threshold - v1_rr = zero(v1_rr) - v2_rr = zero(v2_rr) - end - - # Create the conservative variables using the reconstruted water heights - u_ll_star = SVector(h_ll_star, h_ll_star * v1_ll, h_ll_star * v2_ll, b_ll) - u_rr_star = SVector(h_rr_star, h_rr_star * v1_rr, h_rr_star * v2_rr, b_rr) - - return u_ll_star, u_rr_star -end - """ flux_nonconservative_audusse_etal(u_ll, u_rr, orientation::Integer, equations::ShallowWaterEquations2D) @@ -608,104 +521,6 @@ end return SVector(f1, f2, f3, f4) end -# TODO: TrixiShallowWater: move wet/dry specific routine -""" - flux_nonconservative_chen_noelle(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterEquations2D) - flux_nonconservative_chen_noelle(u_ll, u_rr, - normal_direction_ll ::AbstractVector, - normal_direction_average ::AbstractVector, - equations::ShallowWaterEquations2D) - -Non-symmetric two-point surface flux that discretizes the nonconservative (source) term. -The discretization uses the [`hydrostatic_reconstruction_chen_noelle`](@ref) on the conservative -variables. - -Should be used together with [`FluxHydrostaticReconstruction`](@ref) and -[`hydrostatic_reconstruction_chen_noelle`](@ref) in the surface flux to ensure consistency. - -Further details on the hydrostatic reconstruction and its motivation can be found in -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -@inline function flux_nonconservative_chen_noelle(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterEquations2D) - # Pull the water height and bottom topography on the left - h_ll, _, _, b_ll = u_ll - h_rr, _, _, b_rr = u_rr - - H_ll = h_ll + b_ll - H_rr = h_rr + b_rr - - b_star = min(max(b_ll, b_rr), min(H_ll, H_rr)) - - # Create the hydrostatic reconstruction for the left solution state - u_ll_star, _ = hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, equations) - - # Copy the reconstructed water height for easier to read code - h_ll_star = u_ll_star[1] - - z = zero(eltype(u_ll)) - # Includes two parts: - # (i) Diagonal (consistent) term from the volume flux that uses `b_ll` to avoid - # cross-averaging across a discontinuous bottom topography - # (ii) True surface part that uses `h_ll` and `h_ll_star` to handle discontinuous bathymetry - g = equations.gravity - if orientation == 1 - f = SVector(z, - g * h_ll * b_ll - g * (h_ll_star + h_ll) * (b_ll - b_star), - z, z) - else # orientation == 2 - f = SVector(z, z, - g * h_ll * b_ll - g * (h_ll_star + h_ll) * (b_ll - b_star), - z) - end - - return f -end - -@inline function flux_nonconservative_chen_noelle(u_ll, u_rr, - normal_direction_ll::AbstractVector, - normal_direction_average::AbstractVector, - equations::ShallowWaterEquations2D) - # Pull the water height and bottom topography on the left - h_ll, _, _, b_ll = u_ll - h_rr, _, _, b_rr = u_rr - - H_ll = h_ll + b_ll - H_rr = h_rr + b_rr - - b_star = min(max(b_ll, b_rr), min(H_ll, H_rr)) - - # Create the hydrostatic reconstruction for the left solution state - u_ll_star, _ = hydrostatic_reconstruction_chen_noelle(u_ll, u_rr, equations) - - # Copy the reconstructed water height for easier to read code - h_ll_star = u_ll_star[1] - - # Comes in two parts: - # (i) Diagonal (consistent) term from the volume flux that uses 
`normal_direction_average` - # but we use `b_ll` to avoid cross-averaging across a discontinuous bottom topography - - f2 = normal_direction_average[1] * equations.gravity * h_ll * b_ll - f3 = normal_direction_average[2] * equations.gravity * h_ll * b_ll - - # (ii) True surface part that uses `normal_direction_ll`, `h_ll` and `h_ll_star` - # to handle discontinuous bathymetry - - f2 -= normal_direction_ll[1] * equations.gravity * (h_ll_star + h_ll) * - (b_ll - b_star) - f3 -= normal_direction_ll[2] * equations.gravity * (h_ll_star + h_ll) * - (b_ll - b_star) - - # First and last equations do not have a nonconservative flux - f1 = f4 = zero(eltype(u_ll)) - - return SVector(f1, f2, f3, f4) -end - """ flux_nonconservative_ersing_etal(u_ll, u_rr, orientation::Integer, equations::ShallowWaterEquations2D) @@ -1020,67 +835,6 @@ end return λ_min, λ_max end -# TODO: TrixiShallowWater: move wet/dry specific routine -""" - min_max_speed_chen_noelle(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterEquations2D) - min_max_speed_chen_noelle(u_ll, u_rr, normal_direction::AbstractVector, - equations::ShallowWaterEquations2D) - -Special estimate of the minimal and maximal wave speed of the shallow water equations for -the left and right states `u_ll, u_rr`. These approximate speeds are used for the HLL-type -numerical flux [`flux_hll_chen_noelle`](@ref). These wave speed estimates -together with a particular hydrostatic reconstruction technique guarantee -that the numerical flux is positive and satisfies an entropy inequality. - -Further details on this hydrostatic reconstruction and its motivation can be found in -the reference below. The definition of the wave speeds are given in Equation (2.20). -- Guoxian Chen and Sebastian Noelle (2017) - A new hydrostatic reconstruction scheme based on subcell reconstructions - [DOI:10.1137/15M1053074](https://dx.doi.org/10.1137/15M1053074) -""" -@inline function min_max_speed_chen_noelle(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterEquations2D) - h_ll = waterheight(u_ll, equations) - v1_ll, v2_ll = velocity(u_ll, equations) - h_rr = waterheight(u_rr, equations) - v1_rr, v2_rr = velocity(u_rr, equations) - - a_ll = sqrt(equations.gravity * h_ll) - a_rr = sqrt(equations.gravity * h_rr) - - if orientation == 1 # x-direction - λ_min = min(v1_ll - a_ll, v1_rr - a_rr, zero(eltype(u_ll))) - λ_max = max(v1_ll + a_ll, v1_rr + a_rr, zero(eltype(u_ll))) - else # y-direction - λ_min = min(v2_ll - a_ll, v2_rr - a_rr, zero(eltype(u_ll))) - λ_max = max(v2_ll + a_ll, v2_rr + a_rr, zero(eltype(u_ll))) - end - - return λ_min, λ_max -end - -@inline function min_max_speed_chen_noelle(u_ll, u_rr, normal_direction::AbstractVector, - equations::ShallowWaterEquations2D) - h_ll = waterheight(u_ll, equations) - v1_ll, v2_ll = velocity(u_ll, equations) - h_rr = waterheight(u_rr, equations) - v1_rr, v2_rr = velocity(u_rr, equations) - - v_normal_ll = v1_ll * normal_direction[1] + v2_ll * normal_direction[2] - v_normal_rr = v1_rr * normal_direction[1] + v2_rr * normal_direction[2] - - norm_ = norm(normal_direction) - - a_ll = sqrt(equations.gravity * h_ll) * norm_ - a_rr = sqrt(equations.gravity * h_rr) * norm_ - - λ_min = min(v_normal_ll - a_ll, v_normal_rr - a_rr, zero(eltype(u_ll))) - λ_max = max(v_normal_ll + a_ll, v_normal_rr + a_rr, zero(eltype(u_ll))) - - return λ_min, λ_max -end - # More refined estimates for minimum and maximum wave speeds for HLL-type fluxes @inline function min_max_speed_davis(u_ll, u_rr, orientation::Integer, 
equations::ShallowWaterEquations2D) @@ -1327,20 +1081,10 @@ end end # Calculate the error for the "lake-at-rest" test case where H = h+b should -# be a constant value over time. Note, assumes there is a single reference -# water height `H0` with which to compare. -# -# TODO: TrixiShallowWater: where should `threshold_limiter` live? May need -# to modify or have different versions of the `lake_at_rest_error` function +# be a constant value over time. @inline function lake_at_rest_error(u, equations::ShallowWaterEquations2D) h, _, _, b = u - # For well-balancedness testing with possible wet/dry regions the reference - # water height `H0` accounts for the possibility that the bottom topography - # can emerge out of the water as well as for the threshold offset to avoid - # division by a "hard" zero water heights as well. - H0_wet_dry = max(equations.H0, b + equations.threshold_limiter) - - return abs(H0_wet_dry - (h + b)) + return abs(equations.H0 - (h + b)) end end # @muladd diff --git a/src/equations/shallow_water_quasi_1d.jl b/src/equations/shallow_water_quasi_1d.jl index d52fbab841..51c360104a 100644 --- a/src/equations/shallow_water_quasi_1d.jl +++ b/src/equations/shallow_water_quasi_1d.jl @@ -22,12 +22,6 @@ The gravitational constant is denoted by `g`, the (possibly) variable bottom top The additional quantity ``H_0`` is also available to store a reference value for the total water height that is useful to set initial conditions or test the "lake-at-rest" well-balancedness. -Also, there are two thresholds which prevent numerical problems as well as instabilities. Both of them do not -have to be passed, as default values are defined within the struct. The first one, `threshold_limiter`, is -used in [`PositivityPreservingLimiterShallowWater`](@ref) on the water height, as a (small) shift on the initial -condition and cutoff before the next time step. The second one, `threshold_wet`, is applied on the water height to -define when the flow is "wet" before calculating the numerical flux. - The bottom topography function ``b(x)`` and channel width ``a(x)`` are set inside the initial condition routine for a particular problem setup. To test the conservative form of the SWE one can set the bottom topography variable `b` to zero and ``a`` to one. @@ -47,14 +41,6 @@ struct ShallowWaterEquationsQuasi1D{RealT <: Real} <: AbstractShallowWaterEquations{1, 4} gravity::RealT # gravitational constant H0::RealT # constant "lake-at-rest" total water height - # `threshold_limiter` used in `PositivityPreservingLimiterShallowWater` on water height, - # as a (small) shift on the initial condition and cutoff before the next time step. - # Default is 500*eps() which in double precision is ≈1e-13. - threshold_limiter::RealT - # `threshold_wet` applied on water height to define when the flow is "wet" - # before calculating the numerical flux. - # Default is 5*eps() which in double precision is ≈1e-15. - threshold_wet::RealT end # Allow for flexibility to set the gravitational constant within an elixir depending on the @@ -62,17 +48,8 @@ end # The reference total water height H0 defaults to 0.0 but is used for the "lake-at-rest" # well-balancedness test cases. 
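Analogously for the quasi-1D system in this file, a short hedged sketch; the state vector values are hypothetical and only illustrate that the lake-at-rest error now reduces to `abs(H0 - (h + b))`:

```julia
using Trixi

# Simplified constructor: only gravity and the reference height `H0` remain.
equations = ShallowWaterEquationsQuasi1D(gravity_constant = 9.81, H0 = 2.0)

# Hypothetical state (a * h, a * h * v, b, a) with channel width a = 1,
# water height h = 1.5 and bottom topography b = 0.5, so that h + b == H0.
u = SVector(1.5, 0.0, 0.5, 1.0)
Trixi.lake_at_rest_error(u, equations) # abs(H0 - (h + b)) == 0.0
```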
# Strict default values for thresholds that performed well in many numerical experiments -function ShallowWaterEquationsQuasi1D(; gravity_constant, H0 = zero(gravity_constant), - threshold_limiter = nothing, - threshold_wet = nothing) - T = promote_type(typeof(gravity_constant), typeof(H0)) - if threshold_limiter === nothing - threshold_limiter = 500 * eps(T) - end - if threshold_wet === nothing - threshold_wet = 5 * eps(T) - end - ShallowWaterEquationsQuasi1D(gravity_constant, H0, threshold_limiter, threshold_wet) +function ShallowWaterEquationsQuasi1D(; gravity_constant, H0 = zero(gravity_constant)) + ShallowWaterEquationsQuasi1D(gravity_constant, H0) end have_nonconservative_terms(::ShallowWaterEquationsQuasi1D) = True() @@ -338,18 +315,10 @@ end # be a constant value over time. Note, assumes there is a single reference # water height `H0` with which to compare. # -# TODO: TrixiShallowWater: where should `threshold_limiter` live? May need -# to modify or have different versions of the `lake_at_rest_error` function @inline function lake_at_rest_error(u, equations::ShallowWaterEquationsQuasi1D) _, _, b, _ = u h = waterheight(u, equations) - # For well-balancedness testing with possible wet/dry regions the reference - # water height `H0` accounts for the possibility that the bottom topography - # can emerge out of the water as well as for the threshold offset to avoid - # division by a "hard" zero water heights as well. - H0_wet_dry = max(equations.H0, b + equations.threshold_limiter) - - return abs(H0_wet_dry - (h + b)) + return abs(equations.H0 - (h + b)) end end # @muladd diff --git a/src/equations/shallow_water_two_layer_1d.jl b/src/equations/shallow_water_two_layer_1d.jl deleted file mode 100644 index 42ff393593..0000000000 --- a/src/equations/shallow_water_two_layer_1d.jl +++ /dev/null @@ -1,511 +0,0 @@ -# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). -# Since these FMAs can increase the performance of many numerical algorithms, -# we need to opt-in explicitly. -# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. -@muladd begin -#! format: noindent - -# TODO: TrixiShallowWater: 1D two layer equations should move to new package - -@doc raw""" - ShallowWaterTwoLayerEquations1D(gravity, H0, rho_upper, rho_lower) - -Two-Layer Shallow Water equations (2LSWE) in one space dimension. The equations are given by -```math -\begin{alignat*}{4} -&\frac{\partial}{\partial t}h_{upper} -&&+ \frac{\partial}{\partial x}\left(h_{upper} v_{1,upper}\right) -&&= 0 \\ -&\frac{\partial}{\partial t}\left(h_{upper}v_{1,upper}\right) -&&+ \frac{\partial}{\partial x}\left(h_{upper}v_{1,upper}^2 + \dfrac{gh_{upper}^2}{2}\right) -&&= -gh_{upper}\frac{\partial}{\partial x}\left(b+h_{lower}\right)\\ -&\frac{\partial}{\partial t}h_{lower} -&&+ \frac{\partial}{\partial x}\left(h_{lower}v_{1,lower}\right) -&&= 0 \\ -&\frac{\partial}{\partial t}\left(h_{lower}v_{1,lower}\right) -&&+ \frac{\partial}{\partial x}\left(h_{lower}v_{1,lower}^2 + \dfrac{gh_{lower}^2}{2}\right) -&&= -gh_{lower}\frac{\partial}{\partial x}\left(b+\dfrac{\rho_{upper}}{\rho_{lower}}h_{upper}\right). -\end{alignat*} -``` -The unknown quantities of the 2LSWE are the water heights of the {lower} layer ``h_{lower}`` and the -{upper} layer ``h_{upper}`` with respective velocities ``v_{1,upper}`` and ``v_{1,lower}``. The gravitational constant is -denoted by `g`, the layer densitites by ``\rho_{upper}``and ``\rho_{lower}`` and the (possibly) variable -bottom topography function ``b(x)``. 
The conservative variable water height ``h_{lower}`` is measured
-from the bottom topography ``b`` and ``h_{upper}`` relative to ``h_{lower}``, therefore one also defines the
-total water heights as ``H_{upper} = h_{upper} + h_{lower} + b`` and ``H_{lower} = h_{lower} + b``.
-
-The densities must be chosen such that ``\rho_{upper} < \rho_{lower}``, to make sure that the heavier fluid
-``\rho_{lower}`` is in the bottom layer and the lighter fluid ``\rho_{upper}`` in the upper layer.
-
-The additional quantity ``H_0`` is also available to store a reference value for the total water
-height that is useful to set initial conditions or test the "lake-at-rest" well-balancedness.
-
-The bottom topography function ``b(x)`` is set inside the initial condition routine
-for a particular problem setup.
-
-In addition to the unknowns, Trixi currently stores the bottom topography values at the
-approximation points despite being fixed in time. This is done for convenience of computing the
-bottom topography gradients on the fly during the approximation as well as computing auxiliary
-quantities like the total water height ``H`` or the entropy variables.
-This affects the implementation and use of these equations in various ways:
-* The flux values corresponding to the bottom topography must be zero.
-* The bottom topography values must be included when defining initial conditions, boundary
-  conditions or source terms.
-* [`AnalysisCallback`](@ref) analyzes this variable.
-* Trixi's visualization tools will visualize the bottom topography by default.
-
-A good introduction for the 2LSWE is available in Chapter 12 of the book:
-- Benoit Cushman-Roisin (2011)\
-  Introduction to geophysical fluid dynamics: physical and numerical aspects\
-  \
-  ISBN: 978-0-12-088759-0
-"""
-struct ShallowWaterTwoLayerEquations1D{RealT <: Real} <:
-       AbstractShallowWaterEquations{1, 5}
-    gravity::RealT # gravitational constant
-    H0::RealT # constant "lake-at-rest" total water height
-    rho_upper::RealT # upper layer density
-    rho_lower::RealT # lower layer density
-    r::RealT # ratio of rho_upper / rho_lower
-end
-
-# Allow for flexibility to set the gravitational constant within an elixir depending on the
-# application where `gravity_constant=1.0` or `gravity_constant=9.81` are common values.
-# The reference total water height H0 defaults to 0.0 but is used for the "lake-at-rest"
-# well-balancedness test cases. Densities must be specified such that rho_upper <= rho_lower.
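Before the constructor below is removed, this is how the two-layer 1D system was typically set up on the user side; all numeric values are illustrative assumptions:

```julia
using Trixi

# The keyword constructor enforces rho_upper <= rho_lower and stores the
# density ratio r = rho_upper / rho_lower (here 0.9).
equations = ShallowWaterTwoLayerEquations1D(gravity_constant = 10.0,
                                            H0 = 2.0,
                                            rho_upper = 0.9,
                                            rho_lower = 1.0)
```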
-function ShallowWaterTwoLayerEquations1D(; gravity_constant, - H0 = zero(gravity_constant), rho_upper, - rho_lower) - # Assign density ratio if rho_upper <= rho_lower - if rho_upper > rho_lower - error("Invalid input: Densities must be chosen such that rho_upper <= rho_lower") - else - r = rho_upper / rho_lower - end - ShallowWaterTwoLayerEquations1D(gravity_constant, H0, rho_upper, rho_lower, r) -end - -have_nonconservative_terms(::ShallowWaterTwoLayerEquations1D) = True() -function varnames(::typeof(cons2cons), ::ShallowWaterTwoLayerEquations1D) - ("h_upper", "h_v_upper", - "h_lower", "h_v_lower", "b") -end -# Note, we use the total water height, H_lower = h_upper + h_lower + b, and first layer total height -# H_upper = h_upper + b as the first primitive variable for easier visualization and setting initial -# conditions -function varnames(::typeof(cons2prim), ::ShallowWaterTwoLayerEquations1D) - ("H_upper", "v_upper", - "H_lower", "v_lower", "b") -end - -# Set initial conditions at physical location `x` for time `t` -""" - initial_condition_convergence_test(x, t, equations::ShallowWaterTwoLayerEquations1D) - -A smooth initial condition used for convergence tests in combination with -[`source_terms_convergence_test`](@ref) (and -[`BoundaryConditionDirichlet(initial_condition_convergence_test)`](@ref) in non-periodic domains). -""" -function initial_condition_convergence_test(x, t, - equations::ShallowWaterTwoLayerEquations1D) - # some constants are chosen such that the function is periodic on the domain [0,sqrt(2)] - ω = 2.0 * pi * sqrt(2.0) - - H_lower = 2.0 + 0.1 * sin(ω * x[1] + t) - H_upper = 4.0 + 0.1 * cos(ω * x[1] + t) - v_lower = 1.0 - v_upper = 0.9 - b = 1.0 + 0.1 * cos(2.0 * ω * x[1]) - - return prim2cons(SVector(H_upper, v_upper, H_lower, v_lower, b), equations) -end - -""" - source_terms_convergence_test(u, x, t, equations::ShallowWaterTwoLayerEquations1D) - -Source terms used for convergence tests in combination with -[`initial_condition_convergence_test`](@ref) -(and [`BoundaryConditionDirichlet(initial_condition_convergence_test)`](@ref) -in non-periodic domains). -""" -@inline function source_terms_convergence_test(u, x, t, - equations::ShallowWaterTwoLayerEquations1D) - # Same settings as in `initial_condition_convergence_test`. 
Some derivative simplify because - # this manufactured solution velocity is taken to be constant - ω = 2 * pi * sqrt(2.0) - - du1 = (-0.1 * cos(t + ω * x[1]) - 0.1 * sin(t + ω * x[1]) - - 0.09 * ω * cos(t + ω * x[1]) + - -0.09 * ω * sin(t + ω * x[1])) - du2 = (5.0 * (-0.1 * ω * cos(t + ω * x[1]) - 0.1 * ω * sin(t + ω * x[1])) * - (4.0 + 0.2 * cos(t + ω * x[1]) + - -0.2 * sin(t + ω * x[1])) + - 0.1 * ω * (20.0 + cos(t + ω * x[1]) - sin(t + ω * x[1])) * - cos(t + - ω * x[1]) - 0.09 * cos(t + ω * x[1]) - 0.09 * sin(t + ω * x[1]) - - 0.081 * ω * cos(t + ω * x[1]) + - -0.081 * ω * sin(t + ω * x[1])) - du3 = 0.1 * cos(t + ω * x[1]) + 0.1 * ω * cos(t + ω * x[1]) + - 0.2 * ω * sin(2.0 * ω * x[1]) - du4 = ((10.0 + sin(t + ω * x[1]) - cos(2ω * x[1])) * - (-0.09 * ω * cos(t + ω * x[1]) - 0.09 * ω * sin(t + - ω * x[1]) - - 0.2 * ω * sin(2 * ω * x[1])) + 0.1 * cos(t + ω * x[1]) + - 0.1 * ω * cos(t + ω * x[1]) + - 5.0 * (0.1 * ω * cos(t + ω * x[1]) + 0.2 * ω * sin(2.0 * ω * x[1])) * - (2.0 + 0.2 * sin(t + ω * x[1]) + - -0.2 * cos(2.0 * ω * x[1])) + 0.2 * ω * sin(2.0 * ω * x[1])) - - return SVector(du1, du2, du3, du4, zero(eltype(u))) -end - -""" - boundary_condition_slip_wall(u_inner, orientation_or_normal, x, t, surface_flux_function, - equations::ShallowWaterTwoLayerEquations1D) - -Create a boundary state by reflecting the normal velocity component and keep -the tangential velocity component unchanged. The boundary water height is taken from -the internal value. - -For details see Section 9.2.5 of the book: -- Eleuterio F. Toro (2001) - Shock-Capturing Methods for Free-Surface Shallow Flows - 1st edition - ISBN 0471987662 -""" -@inline function boundary_condition_slip_wall(u_inner, orientation_or_normal, direction, - x, t, surface_flux_function, - equations::ShallowWaterTwoLayerEquations1D) - # create the "external" boundary solution state - u_boundary = SVector(u_inner[1], - -u_inner[2], - u_inner[3], - -u_inner[4], - u_inner[5]) - - # calculate the boundary flux - if iseven(direction) # u_inner is "left" of boundary, u_boundary is "right" of boundary - f = surface_flux_function(u_inner, u_boundary, orientation_or_normal, equations) - else # u_boundary is "left" of boundary, u_inner is "right" of boundary - f = surface_flux_function(u_boundary, u_inner, orientation_or_normal, equations) - end - return f -end - -# Calculate 1D flux for a single point -# Note, the bottom topography has no flux -@inline function flux(u, orientation::Integer, - equations::ShallowWaterTwoLayerEquations1D) - h_upper, h_v_upper, h_lower, h_v_lower, _ = u - - # Calculate velocities - v_upper, v_lower = velocity(u, equations) - # Calculate pressure - p_upper = 0.5 * equations.gravity * h_upper^2 - p_lower = 0.5 * equations.gravity * h_lower^2 - - f1 = h_v_upper - f2 = h_v_upper * v_upper + p_upper - f3 = h_v_lower - f4 = h_v_lower * v_lower + p_lower - - return SVector(f1, f2, f3, f4, zero(eltype(u))) -end - -""" - flux_nonconservative_ersing_etal(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterTwoLayerEquations1D) - -!!! warning "Experimental code" - This numerical flux is experimental and may change in any future release. - -Non-symmetric path-conservative two-point volume flux discretizing the nonconservative (source) term -that contains the gradient of the bottom topography [`ShallowWaterTwoLayerEquations1D`](@ref) and an -additional term that couples the momentum of both layers. 
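As an aside, a hedged sketch of how this nonconservative term and the companion fluxes defined further down in this file are typically assembled into a DGSEM solver; the polynomial degree and the entropy-stable surface choice are assumptions:

```julia
using Trixi

# Entropy-conservative volume pairing and entropy-stable surface pairing,
# each combined with the path-conservative nonconservative term.
volume_flux = (flux_wintermeyer_etal, flux_nonconservative_ersing_etal)
surface_flux = (flux_es_ersing_etal, flux_nonconservative_ersing_etal)
solver = DGSEM(polydeg = 3, surface_flux = surface_flux,
               volume_integral = VolumeIntegralFluxDifferencing(volume_flux))
```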
- -This is a modified version of [`flux_nonconservative_wintermeyer_etal`](@ref) that gives entropy -conservation and well-balancedness in both the volume and surface when combined with -[`flux_wintermeyer_etal`](@ref). - -For further details see: -- Patrick Ersing, Andrew R. Winters (2023) - An entropy stable discontinuous Galerkin method for the two-layer shallow water equations on - curvilinear meshes - [DOI: 10.48550/arXiv.2306.12699](https://doi.org/10.48550/arXiv.2306.12699) -""" -@inline function flux_nonconservative_ersing_etal(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterTwoLayerEquations1D) - # Pull the necessary left and right state information - h_upper_ll, h_lower_ll = waterheight(u_ll, equations) - h_upper_rr, h_lower_rr = waterheight(u_rr, equations) - b_rr = u_rr[5] - b_ll = u_ll[5] - - # Calculate jumps - h_upper_jump = (h_upper_rr - h_upper_ll) - h_lower_jump = (h_lower_rr - h_lower_ll) - b_jump = (b_rr - b_ll) - - z = zero(eltype(u_ll)) - - # Bottom gradient nonconservative term: (0, g*h_upper*(b+h_lower)_x, - # 0, g*h_lower*(b+r*h_upper)_x, 0) - f = SVector(z, - equations.gravity * h_upper_ll * (b_jump + h_lower_jump), - z, - equations.gravity * h_lower_ll * (b_jump + equations.r * h_upper_jump), - z) - return f -end - -""" - flux_wintermeyer_etal(u_ll, u_rr, orientation, - equations::ShallowWaterTwoLayerEquations1D) - -Total energy conservative (mathematical entropy for two-layer shallow water equations) split form. -When the bottom topography is nonzero this scheme will be well-balanced when used with the -nonconservative [`flux_nonconservative_ersing_etal`](@ref). To obtain the flux for the -two-layer shallow water equations the flux that is described in the paper for the normal shallow -water equations is used within each layer. - -Further details are available in Theorem 1 of the paper: -- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and David A. Kopriva (2017) - An entropy stable nodal discontinuous Galerkin method for the two dimensional - shallow water equations on unstructured curvilinear meshes with discontinuous bathymetry - [DOI: 10.1016/j.jcp.2017.03.036](https://doi.org/10.1016/j.jcp.2017.03.036) -""" -@inline function flux_wintermeyer_etal(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterTwoLayerEquations1D) - # Unpack left and right state - h_upper_ll, h_v_upper_ll, h_lower_ll, h_v_lower_ll, _ = u_ll - h_upper_rr, h_v_upper_rr, h_lower_rr, h_v_lower_rr, _ = u_rr - - # Get the velocities on either side - v_upper_ll, v_lower_ll = velocity(u_ll, equations) - v_upper_rr, v_lower_rr = velocity(u_rr, equations) - - # Average each factor of products in flux - v_upper_avg = 0.5 * (v_upper_ll + v_upper_rr) - v_lower_avg = 0.5 * (v_lower_ll + v_lower_rr) - p_upper_avg = 0.5 * equations.gravity * h_upper_ll * h_upper_rr - p_lower_avg = 0.5 * equations.gravity * h_lower_ll * h_lower_rr - - # Calculate fluxes - f1 = 0.5 * (h_v_upper_ll + h_v_upper_rr) - f2 = f1 * v_upper_avg + p_upper_avg - f3 = 0.5 * (h_v_lower_ll + h_v_lower_rr) - f4 = f3 * v_lower_avg + p_lower_avg - - return SVector(f1, f2, f3, f4, zero(eltype(u_ll))) -end - -""" - flux_es_ersing_etal(u_ll, u_rr, orientation_or_normal_direction, - equations::ShallowWaterTwoLayerEquations1D) -Entropy stable surface flux for the two-layer shallow water equations. Uses the entropy conservative -[`flux_wintermeyer_etal`](@ref) and adds a Lax-Friedrichs type dissipation dependent on the jump of -entropy variables. - -For further details see: -- Patrick Ersing, Andrew R. 
Winters (2023) - An entropy stable discontinuous Galerkin method for the two-layer shallow water equations on - curvilinear meshes - [DOI: 10.48550/arXiv.2306.12699](https://doi.org/10.48550/arXiv.2306.12699) -""" -@inline function flux_es_ersing_etal(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterTwoLayerEquations1D) - # Compute entropy conservative flux but without the bottom topography - f_ec = flux_wintermeyer_etal(u_ll, u_rr, - orientation, - equations) - - # Get maximum signal velocity - λ = max_abs_speed_naive(u_ll, u_rr, orientation, equations) - # Get entropy variables but without the bottom topography - q_rr = cons2entropy(u_rr, equations) - q_ll = cons2entropy(u_ll, equations) - - # Average values from left and right - u_avg = (u_ll + u_rr) / 2 - - # Introduce variables for better readability - rho_upper = equations.rho_upper - rho_lower = equations.rho_lower - g = equations.gravity - drho = rho_upper - rho_lower - - # Compute entropy Jacobian coefficients - h11 = -rho_lower / (g * rho_upper * drho) - h12 = -rho_lower * u_avg[2] / (g * rho_upper * u_avg[1] * drho) - h13 = 1.0 / (g * drho) - h14 = u_avg[4] / (g * u_avg[3] * drho) - h21 = -rho_lower * u_avg[2] / (g * rho_upper * u_avg[1] * drho) - h22 = ((g * rho_upper * u_avg[1]^3 - g * rho_lower * u_avg[1]^3 + - -rho_lower * u_avg[2]^2) / (g * rho_upper * u_avg[1]^2 * drho)) - h23 = u_avg[2] / (g * u_avg[1] * drho) - h24 = u_avg[2] * u_avg[4] / (g * u_avg[1] * u_avg[3] * drho) - h31 = 1.0 / (g * drho) - h32 = u_avg[2] / (g * u_avg[1] * drho) - h33 = -1.0 / (g * drho) - h34 = -u_avg[4] / (g * u_avg[3] * drho) - h41 = u_avg[4] / (g * u_avg[3] * drho) - h42 = u_avg[2] * u_avg[4] / (g * u_avg[1] * u_avg[3] * drho) - h43 = -u_avg[4] / (g * u_avg[3] * drho) - h44 = ((g * rho_upper * u_avg[3]^3 - g * rho_lower * u_avg[3]^3 + - -rho_lower * u_avg[4]^2) / (g * rho_lower * u_avg[3]^2 * drho)) - - # Entropy Jacobian matrix - H = @SMatrix [[h11;; h12;; h13;; h14;; 0]; - [h21;; h22;; h23;; h24;; 0]; - [h31;; h32;; h33;; h34;; 0]; - [h41;; h42;; h43;; h44;; 0]; - [0;; 0;; 0;; 0;; 0]] - - # Add dissipation to entropy conservative flux to obtain entropy stable flux - f_es = f_ec - 0.5 * λ * H * (q_rr - q_ll) - - return SVector(f_es[1], f_es[2], f_es[3], f_es[4], zero(eltype(u_ll))) -end - -# Calculate approximation for maximum wave speed for local Lax-Friedrichs-type dissipation as the -# maximum velocity magnitude plus the maximum speed of sound. This function uses approximate -# eigenvalues using the speed of the barotropic mode as there is no simple way to calculate them -# analytically. -# -# A good overview of the derivation is given in: -# - Jonas Nycander, Andrew McC. Hogg, Leela M. 
Frankcombe (2008) -# Open boundary conditions for nonlinear channel Flows -# [DOI: 10.1016/j.ocemod.2008.06.003](https://doi.org/10.1016/j.ocemod.2008.06.003) -@inline function max_abs_speed_naive(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterTwoLayerEquations1D) - # Unpack left and right state - h_upper_ll, h_v_upper_ll, h_lower_ll, h_v_lower_ll, _ = u_ll - h_upper_rr, h_v_upper_rr, h_lower_rr, h_v_lower_rr, _ = u_rr - - # Get the averaged velocity - v_m_ll = (h_v_upper_ll + h_v_lower_ll) / (h_upper_ll + h_lower_ll) - v_m_rr = (h_v_upper_rr + h_v_lower_rr) / (h_upper_rr + h_lower_rr) - - # Calculate the wave celerity on the left and right - h_upper_ll, h_lower_ll = waterheight(u_ll, equations) - h_upper_rr, h_lower_rr = waterheight(u_rr, equations) - c_ll = sqrt(equations.gravity * (h_upper_ll + h_lower_ll)) - c_rr = sqrt(equations.gravity * (h_upper_rr + h_lower_rr)) - - return (max(abs(v_m_ll) + c_ll, abs(v_m_rr) + c_rr)) -end - -# Specialized `DissipationLocalLaxFriedrichs` to avoid spurious dissipation in the bottom -# topography -@inline function (dissipation::DissipationLocalLaxFriedrichs)(u_ll, u_rr, - orientation_or_normal_direction, - equations::ShallowWaterTwoLayerEquations1D) - λ = dissipation.max_abs_speed(u_ll, u_rr, orientation_or_normal_direction, - equations) - diss = -0.5 * λ * (u_rr - u_ll) - return SVector(diss[1], diss[2], diss[3], diss[4], zero(eltype(u_ll))) -end - -# Absolute speed of the barotropic mode -@inline function max_abs_speeds(u, equations::ShallowWaterTwoLayerEquations1D) - h_upper, h_v_upper, h_lower, h_v_lower, _ = u - - # Calculate averaged velocity of both layers - v_m = (h_v_upper + h_v_lower) / (h_upper + h_lower) - c = sqrt(equations.gravity * (h_upper + h_lower)) - - return (abs(v_m) + c) -end - -# Helper function to extract the velocity vector from the conservative variables -@inline function velocity(u, equations::ShallowWaterTwoLayerEquations1D) - h_upper, h_v_upper, h_lower, h_v_lower, _ = u - - v_upper = h_v_upper / h_upper - v_lower = h_v_lower / h_lower - return SVector(v_upper, v_lower) -end - -# Convert conservative variables to primitive -@inline function cons2prim(u, equations::ShallowWaterTwoLayerEquations1D) - h_upper, _, h_lower, _, b = u - - H_lower = h_lower + b - H_upper = h_lower + h_upper + b - v_upper, v_lower = velocity(u, equations) - return SVector(H_upper, v_upper, H_lower, v_lower, b) -end - -# Convert conservative variables to entropy variables -# Note, only the first four are the entropy variables, the fifth entry still just carries the -# bottom topography values for convenience -@inline function cons2entropy(u, equations::ShallowWaterTwoLayerEquations1D) - h_upper, _, h_lower, _, b = u - v_upper, v_lower = velocity(u, equations) - - w1 = (equations.rho_upper * - (equations.gravity * (h_upper + h_lower + b) - 0.5 * v_upper^2)) - w2 = equations.rho_upper * v_upper - w3 = (equations.rho_lower * - (equations.gravity * (equations.r * h_upper + h_lower + b) - 0.5 * v_lower^2)) - w4 = equations.rho_lower * v_lower - return SVector(w1, w2, w3, w4, b) -end - -# Convert primitive to conservative variables -@inline function prim2cons(prim, equations::ShallowWaterTwoLayerEquations1D) - H_upper, v_upper, H_lower, v_lower, b = prim - - h_lower = H_lower - b - h_upper = H_upper - h_lower - b - h_v_upper = h_upper * v_upper - h_v_lower = h_lower * v_lower - return SVector(h_upper, h_v_upper, h_lower, h_v_lower, b) -end - -@inline function waterheight(u, equations::ShallowWaterTwoLayerEquations1D) - return SVector(u[1], 
u[3]) -end - -# Entropy function for the shallow water equations is the total energy -@inline function entropy(cons, equations::ShallowWaterTwoLayerEquations1D) - energy_total(cons, equations) -end - -# Calculate total energy for a conservative state `cons` -@inline function energy_total(cons, equations::ShallowWaterTwoLayerEquations1D) - h_upper, h_v_upper, h_lower, h_v_lower, b = cons - # Set new variables for better readability - g = equations.gravity - rho_upper = equations.rho_upper - rho_lower = equations.rho_lower - - e = (0.5 * rho_upper * (h_v_upper^2 / h_upper + g * h_upper^2) + - 0.5 * rho_lower * (h_v_lower^2 / h_lower + g * h_lower^2) + - g * rho_lower * h_lower * b + g * rho_upper * h_upper * (h_lower + b)) - return e -end - -# Calculate kinetic energy for a conservative state `cons` -@inline function energy_kinetic(u, equations::ShallowWaterTwoLayerEquations1D) - h_upper, h_v_upper, h_lower, h_v_lower, _ = u - return (0.5 * equations.rho_upper * h_v_upper^2 / h_upper + - 0.5 * equations.rho_lower * h_v_lower^2 / h_lower) -end - -# Calculate potential energy for a conservative state `cons` -@inline function energy_internal(cons, equations::ShallowWaterTwoLayerEquations1D) - return energy_total(cons, equations) - energy_kinetic(cons, equations) -end - -# Calculate the error for the "lake-at-rest" test case where H = h_upper+h_lower+b should -# be a constant value over time -@inline function lake_at_rest_error(u, equations::ShallowWaterTwoLayerEquations1D) - h_upper, _, h_lower, _, b = u - return abs(equations.H0 - (h_upper + h_lower + b)) -end -end # @muladd diff --git a/src/equations/shallow_water_two_layer_2d.jl b/src/equations/shallow_water_two_layer_2d.jl deleted file mode 100644 index a31d881f2e..0000000000 --- a/src/equations/shallow_water_two_layer_2d.jl +++ /dev/null @@ -1,805 +0,0 @@ -# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). -# Since these FMAs can increase the performance of many numerical algorithms, -# we need to opt-in explicitly. -# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. -@muladd begin -#! format: noindent - -# TODO: TrixiShallowWater: 2D two layer equations should move to new package - -@doc raw""" - ShallowWaterTwoLayerEquations2D(gravity, H0, rho_upper, rho_lower) - -Two-Layer Shallow water equations (2LSWE) in two space dimension. 
The equations are given by -```math -\begin{alignat*}{8} -&\frac{\partial}{\partial t}h_{upper} -&&+ \frac{\partial}{\partial x}\left(h_{upper} v_{1,upper}\right) -&&+ \frac{\partial}{\partial y}\left(h_{upper} v_{2,upper}\right) \quad -&&= \quad 0 \\ -&\frac{\partial}{\partial t}\left(h_{upper} v_{1,upper}\right) -&&+ \frac{\partial}{\partial x}\left(h_{upper} v_{1,upper}^2 + \frac{gh_{upper}^2}{2}\right) -&&+ \frac{\partial}{\partial y}\left(h_{upper} v_{1,upper} v_{2,upper}\right) \quad -&&= -gh_{upper}\frac{\partial}{\partial x}\left(b+h_{lower}\right) \\ -&\frac{\partial}{\partial t}\left(h_{upper} v_{2,upper}\right) -&&+ \frac{\partial}{\partial x}\left(h_{upper} v_{1,upper} v_{2,upper}\right) -&&+ \frac{\partial}{\partial y}\left(h_{upper} v_{2,upper}^2 + \frac{gh_{upper}^2}{2}\right) -&&= -gh_{upper}\frac{\partial}{\partial y}\left(b+h_{lower}\right)\\ -&\frac{\partial}{\partial t}h_{lower} -&&+ \frac{\partial}{\partial x}\left(h_{lower} v_{1,lower}\right) -&&+ \frac{\partial}{\partial y}\left(h_{lower} v_{2,lower}\right) -&&= \quad 0 \\ -&\frac{\partial}{\partial t}\left(h_{lower} v_{1,lower}\right) -&&+ \frac{\partial}{\partial x}\left(h_{lower} v_{1,lower}^2 + \frac{gh_{lower}^2}{2}\right) -&&+ \frac{\partial}{\partial y}\left(h_{lower} v_{1,lower} v_{2,lower}\right) -&&= -gh_{lower}\frac{\partial}{\partial x}\left(b+\frac{\rho_{upper}}{\rho_{lower}} h_{upper}\right)\\ -&\frac{\partial}{\partial t}\left(h_{lower} v_{2,lower}\right) -&&+ \frac{\partial}{\partial x}\left(h_{lower} v_{1,lower} v_{2,lower}\right) -&&+ \frac{\partial}{\partial y}\left(h_{lower} v_{2,lower}^2 + \frac{gh_{lower}^2}{2}\right) -&&= -gh_{lower}\frac{\partial}{\partial y}\left(b+\frac{\rho_{upper}}{\rho_{lower}} h_{upper}\right) -\end{alignat*} -``` -The unknown quantities of the 2LSWE are the water heights of the lower layer ``h_{lower}`` and the -upper -layer ``h_{upper}`` and the respective velocities in x-direction ``v_{1,lower}`` and ``v_{1,upper}`` and in y-direction -``v_{2,lower}`` and ``v_{2,upper}``. The gravitational constant is denoted by `g`, the layer densitites by -``\rho_{upper}``and ``\rho_{lower}`` and the (possibly) variable bottom topography function by ``b(x)``. -Conservative variable water height ``h_{lower}`` is measured from the bottom topography ``b`` and ``h_{upper}`` -relative to ``h_{lower}``, therefore one also defines the total water heights as ``H_{lower} = h_{lower} + b`` and -``H_{upper} = h_{upper} + h_{lower} + b``. - -The densities must be chosen such that ``\rho_{upper} < \rho_{lower}``, to make sure that the heavier fluid -``\rho_{lower}`` is in the bottom layer and the lighter fluid ``\rho_{upper}`` in the upper layer. - -The additional quantity ``H_0`` is also available to store a reference value for the total water -height that is useful to set initial conditions or test the "lake-at-rest" well-balancedness. - -The bottom topography function ``b(x)`` is set inside the initial condition routine -for a particular problem setup. - -In addition to the unknowns, Trixi currently stores the bottom topography values at the -approximation points despite being fixed in time. This is done for convenience of computing the -bottom topography gradients on the fly during the approximation as well as computing auxiliary -quantities like the total water height ``H`` or the entropy variables. -This affects the implementation and use of these equations in various ways: -* The flux values corresponding to the bottom topography must be zero. 
-
-* The bottom topography values must be included when defining initial conditions, boundary
-  conditions or source terms.
-* [`AnalysisCallback`](@ref) analyzes this variable.
-* Trixi's visualization tools will visualize the bottom topography by default.
-
-A good introduction for the 2LSWE is available in Chapter 12 of the book:
- - Benoit Cushman-Roisin (2011)\
-   Introduction to geophysical fluid dynamics: physical and numerical aspects\
-   \
-   ISBN: 978-0-12-088759-0
-"""
-struct ShallowWaterTwoLayerEquations2D{RealT <: Real} <:
-       AbstractShallowWaterEquations{2, 7}
-    gravity::RealT # gravitational constant
-    H0::RealT # constant "lake-at-rest" total water height
-    rho_upper::RealT # upper layer density
-    rho_lower::RealT # lower layer density
-    r::RealT # ratio of rho_upper / rho_lower
-end
-
-# Allow for flexibility to set the gravitational constant within an elixir depending on the
-# application where `gravity_constant=1.0` or `gravity_constant=9.81` are common values.
-# The reference total water height H0 defaults to 0.0 but is used for the "lake-at-rest"
-# well-balancedness test cases. Densities must be specified such that rho_upper < rho_lower.
-function ShallowWaterTwoLayerEquations2D(; gravity_constant,
-                                         H0 = zero(gravity_constant), rho_upper,
-                                         rho_lower)
-    # Assign density ratio if rho_upper <= rho_lower
-    if rho_upper > rho_lower
-        error("Invalid input: Densities must be chosen such that rho_upper <= rho_lower")
-    else
-        r = rho_upper / rho_lower
-    end
-    ShallowWaterTwoLayerEquations2D(gravity_constant, H0, rho_upper, rho_lower, r)
-end
-
-have_nonconservative_terms(::ShallowWaterTwoLayerEquations2D) = True()
-function varnames(::typeof(cons2cons), ::ShallowWaterTwoLayerEquations2D)
-    ("h_upper", "h_v1_upper", "h_v2_upper", "h_lower", "h_v1_lower", "h_v2_lower", "b")
-end
-# Note, we use the total water height, H_upper = h_upper + h_lower + b, and first layer total height
-# H_lower = h_lower + b as the first primitive variable for easier visualization and setting initial
-# conditions
-function varnames(::typeof(cons2prim), ::ShallowWaterTwoLayerEquations2D)
-    ("H_upper", "v1_upper", "v2_upper", "H_lower", "v1_lower", "v2_lower", "b")
-end
-
-# Set initial conditions at physical location `x` for time `t`
-"""
-    initial_condition_convergence_test(x, t, equations::ShallowWaterTwoLayerEquations2D)
-
-A smooth initial condition used for convergence tests in combination with
-[`source_terms_convergence_test`](@ref). Constants must be set to ``rho_{upper} = 0.9``,
-``rho_{lower} = 1.0``, ``g = 10.0``.
-"""
-function initial_condition_convergence_test(x, t,
-                                            equations::ShallowWaterTwoLayerEquations2D)
-    # some constants are chosen such that the function is periodic on the domain [0,sqrt(2)]^2
-    ω = 2.0 * pi * sqrt(2.0)
-
-    H_lower = 2.0 + 0.1 * sin(ω * x[1] + t) * cos(ω * x[2] + t)
-    H_upper = 4.0 + 0.1 * cos(ω * x[1] + t) * sin(ω * x[2] + t)
-    v1_lower = 1.0
-    v1_upper = 0.9
-    v2_lower = 0.9
-    v2_upper = 1.0
-    b = 1.0 + 0.1 * cos(0.5 * ω * x[1]) * sin(0.5 * ω * x[2])
-
-    return prim2cons(SVector(H_upper, v1_upper, v2_upper, H_lower, v1_lower, v2_lower,
-                             b), equations)
-end
-
-"""
-    source_terms_convergence_test(u, x, t, equations::ShallowWaterTwoLayerEquations2D)
-
-Source terms used for convergence tests in combination with
-[`initial_condition_convergence_test`](@ref).
-"""
-@inline function source_terms_convergence_test(u, x, t,
-                                               equations::ShallowWaterTwoLayerEquations2D)
-    # Same settings as in `initial_condition_convergence_test`.
- # some constants are chosen such that the function is periodic on the domain [0,sqrt(2)]^2] - ω = 2.0 * pi * sqrt(2.0) - - # Source terms obtained with SymPy - du1 = 0.01 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.01 * ω * sin(t + ω * x[1]) * sin(t + ω * x[2]) - du2 = (5.0 * - (-0.1 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) - - 0.1 * ω * sin(t + ω * x[1]) * sin(t + - ω * x[2])) * - (4.0 + 0.2cos(t + ω * x[1]) * sin(t + ω * x[2]) - - 0.2 * sin(t + ω * x[1]) * cos(t + - ω * x[2])) + - 0.009 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.009 * ω * sin(t + ω * x[1]) * sin(t + - ω * x[2]) + - 0.1 * ω * - (20.0 + cos(t + ω * x[1]) * sin(t + ω * x[2]) - - sin(t + ω * x[1]) * cos(t + - ω * x[2])) * cos(t + ω * x[1]) * cos(t + ω * x[2])) - du3 = (5.0 * - (0.1 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.1 * ω * sin(t + ω * x[1]) * sin(t + - ω * x[2])) * - (4.0 + 0.2 * cos(t + ω * x[1]) * sin(t + ω * x[2]) - - 0.2 * sin(t + ω * x[1]) * cos(t + - ω * x[2])) + - 0.01ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.01 * ω * sin(t + ω * x[1]) * sin(t + ω * x[2]) + - -0.1 * ω * - (20.0 + cos(t + ω * x[1]) * sin(t + ω * x[2]) - - sin(t + ω * x[1]) * cos(t + ω * x[2])) * sin(t + - ω * x[1]) * sin(t + ω * x[2])) - du4 = (0.1 * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.1 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.05 * ω * sin(0.5 * ω * x[1]) * sin(0.5 * ω * x[2]) - - 0.1 * sin(t + ω * x[1]) * sin(t + ω * x[2]) + - -0.045 * ω * cos(0.5 * ω * x[1]) * cos(0.5 * ω * x[2]) - - 0.09 * ω * sin(t + ω * x[1]) * sin(t + ω * x[2])) - du5 = ((10.0 + sin(t + ω * x[1]) * cos(t + ω * x[2]) - - cos(0.5 * ω * x[1]) * sin(0.5 * ω * x[2])) * (-0.09 * ω * cos(t + - ω * x[1]) * cos(t + ω * x[2]) - - 0.09 * ω * sin(t + ω * x[1]) * sin(t + ω * x[2]) + - -0.05 * ω * sin(0.5 * ω * x[1]) * sin(0.5 * ω * x[2])) + - 5.0 * - (0.1 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.05 * ω * sin(0.5 * ω * x[1]) * sin(0.5 * ω * x[2])) * - (2.0 + 0.2 * sin(t + ω * x[1]) * cos(t + ω * x[2]) + - -0.2 * cos(0.5 * ω * x[1]) * sin(0.5 * ω * x[2])) + - 0.1 * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.1 * ω * cos(t + - ω * x[1]) * cos(t + ω * x[2]) + - 0.05 * ω * sin(0.5 * ω * x[1]) * sin(0.5 * ω * x[2]) - - 0.1 * sin(t + - ω * x[1]) * sin(t + ω * x[2]) - - 0.045 * ω * cos(0.5 * ω * x[1]) * cos(0.5 * ω * x[2]) - - 0.09 * ω * sin(t + - ω * x[1]) * sin(t + ω * x[2])) - du6 = ((10.0 + sin(t + ω * x[1]) * cos(t + ω * x[2]) + - -cos(0.5 * ω * x[1]) * sin(0.5 * ω * x[2])) * - (0.05 * ω * cos(0.5 * ω * x[1]) * cos(0.5 * ω * x[2]) + - 0.09 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.09 * ω * sin(t + ω * x[1]) * sin(t + ω * x[2])) + - 5.0 * - (-0.05 * ω * cos(0.5 * ω * x[1]) * cos(0.5 * ω * x[2]) - - 0.1 * ω * sin(t + ω * x[1]) * sin(t + - ω * x[2])) * - (2.0 + 0.2 * sin(t + ω * x[1]) * cos(t + ω * x[2]) + - -0.2 * cos(0.5 * ω * x[1]) * sin(0.5 * ω * x[2])) + - 0.09cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.09 * ω * cos(t + ω * x[1]) * cos(t + ω * x[2]) + - 0.045 * ω * sin(0.5 * ω * x[1]) * sin(0.5 * ω * x[2]) + - -0.09 * sin(t + ω * x[1]) * sin(t + ω * x[2]) - - 0.0405 * ω * cos(0.5 * ω * x[1]) * cos(0.5 * ω * x[2]) + - -0.081 * ω * sin(t + ω * x[1]) * sin(t + ω * x[2])) - - return SVector(du1, du2, du3, du4, du5, du6, zero(eltype(u))) -end - -""" - boundary_condition_slip_wall(u_inner, normal_direction, x, t, surface_flux_function, - equations::ShallowWaterTwoLayerEquations2D) - -Create a boundary state by reflecting the normal velocity component and keep -the tangential velocity component unchanged. 
The boundary water height is taken from -the internal value. - -For details see Section 9.2.5 of the book: -- Eleuterio F. Toro (2001) - Shock-Capturing Methods for Free-Surface Shallow Flows - 1st edition - ISBN 0471987662 -""" -@inline function boundary_condition_slip_wall(u_inner, normal_direction::AbstractVector, - x, t, surface_flux_function, - equations::ShallowWaterTwoLayerEquations2D) - # normalize the outward pointing direction - normal = normal_direction / norm(normal_direction) - - # compute the normal velocity - v_normal_upper = normal[1] * u_inner[2] + normal[2] * u_inner[3] - v_normal_lower = normal[1] * u_inner[5] + normal[2] * u_inner[6] - - # create the "external" boundary solution state - u_boundary = SVector(u_inner[1], - u_inner[2] - 2.0 * v_normal_upper * normal[1], - u_inner[3] - 2.0 * v_normal_upper * normal[2], - u_inner[4], - u_inner[5] - 2.0 * v_normal_lower * normal[1], - u_inner[6] - 2.0 * v_normal_lower * normal[2], - u_inner[7]) - - # calculate the boundary flux - flux = surface_flux_function(u_inner, u_boundary, normal_direction, equations) - return flux -end - -# Calculate 1D flux for a single point -# Note, the bottom topography has no flux -@inline function flux(u, orientation::Integer, - equations::ShallowWaterTwoLayerEquations2D) - h_upper, h_v1_upper, h_v2_upper, h_lower, h_v1_lower, h_v2_lower, _ = u - - # Calculate velocities - v1_upper, v2_upper, v1_lower, v2_lower = velocity(u, equations) - - # Calculate pressure - p_upper = 0.5 * equations.gravity * h_upper^2 - p_lower = 0.5 * equations.gravity * h_lower^2 - - # Calculate fluxes depending on orientation - if orientation == 1 - f1 = h_v1_upper - f2 = h_v1_upper * v1_upper + p_upper - f3 = h_v1_upper * v2_upper - f4 = h_v1_lower - f5 = h_v1_lower * v1_lower + p_lower - f6 = h_v1_lower * v2_lower - else - f1 = h_v2_upper - f2 = h_v2_upper * v1_upper - f3 = h_v2_upper * v2_upper + p_upper - f4 = h_v2_lower - f5 = h_v2_lower * v1_lower - f6 = h_v2_lower * v2_lower + p_lower - end - return SVector(f1, f2, f3, f4, f5, f6, zero(eltype(u))) -end - -# Calculate 1D flux for a single point in the normal direction -# Note, this directional vector is not normalized and the bottom topography has no flux -@inline function flux(u, normal_direction::AbstractVector, - equations::ShallowWaterTwoLayerEquations2D) - h_upper, h_lower = waterheight(u, equations) - v1_upper, v2_upper, v1_lower, v2_lower = velocity(u, equations) - - v_normal_upper = v1_upper * normal_direction[1] + v2_upper * normal_direction[2] - v_normal_lower = v1_lower * normal_direction[1] + v2_lower * normal_direction[2] - h_v_upper_normal = h_upper * v_normal_upper - h_v_lower_normal = h_lower * v_normal_lower - - p_upper = 0.5 * equations.gravity * h_upper^2 - p_lower = 0.5 * equations.gravity * h_lower^2 - - f1 = h_v_upper_normal - f2 = h_v_upper_normal * v1_upper + p_upper * normal_direction[1] - f3 = h_v_upper_normal * v2_upper + p_upper * normal_direction[2] - f4 = h_v_lower_normal - f5 = h_v_lower_normal * v1_lower + p_lower * normal_direction[1] - f6 = h_v_lower_normal * v2_lower + p_lower * normal_direction[2] - - return SVector(f1, f2, f3, f4, f5, f6, zero(eltype(u))) -end - -""" - flux_nonconservative_ersing_etal(u_ll, u_rr, orientation::Integer, - equations::ShallowWaterTwoLayerEquations2D) - flux_nonconservative_ersing_etal(u_ll, u_rr, - normal_direction_ll::AbstractVector, - normal_direction_average::AbstractVector, - equations::ShallowWaterTwoLayerEquations2D) - -!!! 
warning "Experimental code" - This numerical flux is experimental and may change in any future release. - -Non-symmetric path-conservative two-point volume flux discretizing the nonconservative (source) term -that contains the gradient of the bottom topography [`ShallowWaterTwoLayerEquations2D`](@ref) and an -additional term that couples the momentum of both layers. - -This is a modified version of [`flux_nonconservative_wintermeyer_etal`](@ref) that gives entropy -conservation and well-balancedness in both the volume and surface when combined with -[`flux_wintermeyer_etal`](@ref). - -For further details see: -- Patrick Ersing, Andrew R. Winters (2023) - An entropy stable discontinuous Galerkin method for the two-layer shallow water equations on - curvilinear meshes - [DOI: 10.48550/arXiv.2306.12699](https://doi.org/10.48550/arXiv.2306.12699) -""" -@inline function flux_nonconservative_ersing_etal(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterTwoLayerEquations2D) - # Pull the necessary left and right state information - h_upper_ll, h_lower_ll = waterheight(u_ll, equations) - h_upper_rr, h_lower_rr = waterheight(u_rr, equations) - b_rr = u_rr[7] - b_ll = u_ll[7] - - # Calculate jumps - h_upper_jump = (h_upper_rr - h_upper_ll) - h_lower_jump = (h_lower_rr - h_lower_ll) - b_jump = (b_rr - b_ll) - - z = zero(eltype(u_ll)) - - # Bottom gradient nonconservative term: (0, g*h_upper*(b + h_lower)_x, g*h_upper*(b + h_lower)_y , - # 0, g*h_lower*(b + r*h_upper)_x, - # g*h_lower*(b + r*h_upper)_y, 0) - if orientation == 1 - f = SVector(z, - equations.gravity * h_upper_ll * (b_jump + h_lower_jump), - z, z, - equations.gravity * h_lower_ll * - (b_jump + equations.r * h_upper_jump), - z, z) - else # orientation == 2 - f = SVector(z, z, - equations.gravity * h_upper_ll * (b_jump + h_lower_jump), - z, z, - equations.gravity * h_lower_ll * - (b_jump + equations.r * h_upper_jump), - z) - end - - return f -end - -@inline function flux_nonconservative_ersing_etal(u_ll, u_rr, - normal_direction_ll::AbstractVector, - normal_direction_average::AbstractVector, - equations::ShallowWaterTwoLayerEquations2D) - # Pull the necessary left and right state information - h_upper_ll, h_lower_ll = waterheight(u_ll, equations) - h_upper_rr, h_lower_rr = waterheight(u_rr, equations) - b_rr = u_rr[7] - b_ll = u_ll[7] - - # Calculate jumps - h_upper_jump = (h_upper_rr - h_upper_ll) - h_lower_jump = (h_lower_rr - h_lower_ll) - b_jump = (b_rr - b_ll) - - # Note this routine only uses the `normal_direction_average` and the average of the - # bottom topography to get a quadratic split form DG gradient on curved elements - return SVector(zero(eltype(u_ll)), - normal_direction_average[1] * equations.gravity * h_upper_ll * - (b_jump + h_lower_jump), - normal_direction_average[2] * equations.gravity * h_upper_ll * - (b_jump + h_lower_jump), - zero(eltype(u_ll)), - normal_direction_average[1] * equations.gravity * h_lower_ll * - (b_jump + equations.r * h_upper_jump), - normal_direction_average[2] * equations.gravity * h_lower_ll * - (b_jump + equations.r * h_upper_jump), - zero(eltype(u_ll))) -end - -""" - flux_wintermeyer_etal(u_ll, u_rr, orientation, - equations::ShallowWaterTwoLayerEquations2D) - flux_wintermeyer_etal(u_ll, u_rr, - normal_direction::AbstractVector, - equations::ShallowWaterTwoLayerEquations2D) - -Total energy conservative (mathematical entropy for two-layer shallow water equations) split form. 
-When the bottom topography is nonzero this scheme will be well-balanced when used with the -nonconservative [`flux_nonconservative_ersing_etal`](@ref). To obtain the flux for the -two-layer shallow water equations the flux that is described in the paper for the normal shallow -water equations is used within each layer. - -Further details are available in Theorem 1 of the paper: -- Niklas Wintermeyer, Andrew R. Winters, Gregor J. Gassner and David A. Kopriva (2017) - An entropy stable nodal discontinuous Galerkin method for the two dimensional - shallow water equations on unstructured curvilinear meshes with discontinuous bathymetry - [DOI: 10.1016/j.jcp.2017.03.036](https://doi.org/10.1016/j.jcp.2017.03.036) -""" -@inline function flux_wintermeyer_etal(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterTwoLayerEquations2D) - # Unpack left and right state - h_upper_ll, h_v1_upper_ll, h_v2_upper_ll, h_lower_ll, h_v1_lower_ll, h_v2_lower_ll, _ = u_ll - h_upper_rr, h_v1_upper_rr, h_v2_upper_rr, h_lower_rr, h_v1_lower_rr, h_v2_lower_rr, _ = u_rr - - # Get the velocities on either side - v1_upper_ll, v2_upper_ll, v1_lower_ll, v2_lower_ll = velocity(u_ll, equations) - v1_upper_rr, v2_upper_rr, v1_lower_rr, v2_lower_rr = velocity(u_rr, equations) - - # Average each factor of products in flux - v1_upper_avg = 0.5 * (v1_upper_ll + v1_upper_rr) - v1_lower_avg = 0.5 * (v1_lower_ll + v1_lower_rr) - v2_upper_avg = 0.5 * (v2_upper_ll + v2_upper_rr) - v2_lower_avg = 0.5 * (v2_lower_ll + v2_lower_rr) - p_upper_avg = 0.5 * equations.gravity * h_upper_ll * h_upper_rr - p_lower_avg = 0.5 * equations.gravity * h_lower_ll * h_lower_rr - - # Calculate fluxes depending on orientation - if orientation == 1 - f1 = 0.5 * (h_v1_upper_ll + h_v1_upper_rr) - f2 = f1 * v1_upper_avg + p_upper_avg - f3 = f1 * v2_upper_avg - f4 = 0.5 * (h_v1_lower_ll + h_v1_lower_rr) - f5 = f4 * v1_lower_avg + p_lower_avg - f6 = f4 * v2_lower_avg - else - f1 = 0.5 * (h_v2_upper_ll + h_v2_upper_rr) - f2 = f1 * v1_upper_avg - f3 = f1 * v2_upper_avg + p_upper_avg - f4 = 0.5 * (h_v2_lower_ll + h_v2_lower_rr) - f5 = f4 * v1_lower_avg - f6 = f4 * v2_lower_avg + p_lower_avg - end - - return SVector(f1, f2, f3, f4, f5, f6, zero(eltype(u_ll))) -end - -@inline function flux_wintermeyer_etal(u_ll, u_rr, - normal_direction::AbstractVector, - equations::ShallowWaterTwoLayerEquations2D) - # Unpack left and right state - h_upper_ll, h_v1_upper_ll, h_v2_upper_ll, h_lower_ll, h_v1_lower_ll, h_v2_lower_ll, _ = u_ll - h_upper_rr, h_v1_upper_rr, h_v2_upper_rr, h_lower_rr, h_v1_lower_rr, h_v2_lower_rr, _ = u_rr - - # Get the velocities on either side - v1_upper_ll, v2_upper_ll, v1_lower_ll, v2_lower_ll = velocity(u_ll, equations) - v1_upper_rr, v2_upper_rr, v1_lower_rr, v2_lower_rr = velocity(u_rr, equations) - - # Average each factor of products in flux - v1_upper_avg = 0.5 * (v1_upper_ll + v1_upper_rr) - v1_lower_avg = 0.5 * (v1_lower_ll + v1_lower_rr) - v2_upper_avg = 0.5 * (v2_upper_ll + v2_upper_rr) - v2_lower_avg = 0.5 * (v2_lower_ll + v2_lower_rr) - p_upper_avg = 0.5 * equations.gravity * h_upper_ll * h_upper_rr - p_lower_avg = 0.5 * equations.gravity * h_lower_ll * h_lower_rr - h_v1_upper_avg = 0.5 * (h_v1_upper_ll + h_v1_upper_rr) - h_v2_upper_avg = 0.5 * (h_v2_upper_ll + h_v2_upper_rr) - h_v1_lower_avg = 0.5 * (h_v1_lower_ll + h_v1_lower_rr) - h_v2_lower_avg = 0.5 * (h_v2_lower_ll + h_v2_lower_rr) - - # Calculate fluxes depending on normal_direction - f1 = h_v1_upper_avg * normal_direction[1] + h_v2_upper_avg * normal_direction[2] - f2 = f1 
* v1_upper_avg + p_upper_avg * normal_direction[1] - f3 = f1 * v2_upper_avg + p_upper_avg * normal_direction[2] - f4 = h_v1_lower_avg * normal_direction[1] + h_v2_lower_avg * normal_direction[2] - f5 = f4 * v1_lower_avg + p_lower_avg * normal_direction[1] - f6 = f4 * v2_lower_avg + p_lower_avg * normal_direction[2] - - return SVector(f1, f2, f3, f4, f5, f6, zero(eltype(u_ll))) -end - -""" - flux_es_ersing_etal(u_ll, u_rr, orientation_or_normal_direction, - equations::ShallowWaterTwoLayerEquations2D) - -Entropy stable surface flux for the two-layer shallow water equations. Uses the entropy conservative -[`flux_wintermeyer_etal`](@ref) and adds a Lax-Friedrichs type dissipation dependent on the jump of -entropy variables. - -For further details see: -- Patrick Ersing, Andrew R. Winters (2023) - An entropy stable discontinuous Galerkin method for the two-layer shallow water equations on - curvilinear meshes - [DOI: 10.48550/arXiv.2306.12699](https://doi.org/10.48550/arXiv.2306.12699) -""" -@inline function flux_es_ersing_etal(u_ll, u_rr, - orientation_or_normal_direction, - equations::ShallowWaterTwoLayerEquations2D) - # Compute entropy conservative flux but without the bottom topography - f_ec = flux_wintermeyer_etal(u_ll, u_rr, - orientation_or_normal_direction, - equations) - - # Get maximum signal velocity - λ = max_abs_speed_naive(u_ll, u_rr, orientation_or_normal_direction, equations) - - # Get entropy variables but without the bottom topography - q_rr = cons2entropy(u_rr, equations) - q_ll = cons2entropy(u_ll, equations) - - # Average values from left and right - u_avg = (u_ll + u_rr) / 2 - - # Introduce variables for better readability - rho_upper = equations.rho_upper - rho_lower = equations.rho_lower - g = equations.gravity - drho = rho_upper - rho_lower - - # Compute entropy Jacobian coefficients - h11 = -rho_lower / (g * rho_upper * drho) - h12 = -rho_lower * u_avg[2] / (g * rho_upper * u_avg[1] * drho) - h13 = -rho_lower * u_avg[3] / (g * rho_upper * u_avg[1] * drho) - h14 = 1.0 / (g * drho) - h15 = u_avg[5] / (g * u_avg[4] * drho) - h16 = u_avg[6] / (g * u_avg[4] * drho) - h21 = -rho_lower * u_avg[2] / (g * rho_upper * u_avg[1] * drho) - h22 = ((g * rho_upper * u_avg[1]^3 - g * rho_lower * u_avg[1]^3 + - -rho_lower * u_avg[2]^2) / (g * rho_upper * u_avg[1]^2 * drho)) - h23 = -rho_lower * u_avg[2] * u_avg[3] / (g * rho_upper * u_avg[1]^2 * drho) - h24 = u_avg[2] / (g * u_avg[1] * drho) - h25 = u_avg[2] * u_avg[5] / (g * u_avg[1] * u_avg[4] * drho) - h26 = u_avg[2] * u_avg[6] / (g * u_avg[1] * u_avg[4] * drho) - h31 = -rho_lower * u_avg[3] / (g * rho_upper * u_avg[1] * drho) - h32 = -rho_lower * u_avg[2] * u_avg[3] / (g * rho_upper * u_avg[1]^2 * drho) - h33 = ((g * rho_upper * u_avg[1]^3 - g * rho_lower * u_avg[1]^3 + - -rho_lower * u_avg[3]^2) / (g * rho_upper * u_avg[1]^2 * drho)) - h34 = u_avg[3] / (g * u_avg[1] * drho) - h35 = u_avg[3] * u_avg[5] / (g * u_avg[1] * u_avg[4] * drho) - h36 = u_avg[3] * u_avg[6] / (g * u_avg[1] * u_avg[4] * drho) - h41 = 1.0 / (g * drho) - h42 = u_avg[2] / (g * u_avg[1] * drho) - h43 = u_avg[3] / (g * u_avg[1] * drho) - h44 = -1.0 / (g * drho) - h45 = -u_avg[5] / (g * u_avg[4] * drho) - h46 = -u_avg[6] / (g * u_avg[4] * drho) - h51 = u_avg[5] / (g * u_avg[4] * drho) - h52 = u_avg[2] * u_avg[5] / (g * u_avg[1] * u_avg[4] * drho) - h53 = u_avg[3] * u_avg[5] / (g * u_avg[1] * u_avg[4] * drho) - h54 = -u_avg[5] / (g * u_avg[4] * drho) - h55 = ((g * rho_upper * u_avg[4]^3 - g * rho_lower * u_avg[4]^3 + - -rho_lower * u_avg[5]^2) / (g * rho_lower * 
u_avg[4]^2 * drho)) - h56 = -u_avg[5] * u_avg[6] / (g * u_avg[4]^2 * drho) - h61 = u_avg[6] / (g * u_avg[4] * drho) - h62 = u_avg[2] * u_avg[6] / (g * u_avg[1] * u_avg[4] * drho) - h63 = u_avg[3] * u_avg[6] / (g * u_avg[1] * u_avg[4] * drho) - h64 = -u_avg[6] / (g * u_avg[4] * drho) - h65 = -u_avg[5] * u_avg[6] / (g * u_avg[4]^2 * drho) - h66 = ((g * rho_upper * u_avg[4]^3 - g * rho_lower * u_avg[4]^3 + - -rho_lower * u_avg[6]^2) / (g * rho_lower * u_avg[4]^2 * drho)) - - # Entropy Jacobian matrix - H = @SMatrix [[h11;; h12;; h13;; h14;; h15;; h16;; 0]; - [h21;; h22;; h23;; h24;; h25;; h26;; 0]; - [h31;; h32;; h33;; h34;; h35;; h36;; 0]; - [h41;; h42;; h43;; h44;; h45;; h46;; 0]; - [h51;; h52;; h53;; h54;; h55;; h56;; 0]; - [h61;; h62;; h63;; h64;; h65;; h66;; 0]; - [0;; 0;; 0;; 0;; 0;; 0;; 0]] - - # Add dissipation to entropy conservative flux to obtain entropy stable flux - f_es = f_ec - 0.5 * λ * H * (q_rr - q_ll) - - return SVector(f_es[1], f_es[2], f_es[3], f_es[4], f_es[5], f_es[6], - zero(eltype(u_ll))) -end - -# Calculate approximation for maximum wave speed for local Lax-Friedrichs-type dissipation as the -# maximum velocity magnitude plus the maximum speed of sound. This function uses approximate -# eigenvalues using the speed of the barotropic mode as there is no simple way to calculate them -# analytically. -# -# A good overview of the derivation is given in: -# - Jonas Nycander, Andrew McC. Hogg, Leela M. Frankcombe (2008) -# Open boundary conditions for nonlinear channel Flows -# [DOI: 10.1016/j.ocemod.2008.06.003](https://doi.org/10.1016/j.ocemod.2008.06.003) -@inline function max_abs_speed_naive(u_ll, u_rr, - orientation::Integer, - equations::ShallowWaterTwoLayerEquations2D) - # Unpack left and right state - h_upper_ll, h_v1_upper_ll, h_v2_upper_ll, h_lower_ll, h_v1_lower_ll, h_v2_lower_ll, _ = u_ll - h_upper_rr, h_v1_upper_rr, h_v2_upper_rr, h_lower_rr, h_v1_lower_rr, h_v2_lower_rr, _ = u_rr - - # Calculate averaged velocity of both layers - if orientation == 1 - v_m_ll = (h_v1_upper_ll + h_v1_lower_ll) / (h_upper_ll + h_lower_ll) - v_m_rr = (h_v1_upper_rr + h_v1_lower_rr) / (h_upper_rr + h_lower_rr) - else - v_m_ll = (h_v2_upper_ll + h_v2_lower_ll) / (h_upper_ll + h_lower_ll) - v_m_rr = (h_v2_upper_rr + h_v2_lower_rr) / (h_upper_rr + h_lower_rr) - end - - # Calculate the wave celerity on the left and right - h_upper_ll, h_lower_ll = waterheight(u_ll, equations) - h_upper_rr, h_lower_rr = waterheight(u_rr, equations) - - c_ll = sqrt(equations.gravity * (h_upper_ll + h_lower_ll)) - c_rr = sqrt(equations.gravity * (h_upper_rr + h_lower_rr)) - - return (max(abs(v_m_ll), abs(v_m_rr)) + max(c_ll, c_rr)) -end - -@inline function max_abs_speed_naive(u_ll, u_rr, - normal_direction::AbstractVector, - equations::ShallowWaterTwoLayerEquations2D) - # Unpack left and right state - h_upper_ll, _, _, h_lower_ll, _, _, _ = u_ll - h_upper_rr, _, _, h_lower_rr, _, _, _ = u_rr - - # Extract and compute the velocities in the normal direction - v1_upper_ll, v2_upper_ll, v1_lower_ll, v2_lower_ll = velocity(u_ll, equations) - v1_upper_rr, v2_upper_rr, v1_lower_rr, v2_lower_rr = velocity(u_rr, equations) - - v_upper_dot_n_ll = v1_upper_ll * normal_direction[1] + - v2_upper_ll * normal_direction[2] - v_upper_dot_n_rr = v1_upper_rr * normal_direction[1] + - v2_upper_rr * normal_direction[2] - v_lower_dot_n_ll = v1_lower_ll * normal_direction[1] + - v2_lower_ll * normal_direction[2] - v_lower_dot_n_rr = v1_lower_rr * normal_direction[1] + - v2_lower_rr * normal_direction[2] - - # Calculate averaged 
velocity of both layers - v_m_ll = (v_upper_dot_n_ll * h_upper_ll + v_lower_dot_n_ll * h_lower_ll) / - (h_upper_ll + h_lower_ll) - v_m_rr = (v_upper_dot_n_rr * h_upper_rr + v_lower_dot_n_rr * h_lower_rr) / - (h_upper_rr + h_lower_rr) - - # Compute the wave celerity on the left and right - h_upper_ll, h_lower_ll = waterheight(u_ll, equations) - h_upper_rr, h_lower_rr = waterheight(u_rr, equations) - - c_ll = sqrt(equations.gravity * (h_upper_ll + h_lower_ll)) - c_rr = sqrt(equations.gravity * (h_upper_rr + h_lower_rr)) - - # The normal velocities are already scaled by the norm - return max(abs(v_m_ll), abs(v_m_rr)) + max(c_ll, c_rr) * norm(normal_direction) -end - -# Specialized `DissipationLocalLaxFriedrichs` to avoid spurious dissipation in the bottom topography -@inline function (dissipation::DissipationLocalLaxFriedrichs)(u_ll, u_rr, - orientation_or_normal_direction, - equations::ShallowWaterTwoLayerEquations2D) - λ = dissipation.max_abs_speed(u_ll, u_rr, orientation_or_normal_direction, - equations) - diss = -0.5 * λ * (u_rr - u_ll) - return SVector(diss[1], diss[2], diss[3], diss[4], diss[5], diss[6], - zero(eltype(u_ll))) -end - -# Absolute speed of the barotropic mode -@inline function max_abs_speeds(u, equations::ShallowWaterTwoLayerEquations2D) - h_upper, h_v1_upper, h_v2_upper, h_lower, h_v1_lower, h_v2_lower, _ = u - - # Calculate averaged velocity of both layers - v1_m = (h_v1_upper + h_v1_lower) / (h_upper + h_lower) - v2_m = (h_v2_upper + h_v2_lower) / (h_upper + h_lower) - - h_upper, h_lower = waterheight(u, equations) - v1_upper, v2_upper, v1_lower, v2_lower = velocity(u, equations) - - c = sqrt(equations.gravity * (h_upper + h_lower)) - return (max(abs(v1_m) + c, abs(v1_upper), abs(v1_lower)), - max(abs(v2_m) + c, abs(v2_upper), abs(v2_lower))) -end - -# Helper function to extract the velocity vector from the conservative variables -@inline function velocity(u, equations::ShallowWaterTwoLayerEquations2D) - h_upper, h_v1_upper, h_v2_upper, h_lower, h_v1_lower, h_v2_lower, _ = u - - v1_upper = h_v1_upper / h_upper - v2_upper = h_v2_upper / h_upper - v1_lower = h_v1_lower / h_lower - v2_lower = h_v2_lower / h_lower - - return SVector(v1_upper, v2_upper, v1_lower, v2_lower) -end - -# Convert conservative variables to primitive -@inline function cons2prim(u, equations::ShallowWaterTwoLayerEquations2D) - h_upper, _, _, h_lower, _, _, b = u - - H_lower = h_lower + b - H_upper = h_lower + h_upper + b - v1_upper, v2_upper, v1_lower, v2_lower = velocity(u, equations) - - return SVector(H_upper, v1_upper, v2_upper, H_lower, v1_lower, v2_lower, b) -end - -# Convert conservative variables to entropy variables -# Note, only the first four are the entropy variables, the fifth entry still just carries the bottom -# topography values for convenience. 
-# In contrast to general usage the entropy variables are denoted with q instead of w, because w is -# already used for velocity in y-Direction -@inline function cons2entropy(u, equations::ShallowWaterTwoLayerEquations2D) - h_upper, _, _, h_lower, _, _, b = u - # Assign new variables for better readability - rho_upper = equations.rho_upper - rho_lower = equations.rho_lower - v1_upper, v2_upper, v1_lower, v2_lower = velocity(u, equations) - - w1 = (rho_upper * (equations.gravity * (h_upper + h_lower + b) + - -0.5 * (v1_upper^2 + v2_upper^2))) - w2 = rho_upper * v1_upper - w3 = rho_upper * v2_upper - w4 = (rho_lower * (equations.gravity * (equations.r * h_upper + h_lower + b) + - -0.5 * (v1_lower^2 + v2_lower^2))) - w5 = rho_lower * v1_lower - w6 = rho_lower * v2_lower - return SVector(w1, w2, w3, w4, w5, w6, b) -end - -# Convert primitive to conservative variables -@inline function prim2cons(prim, equations::ShallowWaterTwoLayerEquations2D) - H_upper, v1_upper, v2_upper, H_lower, v1_lower, v2_lower, b = prim - - h_lower = H_lower - b - h_upper = H_upper - h_lower - b - h_v1_upper = h_upper * v1_upper - h_v2_upper = h_upper * v2_upper - h_v1_lower = h_lower * v1_lower - h_v2_lower = h_lower * v2_lower - return SVector(h_upper, h_v1_upper, h_v2_upper, h_lower, h_v1_lower, h_v2_lower, b) -end - -@inline function waterheight(u, equations::ShallowWaterTwoLayerEquations2D) - return SVector(u[1], u[4]) -end - -# Entropy function for the shallow water equations is the total energy -@inline function entropy(cons, equations::ShallowWaterTwoLayerEquations2D) - energy_total(cons, equations) -end - -# Calculate total energy for a conservative state `cons` -@inline function energy_total(cons, equations::ShallowWaterTwoLayerEquations2D) - h_upper, h_v1_upper, h_v2_upper, h_lower, h_v2_lower, h_v2_lower, b = cons - g = equations.gravity - rho_upper = equations.rho_upper - rho_lower = equations.rho_lower - - e = (0.5 * rho_upper * - (h_v1_upper^2 / h_upper + h_v2_upper^2 / h_upper + g * h_upper^2) + - 0.5 * rho_lower * - (h_v2_lower^2 / h_lower + h_v2_lower^2 / h_lower + g * h_lower^2) + - g * rho_lower * h_lower * b + g * rho_upper * h_upper * (h_lower + b)) - return e -end - -# Calculate kinetic energy for a conservative state `cons` -@inline function energy_kinetic(u, equations::ShallowWaterTwoLayerEquations2D) - h_upper, h_v1_upper, h_v2_upper, h_lower, h_v2_lower, h_v2_lower, _ = u - - return (0.5 * equations.rho_upper * h_v1_upper^2 / h_upper + - 0.5 * equations.rho_upper * h_v2_upper^2 / h_upper + - 0.5 * equations.rho_lower * h_v2_lower^2 / h_lower + - 0.5 * equations.rho_lower * h_v2_lower^2 / h_lower) -end - -# Calculate potential energy for a conservative state `cons` -@inline function energy_internal(cons, equations::ShallowWaterTwoLayerEquations2D) - return energy_total(cons, equations) - energy_kinetic(cons, equations) -end - -# Calculate the error for the "lake-at-rest" test case where H = h_upper+h_lower+b should -# be a constant value over time -@inline function lake_at_rest_error(u, equations::ShallowWaterTwoLayerEquations2D) - h_upper, _, _, h_lower, _, _, b = u - return abs(equations.H0 - (h_upper + h_lower + b)) -end -end # @muladd diff --git a/src/solvers/dgsem_tree/indicators.jl b/src/solvers/dgsem_tree/indicators.jl index bb9109f276..9f25a6d2db 100644 --- a/src/solvers/dgsem_tree/indicators.jl +++ b/src/solvers/dgsem_tree/indicators.jl @@ -101,82 +101,6 @@ function Base.show(io::IO, ::MIME"text/plain", indicator::IndicatorHennemannGass summary_box(io, "IndicatorHennemannGassner", 
setup) end -# TODO: TrixiShallowWater: move the new indicator and all associated routines to the new package -""" - IndicatorHennemannGassnerShallowWater(equations::AbstractEquations, basis; - alpha_max=0.5, - alpha_min=0.001, - alpha_smooth=true, - variable) - -Modified version of the [`IndicatorHennemannGassner`](@ref) -indicator used for shock-capturing for shallow water equations. After -the element-wise values for the blending factors are computed an additional check -is made to see if the element is partially wet. In this case, partially wet elements -are set to use the pure finite volume scheme that is guaranteed to be well-balanced -for this wet/dry transition state of the flow regime. - -See also [`VolumeIntegralShockCapturingHG`](@ref). - -## References - -- Hennemann, Gassner (2020) - "A provably entropy stable subcell shock capturing approach for high order split form DG" - [arXiv: 2008.12044](https://arxiv.org/abs/2008.12044) -""" -struct IndicatorHennemannGassnerShallowWater{RealT <: Real, Variable, Cache} <: - AbstractIndicator - alpha_max::RealT - alpha_min::RealT - alpha_smooth::Bool - variable::Variable - cache::Cache -end - -# this method is used when the indicator is constructed as for shock-capturing volume integrals -# of the shallow water equations -# It modifies the shock-capturing indicator to use full FV method in dry cells -function IndicatorHennemannGassnerShallowWater(equations::AbstractShallowWaterEquations, - basis; - alpha_max = 0.5, - alpha_min = 0.001, - alpha_smooth = true, - variable) - alpha_max, alpha_min = promote(alpha_max, alpha_min) - cache = create_cache(IndicatorHennemannGassner, equations, basis) - IndicatorHennemannGassnerShallowWater{typeof(alpha_max), typeof(variable), - typeof(cache)}(alpha_max, alpha_min, - alpha_smooth, variable, cache) -end - -function Base.show(io::IO, indicator::IndicatorHennemannGassnerShallowWater) - @nospecialize indicator # reduce precompilation time - - print(io, "IndicatorHennemannGassnerShallowWater(") - print(io, indicator.variable) - print(io, ", alpha_max=", indicator.alpha_max) - print(io, ", alpha_min=", indicator.alpha_min) - print(io, ", alpha_smooth=", indicator.alpha_smooth) - print(io, ")") -end - -function Base.show(io::IO, ::MIME"text/plain", - indicator::IndicatorHennemannGassnerShallowWater) - @nospecialize indicator # reduce precompilation time - - if get(io, :compact, false) - show(io, indicator) - else - setup = [ - "indicator variable" => indicator.variable, - "max. α" => indicator.alpha_max, - "min. α" => indicator.alpha_min, - "smooth α" => (indicator.alpha_smooth ? "yes" : "no"), - ] - summary_box(io, "IndicatorHennemannGassnerShallowWater", setup) - end -end - function (indicator_hg::IndicatorHennemannGassner)(u, mesh, equations, dg::DGSEM, cache; kwargs...) @unpack alpha_smooth = indicator_hg diff --git a/src/solvers/dgsem_tree/indicators_1d.jl b/src/solvers/dgsem_tree/indicators_1d.jl index dff87bfe06..4796ddcc60 100644 --- a/src/solvers/dgsem_tree/indicators_1d.jl +++ b/src/solvers/dgsem_tree/indicators_1d.jl @@ -24,115 +24,6 @@ function create_cache(typ::Type{IndicatorHennemannGassner}, mesh, create_cache(typ, equations, dg.basis) end -# Modified indicator for ShallowWaterEquations1D to apply full FV method on cells -# containing some "dry" LGL nodes. That is, if an element is partially "wet" then it becomes a -# full FV element. 
-# -# TODO: TrixiShallowWater: move new indicator type -function (indicator_hg::IndicatorHennemannGassnerShallowWater)(u::AbstractArray{<:Any, - 3}, - mesh, - equations::ShallowWaterEquations1D, - dg::DGSEM, cache; - kwargs...) - @unpack alpha_max, alpha_min, alpha_smooth, variable = indicator_hg - @unpack alpha, alpha_tmp, indicator_threaded, modal_threaded = indicator_hg.cache - # TODO: Taal refactor, when to `resize!` stuff changed possibly by AMR? - # Shall we implement `resize!(semi::AbstractSemidiscretization, new_size)` - # or just `resize!` whenever we call the relevant methods as we do now? - resize!(alpha, nelements(dg, cache)) - if alpha_smooth - resize!(alpha_tmp, nelements(dg, cache)) - end - - # magic parameters - threshold = 0.5 * 10^(-1.8 * (nnodes(dg))^0.25) - parameter_s = log((1 - 0.0001) / 0.0001) - - # If the water height `h` at one LGL node is lower than `threshold_partially_wet` - # the indicator sets the element-wise blending factor alpha[element] = 1 - # via the local variable `indicator_wet`. In turn, this ensures that a pure - # FV method is used in partially wet cells and guarantees the well-balanced property. - # - # Hard-coded cut-off value of `threshold_partially_wet = 1e-4` was determined through many numerical experiments. - # Overall idea is to increase robustness when computing the velocity on (nearly) dry cells which - # could be "dangerous" due to division of conservative variables, e.g., v = hv / h. - # Here, the impact of the threshold on the number of cells being updated with FV is not that - # significant. However, its impact on the robustness is very significant. - # The value can be seen as a trade-off between accuracy and stability. - # Well-balancedness of the scheme on partially wet cells with hydrostatic reconstruction - # can only be proven for the FV method (see Chen and Noelle). - # Therefore we set alpha to one regardless of its given maximum value. 
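For context, the indicator whose wet/dry logic is explained in the comment above was typically wired into a shock-capturing DG solver as in the sketch below. This is a minimal, illustrative setup, not taken from the diff: it assumes a Trixi.jl version that still ships `IndicatorHennemannGassnerShallowWater`, and the flux choices are simple placeholders rather than the hydrostatic-reconstruction fluxes used in the actual wet/dry elixirs.

using Trixi

equations = ShallowWaterEquations1D(gravity_constant = 9.81)

# Illustrative flux pairing; the wet/dry elixirs use hydrostatic-reconstruction fluxes instead
volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)
surface_flux = (flux_lax_friedrichs, flux_nonconservative_fjordholm_etal)

basis = LobattoLegendreBasis(4)
# Indicator that switches partially wet elements to pure FV, as described above
indicator_sc = IndicatorHennemannGassnerShallowWater(equations, basis,
                                                     alpha_max = 0.5,
                                                     alpha_min = 0.001,
                                                     alpha_smooth = true,
                                                     variable = waterheight_pressure)
volume_integral = VolumeIntegralShockCapturingHG(indicator_sc;
                                                 volume_flux_dg = volume_flux,
                                                 volume_flux_fv = surface_flux)
solver = DGSEM(basis, surface_flux, volume_integral)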
- threshold_partially_wet = 1e-4 - - @threaded for element in eachelement(dg, cache) - indicator = indicator_threaded[Threads.threadid()] - modal = modal_threaded[Threads.threadid()] - - # (Re-)set dummy variable for alpha_dry - indicator_wet = 1 - - # Calculate indicator variables at Gauss-Lobatto nodes - for i in eachnode(dg) - u_local = get_node_vars(u, equations, dg, i, element) - h, _, _ = u_local - - if h <= threshold_partially_wet - indicator_wet = 0 - end - - indicator[i] = indicator_hg.variable(u_local, equations) - end - - # Convert to modal representation - multiply_scalar_dimensionwise!(modal, dg.basis.inverse_vandermonde_legendre, - indicator) - - # Calculate total energies for all modes, without highest, without two highest - total_energy = zero(eltype(modal)) - for i in 1:nnodes(dg) - total_energy += modal[i]^2 - end - total_energy_clip1 = zero(eltype(modal)) - for i in 1:(nnodes(dg) - 1) - total_energy_clip1 += modal[i]^2 - end - total_energy_clip2 = zero(eltype(modal)) - for i in 1:(nnodes(dg) - 2) - total_energy_clip2 += modal[i]^2 - end - - # Calculate energy in higher modes - energy = max((total_energy - total_energy_clip1) / total_energy, - (total_energy_clip1 - total_energy_clip2) / total_energy_clip1) - - alpha_element = 1 / (1 + exp(-parameter_s / threshold * (energy - threshold))) - - # Take care of the case close to pure DG - if alpha_element < alpha_min - alpha_element = zero(alpha_element) - end - - # Take care of the case close to pure FV - if alpha_element > 1 - alpha_min - alpha_element = one(alpha_element) - end - - # Clip the maximum amount of FV allowed or set to one depending on indicator_wet - if indicator_wet == 0 - alpha[element] = 1 - else # Element is not defined as dry but wet - alpha[element] = min(alpha_max, alpha_element) - end - end - - if alpha_smooth - apply_smoothing!(mesh, alpha, alpha_tmp, dg, cache) - end - - return alpha -end - # Use this function barrier and unpack inside to avoid passing closures to Polyester.jl # with @batch (@threaded). # Otherwise, @threaded does not work here with Julia ARM on macOS. diff --git a/src/solvers/dgsem_tree/indicators_2d.jl b/src/solvers/dgsem_tree/indicators_2d.jl index fa8ed481eb..665d2254e5 100644 --- a/src/solvers/dgsem_tree/indicators_2d.jl +++ b/src/solvers/dgsem_tree/indicators_2d.jl @@ -28,116 +28,6 @@ function create_cache(typ::Type{IndicatorHennemannGassner}, mesh, create_cache(typ, equations, dg.basis) end -# Modified indicator for ShallowWaterEquations2D to apply full FV method on cells -# containing some "dry" LGL nodes. That is, if an element is partially "wet" then it becomes a -# full FV element. -# -# TODO: TrixiShallowWater: move new indicator type -function (indicator_hg::IndicatorHennemannGassnerShallowWater)(u::AbstractArray{<:Any, - 4}, - mesh, - equations::ShallowWaterEquations2D, - dg::DGSEM, cache; - kwargs...) - @unpack alpha_max, alpha_min, alpha_smooth, variable = indicator_hg - @unpack alpha, alpha_tmp, indicator_threaded, modal_threaded, modal_tmp1_threaded = indicator_hg.cache - # TODO: Taal refactor, when to `resize!` stuff changed possibly by AMR? - # Shall we implement `resize!(semi::AbstractSemidiscretization, new_size)` - # or just `resize!` whenever we call the relevant methods as we do now? 
- resize!(alpha, nelements(dg, cache)) - if alpha_smooth - resize!(alpha_tmp, nelements(dg, cache)) - end - - # magic parameters - threshold = 0.5 * 10^(-1.8 * (nnodes(dg))^0.25) - parameter_s = log((1 - 0.0001) / 0.0001) - - # If the water height `h` at one LGL node is lower than `threshold_partially_wet` - # the indicator sets the element-wise blending factor alpha[element] = 1 - # via the local variable `indicator_wet`. In turn, this ensures that a pure - # FV method is used in partially wet cells and guarantees the well-balanced property. - # - # Hard-coded cut-off value of `threshold_partially_wet = 1e-4` was determined through many numerical experiments. - # Overall idea is to increase robustness when computing the velocity on (nearly) dry cells which - # could be "dangerous" due to division of conservative variables, e.g., v1 = hv1 / h. - # Here, the impact of the threshold on the number of cells being updated with FV is not that - # significant. However, its impact on the robustness is very significant. - # The value can be seen as a trade-off between accuracy and stability. - # Well-balancedness of the scheme on partially wet cells with hydrostatic reconstruction - # can only be proven for the FV method (see Chen and Noelle). - # Therefore we set alpha to be one regardless of its given value from the modal indicator. - threshold_partially_wet = 1e-4 - - @threaded for element in eachelement(dg, cache) - indicator = indicator_threaded[Threads.threadid()] - modal = modal_threaded[Threads.threadid()] - modal_tmp1 = modal_tmp1_threaded[Threads.threadid()] - - # (Re-)set dummy variable for alpha_dry - indicator_wet = 1 - - # Calculate indicator variables at Gauss-Lobatto nodes - for j in eachnode(dg), i in eachnode(dg) - u_local = get_node_vars(u, equations, dg, i, j, element) - h, _, _, _ = u_local - - if h <= threshold_partially_wet - indicator_wet = 0 - end - - indicator[i, j] = indicator_hg.variable(u_local, equations) - end - - # Convert to modal representation - multiply_scalar_dimensionwise!(modal, dg.basis.inverse_vandermonde_legendre, - indicator, modal_tmp1) - - # Calculate total energies for all modes, without highest, without two highest - total_energy = zero(eltype(modal)) - for j in 1:nnodes(dg), i in 1:nnodes(dg) - total_energy += modal[i, j]^2 - end - total_energy_clip1 = zero(eltype(modal)) - for j in 1:(nnodes(dg) - 1), i in 1:(nnodes(dg) - 1) - total_energy_clip1 += modal[i, j]^2 - end - total_energy_clip2 = zero(eltype(modal)) - for j in 1:(nnodes(dg) - 2), i in 1:(nnodes(dg) - 2) - total_energy_clip2 += modal[i, j]^2 - end - - # Calculate energy in higher modes - energy = max((total_energy - total_energy_clip1) / total_energy, - (total_energy_clip1 - total_energy_clip2) / total_energy_clip1) - - alpha_element = 1 / (1 + exp(-parameter_s / threshold * (energy - threshold))) - - # Take care of the case close to pure DG - if alpha_element < alpha_min - alpha_element = zero(alpha_element) - end - - # Take care of the case close to pure FV - if alpha_element > 1 - alpha_min - alpha_element = one(alpha_element) - end - - # Clip the maximum amount of FV allowed or set to 1 depending on indicator_wet - if indicator_wet == 0 - alpha[element] = 1 - else # Element is not defined as dry but wet - alpha[element] = min(alpha_max, alpha_element) - end - end - - if alpha_smooth - apply_smoothing!(mesh, alpha, alpha_tmp, dg, cache) - end - - return alpha -end - # Use this function barrier and unpack inside to avoid passing closures to Polyester.jl # with @batch (@threaded). 
# Otherwise, @threaded does not work here with Julia ARM on macOS. diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index 522510a42e..f5fb169033 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -1,7 +1,5 @@ module TestExamplesStructuredMesh2D -# TODO: TrixiShallowWater: move any wet/dry tests to new package - using Test using Trixi @@ -907,82 +905,6 @@ end end end -@trixi_testset "elixir_shallowwater_well_balanced_wet_dry.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_well_balanced_wet_dry.jl"), - l2=[ - 0.019731646454942086, - 1.0694532773278277e-14, - 1.1969913383405568e-14, - 0.0771517260037954, - ], - linf=[ - 0.4999999999998892, - 6.067153702623552e-14, - 4.4849667259339357e-14, - 1.9999999999999993, - ], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - -@trixi_testset "elixir_shallowwater_conical_island.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_conical_island.jl"), - l2=[ - 0.04593154164306353, - 0.1644534881916908, - 0.16445348819169076, - 0.0011537702354532122, - ], - linf=[ - 0.21100717610846442, - 0.9501592344310412, - 0.950159234431041, - 0.021790250683516296, - ], - tspan=(0.0, 0.025)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - -@trixi_testset "elixir_shallowwater_parabolic_bowl.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_parabolic_bowl.jl"), - l2=[ - 0.00015285369980313484, - 1.9536806395943226e-5, - 9.936906607758672e-5, - 5.0686313334616055e-15, - ], - linf=[ - 0.003316119030459211, - 0.0005075409427972817, - 0.001986721761060583, - 4.701794509287538e-14, - ], - tspan=(0.0, 0.025), cells_per_dimension=(40, 40)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - @trixi_testset "elixir_mhd_ec_shockcapturing.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_ec_shockcapturing.jl"), l2=[0.0364192725149364, 0.0426667193422069, 0.04261673001449095, diff --git a/test/test_tree_1d.jl b/test/test_tree_1d.jl index 8b470278ff..4a25a51a45 100644 --- a/test/test_tree_1d.jl +++ b/test/test_tree_1d.jl @@ -42,8 +42,6 @@ isdir(outdir) && rm(outdir, recursive = true) # Shallow water include("test_tree_1d_shallowwater.jl") - # Two-layer Shallow Water - include("test_tree_1d_shallowwater_twolayer.jl") # FDSBP methods on the TreeMesh include("test_tree_1d_fdsbp.jl") diff --git a/test/test_tree_1d_shallowwater.jl b/test/test_tree_1d_shallowwater.jl index 2269e85892..f9be63b87f 100644 --- a/test/test_tree_1d_shallowwater.jl +++ b/test/test_tree_1d_shallowwater.jl @@ -1,7 +1,5 @@ module TestExamples1DShallowWater -# TODO: TrixiShallowWater: move any wet/dry tests to new package - using Test using Trixi @@ -119,32 +117,6 @@ end end end -@trixi_testset "elixir_shallowwater_well_balanced_wet_dry.jl with FluxHydrostaticReconstruction" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_well_balanced_wet_dry.jl"), - l2=[ - 
0.00965787167169024, - 5.345454081916856e-14, - 0.03857583749209928, - ], - linf=[ - 0.4999999999998892, - 2.2447689894899726e-13, - 1.9999999999999714, - ], - tspan=(0.0, 0.25), - # Soften the tolerance as test results vary between different CPUs - atol=1000 * eps()) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - @trixi_testset "elixir_shallowwater_source_terms.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), l2=[ @@ -339,53 +311,6 @@ end end end -@trixi_testset "elixir_shallowwater_beach.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_beach.jl"), - l2=[ - 0.17979210479598923, - 1.2377495706611434, - 6.289818963361573e-8, - ], - linf=[ - 0.845938394800688, - 3.3740800777086575, - 4.4541473087633676e-7, - ], - tspan=(0.0, 0.05), - atol=3e-10) # see https://github.com/trixi-framework/Trixi.jl/issues/1617 - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - -@trixi_testset "elixir_shallowwater_parabolic_bowl.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_parabolic_bowl.jl"), - l2=[ - 8.965981683033589e-5, - 1.8565707397810857e-5, - 4.1043039226164336e-17, - ], - linf=[ - 0.00041080213807871235, - 0.00014823261488938177, - 2.220446049250313e-16, - ], - tspan=(0.0, 0.05)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - @trixi_testset "elixir_shallow_water_quasi_1d_source_terms.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallow_water_quasi_1d_source_terms.jl"), diff --git a/test/test_tree_1d_shallowwater_twolayer.jl b/test/test_tree_1d_shallowwater_twolayer.jl deleted file mode 100644 index 180fb3ec3b..0000000000 --- a/test/test_tree_1d_shallowwater_twolayer.jl +++ /dev/null @@ -1,74 +0,0 @@ -module TestExamples1DShallowWaterTwoLayer - -# TODO: TrixiShallowWater: move two layer tests to new package - -using Test -using Trixi - -include("test_trixi.jl") - -EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") - -@testset "Shallow Water Two layer" begin - @trixi_testset "elixir_shallowwater_twolayer_convergence.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_convergence.jl"), - l2=[0.005012009872109003, 0.002091035326731071, - 0.005049271397924551, - 0.0024633066562966574, 0.0004744186597732739], - linf=[0.0213772149343594, 0.005385752427290447, - 0.02175023787351349, - 0.008212004668840978, 0.0008992474511784199], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end - end - - @trixi_testset "elixir_shallowwater_twolayer_well_balanced.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_well_balanced.jl"), - l2=[8.949288784402005e-16, 4.0636427176237915e-17, - 0.001002881985401548, - 2.133351105037203e-16, 0.0010028819854016578], - 
linf=[2.6229018956769323e-15, 1.878451903240623e-16, - 0.005119880996670156, - 8.003199803957679e-16, 0.005119880996670666], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end - end - - @trixi_testset "elixir_shallowwater_twolayer_dam_break.jl with flux_lax_friedrichs" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_dam_break.jl"), - l2=[0.1000774903431289, 0.5670692949571057, 0.08764242501014498, - 0.45412307886094555, 0.013638618139749523], - linf=[0.586718937495144, 2.1215606128311584, 0.5185911311186155, - 1.820382495072612, 0.5], - surface_flux=(flux_lax_friedrichs, - flux_nonconservative_ersing_etal), - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end - end -end - -end # module diff --git a/test/test_tree_2d_part3.jl b/test/test_tree_2d_part3.jl index ce9b3bc04f..0eff564132 100644 --- a/test/test_tree_2d_part3.jl +++ b/test/test_tree_2d_part3.jl @@ -26,9 +26,6 @@ isdir(outdir) && rm(outdir, recursive = true) # Shallow water include("test_tree_2d_shallowwater.jl") - # Two-Layer Shallow Water - include("test_tree_2d_shallowwater_twolayer.jl") - # FDSBP methods on the TreeMesh include("test_tree_2d_fdsbp.jl") end diff --git a/test/test_tree_2d_shallowwater.jl b/test/test_tree_2d_shallowwater.jl index 1f3dfbf526..93a8cb6366 100644 --- a/test/test_tree_2d_shallowwater.jl +++ b/test/test_tree_2d_shallowwater.jl @@ -1,7 +1,5 @@ module TestExamples2DShallowWater -# TODO: TrixiShallowWater: move any wet/dry tests to new package - using Test using Trixi @@ -145,32 +143,6 @@ end end end -@trixi_testset "elixir_shallowwater_well_balanced_wet_dry.jl with FluxHydrostaticReconstruction" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_well_balanced_wet_dry.jl"), - l2=[ - 0.030186039395610056, - 2.513287752536758e-14, - 1.3631397744897607e-16, - 0.10911781485920438, - ], - linf=[ - 0.49999999999993505, - 5.5278950497971455e-14, - 7.462550826772548e-16, - 2.0, - ], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - @trixi_testset "elixir_shallowwater_source_terms.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), l2=[ @@ -277,57 +249,6 @@ end end end -@trixi_testset "elixir_shallowwater_conical_island.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_conical_island.jl"), - l2=[ - 0.0459315416430658, - 0.1644534881916991, - 0.16445348819169914, - 0.0011537702354532694, - ], - linf=[ - 0.21100717610846464, - 0.9501592344310412, - 0.9501592344310417, - 0.021790250683516282, - ], - tspan=(0.0, 0.025)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - -@trixi_testset "elixir_shallowwater_parabolic_bowl.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, 
"elixir_shallowwater_parabolic_bowl.jl"), - l2=[ - 0.00025345501281482687, - 4.4525120338817177e-5, - 0.00015991819160294247, - 7.750412064917294e-15, - ], - linf=[ - 0.004664246019836723, - 0.0004972780116736669, - 0.0028735707270457628, - 6.866729407306593e-14, - ], - tspan=(0.0, 0.025), - basis=LobattoLegendreBasis(3)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - @trixi_testset "elixir_shallowwater_wall.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_wall.jl"), l2=[ diff --git a/test/test_tree_2d_shallowwater_twolayer.jl b/test/test_tree_2d_shallowwater_twolayer.jl deleted file mode 100644 index 802bf4e021..0000000000 --- a/test/test_tree_2d_shallowwater_twolayer.jl +++ /dev/null @@ -1,88 +0,0 @@ -module TestExamples2DShallowWaterTwoLayer - -# TODO: TrixiShallowWater: move two layer tests to new package - -using Test -using Trixi - -include("test_trixi.jl") - -EXAMPLES_DIR = joinpath(examples_dir(), "tree_2d_dgsem") - -@testset "Two-Layer Shallow Water" begin - @trixi_testset "elixir_shallowwater_twolayer_convergence.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_convergence.jl"), - l2=[0.0004016779699408397, 0.005466339651545468, - 0.006148841330156112, - 0.0002882339012602492, 0.0030120142442780313, - 0.002680752838455618, - 8.873630921431545e-6], - linf=[0.002788654460984752, 0.01484602033450666, - 0.017572229756493973, - 0.0016010835493927011, 0.009369847995372549, - 0.008407961775489636, - 3.361991620143279e-5], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end - end - - @trixi_testset "elixir_shallowwater_twolayer_well_balanced.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_well_balanced.jl"), - l2=[3.2935164267930016e-16, 4.6800825611195103e-17, - 4.843057532147818e-17, - 0.0030769233188015013, 1.4809161150389857e-16, - 1.509071695038043e-16, - 0.0030769233188014935], - linf=[2.248201624865942e-15, 2.346382070278936e-16, - 2.208565017494899e-16, - 0.026474051138910493, 9.237568031609006e-16, - 7.520758026187046e-16, - 0.026474051138910267], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end - end - - @trixi_testset "elixir_shallowwater_twolayer_well_balanced with flux_lax_friedrichs.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_well_balanced.jl"), - l2=[2.0525741072929735e-16, 6.000589392730905e-17, - 6.102759428478984e-17, - 0.0030769233188014905, 1.8421386173122792e-16, - 1.8473184927121752e-16, - 0.0030769233188014935], - linf=[7.355227538141662e-16, 2.960836949170518e-16, - 4.2726562436938764e-16, - 0.02647405113891016, 1.038795478061861e-15, - 1.0401789378532516e-15, - 0.026474051138910267], - surface_flux=(flux_lax_friedrichs, - flux_nonconservative_ersing_etal), - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = 
similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end - end -end - -end # module diff --git a/test/test_unit.jl b/test/test_unit.jl index c1379587cc..1907a28171 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -420,11 +420,6 @@ end (1.0, 1.0), 1.0) @test_nowarn show(stdout, limiter_idp) - # TODO: TrixiShallowWater: move unit test - indicator_hg_swe = IndicatorHennemannGassnerShallowWater(1.0, 0.0, true, "variable", - "cache") - @test_nowarn show(stdout, indicator_hg_swe) - indicator_loehner = IndicatorLöhner(1.0, "variable", (; cache = nothing)) @test_nowarn show(stdout, indicator_loehner) @@ -543,7 +538,7 @@ end end @timed_testset "Shallow water conversion between conservative/entropy variables" begin - H, v1, v2, b = 3.5, 0.25, 0.1, 0.4 + H, v1, v2, b, a = 3.5, 0.25, 0.1, 0.4, 0.3 let equations = ShallowWaterEquations1D(gravity_constant = 9.8) cons_vars = prim2cons(SVector(H, v1, b), equations) @@ -572,6 +567,14 @@ end entropy_vars = cons2entropy(cons_vars, equations) @test cons_vars ≈ entropy2cons(entropy_vars, equations) end + + let equations = ShallowWaterEquationsQuasi1D(gravity_constant = 9.8) + cons_vars = prim2cons(SVector(H, v1, b, a), equations) + entropy_vars = cons2entropy(cons_vars, equations) + + total_energy = energy_total(cons_vars, equations) + @test entropy(cons_vars, equations) ≈ a * total_energy + end end @timed_testset "boundary_condition_do_nothing" begin @@ -697,6 +700,14 @@ end u = SVector(1, 0.5, 0.0) @test flux_hll(u, u, 1, equations) ≈ flux(u, 1, equations) + u_ll = SVector(0.1, 1.0, 0.0) + u_rr = SVector(0.1, 1.0, 0.0) + @test flux_hll(u_ll, u_rr, 1, equations) ≈ flux(u_ll, 1, equations) + + u_ll = SVector(0.1, -1.0, 0.0) + u_rr = SVector(0.1, -1.0, 0.0) + @test flux_hll(u_ll, u_rr, 1, equations) ≈ flux(u_rr, 1, equations) + equations = ShallowWaterEquations2D(gravity_constant = 9.81) normal_directions = [SVector(1.0, 0.0), SVector(0.0, 1.0), @@ -707,6 +718,17 @@ end @test flux_hll(u, u, normal_direction, equations) ≈ flux(u, normal_direction, equations) end + + normal_direction = SVector(1.0, 0.0, 0.0) + u_ll = SVector(0.1, 1.0, 1.0, 0.0) + u_rr = SVector(0.1, 1.0, 1.0, 0.0) + @test flux_hll(u_ll, u_rr, normal_direction, equations) ≈ + flux(u_ll, normal_direction, equations) + + u_ll = SVector(0.1, -1.0, -1.0, 0.0) + u_rr = SVector(0.1, -1.0, -1.0, 0.0) + @test flux_hll(u_ll, u_rr, normal_direction, equations) ≈ + flux(u_rr, normal_direction, equations) end @timed_testset "Consistency check for HLL flux (naive): MHD" begin diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index 83b8318c92..04eb9f679a 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -1,7 +1,5 @@ module TestExamplesUnstructuredMesh2D -# TODO: TrixiShallowWater: move any wet/dry and two layer tests - using Test using Trixi @@ -566,105 +564,6 @@ end end end -@trixi_testset "elixir_shallowwater_three_mound_dam_break.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_three_mound_dam_break.jl"), - l2=[ - 0.0892957892027502, - 0.30648836484407915, - 2.28712547616214e-15, - 0.0008778654298684622, - ], - linf=[ - 0.850329472915091, - 2.330631694956507, - 5.783660020252348e-14, - 0.04326237921249021, - ], - basis=LobattoLegendreBasis(3), - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - 
end -end - -@trixi_testset "elixir_shallowwater_twolayer_convergence.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_convergence.jl"), - l2=[0.0007935561625451243, 0.008825315509943844, - 0.002429969315645897, - 0.0007580145888686304, 0.004495741879625235, - 0.0015758146898767814, - 6.849532064729749e-6], - linf=[0.0059205195991136605, 0.08072126590166251, - 0.03463806075399023, - 0.005884818649227186, 0.042658506561995546, - 0.014125956138838602, 2.5829318284764646e-5], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - -@trixi_testset "elixir_shallowwater_twolayer_well_balanced.jl" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_well_balanced.jl"), - l2=[4.706532184998499e-16, 1.1215950712872183e-15, - 6.7822712922421565e-16, - 0.002192812926266047, 5.506855295923691e-15, - 3.3105180099689275e-15, - 0.0021928129262660085], - linf=[4.468647674116255e-15, 1.3607872120431166e-14, - 9.557155049520056e-15, - 0.024280130945632084, 6.68910907640583e-14, - 4.7000983997100496e-14, - 0.024280130945632732], - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - -@trixi_testset "elixir_shallowwater_twolayer_dam_break.jl with flux_lax_friedrichs" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, - "elixir_shallowwater_twolayer_dam_break.jl"), - l2=[0.012447632879122346, 0.012361250464676683, - 0.0009551519536340908, - 0.09119400061322577, 0.015276216721920347, - 0.0012126995108983853, 0.09991983966647647], - linf=[0.044305765721807444, 0.03279620980615845, - 0.010754320388190101, - 0.111309922939555, 0.03663360204931427, - 0.014332822306649284, - 0.10000000000000003], - surface_flux=(flux_lax_friedrichs, - flux_nonconservative_ersing_etal), - tspan=(0.0, 0.25)) - # Ensure that we do not have excessive memory allocations - # (e.g., from type instabilities) - let - t = sol.t[end] - u_ode = sol.u[end] - du_ode = similar(u_ode) - @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 - end -end - # TODO: FD; for now put the unstructured tests for the 2D FDSBP here. 
@trixi_testset "FDSBP (central): elixir_advection_basic.jl" begin @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), From 5418274987eec6bade5432ead7c91f1d4a842610 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Fri, 23 Feb 2024 05:31:01 +0100 Subject: [PATCH 25/58] Make `min_max_speed_davis` default wave speed estimate for `FluxHLL()` (#1743) * make min_max_speed_davis default wave speed * fmt * exchange hll * exchange some min_max_speed_naive for min_max_speed_davis for some examples * fmt * add news entry * news * debug * revert unintended elixir changes * news entry * correct test vals * Update test vals * Exchange some tests * update test vals for threaded * test vals for fdsbp unstructured * fmt * tests for coverage --------- Co-authored-by: Hendrik Ranocha --- NEWS.md | 5 ++ .../elixir_eulergravity_convergence.jl | 2 +- ..._euler_source_terms_nonconforming_earth.jl | 2 +- .../elixir_euler_convergence.jl | 2 +- .../elixir_eulergravity_convergence.jl | 2 +- .../elixir_eulergravity_jeans_instability.jl | 2 +- .../elixir_eulergravity_sedov_blast_wave.jl | 2 +- .../elixir_eulergravity_convergence.jl | 2 +- ..._shallowwater_well_balanced_nonperiodic.jl | 4 +- .../elixir_eulergravity_convergence.jl | 2 +- .../elixir_shallowwater_dirichlet.jl | 4 +- src/equations/numerical_fluxes.jl | 16 +++- test/test_dgmulti_1d.jl | 10 +-- test/test_dgmulti_2d.jl | 82 +++++++----------- test/test_dgmulti_3d.jl | 71 ++++++--------- test/test_parabolic_3d.jl | 1 + test/test_special_elixirs.jl | 3 +- test/test_structured_2d.jl | 57 ++++++------ test/test_threaded.jl | 24 +++--- test/test_tree_1d_euler.jl | 8 +- test/test_tree_1d_eulergravity.jl | 10 +-- test/test_tree_1d_shallowwater.jl | 19 ++-- test/test_tree_2d_mhd.jl | 2 +- test/test_tree_2d_shallowwater.jl | 30 ++++++- test/test_tree_3d_euler.jl | 17 ++-- test/test_tree_3d_mhd.jl | 4 +- test/test_unstructured_2d.jl | 86 ++++++++----------- 27 files changed, 230 insertions(+), 239 deletions(-) diff --git a/NEWS.md b/NEWS.md index ecc91581e9..d70504d8c8 100644 --- a/NEWS.md +++ b/NEWS.md @@ -10,6 +10,9 @@ for human readability. #### Changed +- The default wave speed estimate used within `flux_hll` is now `min_max_speed_davis` + instead of `min_max_speed_naive`. + #### Deprecated #### Removed @@ -17,6 +20,7 @@ for human readability. Trixi.jl, but are moved to a dedicated repository: [TrixiShallowWater.jl](https://github.com/trixi-framework/TrixiShallowWater.jl). This includes all features related to wetting and drying, as well as the `ShallowWaterTwoLayerEquations1D` and `ShallowWaterTwoLayerEquations2D`. However, the basic shallow water equations are still part of Trixi.jl. We'll also be updating the TrixiShallowWater.jl documentation with instructions on how to use these relocated features in the future. + ## Changes in the v0.6 lifecycle #### Added @@ -27,6 +31,7 @@ for human readability. 
- Subcell (positivity) limiting support for nonlinear variables in 2D for `TreeMesh` - Added Lighthill-Whitham-Richards (LWR) traffic model + ## Changes when updating to v0.6 from v0.5.x #### Added diff --git a/examples/p4est_2d_dgsem/elixir_eulergravity_convergence.jl b/examples/p4est_2d_dgsem/elixir_eulergravity_convergence.jl index d55a59ca5c..974466e3b3 100644 --- a/examples/p4est_2d_dgsem/elixir_eulergravity_convergence.jl +++ b/examples/p4est_2d_dgsem/elixir_eulergravity_convergence.jl @@ -10,7 +10,7 @@ gamma = 2.0 equations_euler = CompressibleEulerEquations2D(gamma) polydeg = 3 -solver_euler = DGSEM(polydeg, flux_hll) +solver_euler = DGSEM(polydeg, FluxHLL(min_max_speed_naive)) coordinates_min = (0.0, 0.0) coordinates_max = (2.0, 2.0) diff --git a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_earth.jl b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_earth.jl index 28a300cd68..28cdec12da 100644 --- a/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_earth.jl +++ b/examples/p4est_3d_dgsem/elixir_euler_source_terms_nonconforming_earth.jl @@ -68,7 +68,7 @@ boundary_condition = BoundaryConditionDirichlet(initial_condition) boundary_conditions = Dict(:inside => boundary_condition, :outside => boundary_condition) -surface_flux = flux_hll +surface_flux = FluxHLL(min_max_speed_naive) # Note that a free stream is not preserved if N < 2 * N_geo, where N is the # polydeg of the solver and N_geo is the polydeg of the mesh. # However, the FSP error is negligible in this example. diff --git a/examples/paper_self_gravitating_gas_dynamics/elixir_euler_convergence.jl b/examples/paper_self_gravitating_gas_dynamics/elixir_euler_convergence.jl index aabfce0f66..4f44d7b12a 100644 --- a/examples/paper_self_gravitating_gas_dynamics/elixir_euler_convergence.jl +++ b/examples/paper_self_gravitating_gas_dynamics/elixir_euler_convergence.jl @@ -8,7 +8,7 @@ equations = CompressibleEulerEquations2D(2.0) initial_condition = initial_condition_eoc_test_coupled_euler_gravity -solver = DGSEM(polydeg = 3, surface_flux = flux_hll) +solver = DGSEM(polydeg = 3, surface_flux = FluxHLL(min_max_speed_naive)) coordinates_min = (0.0, 0.0) coordinates_max = (2.0, 2.0) diff --git a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_convergence.jl b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_convergence.jl index ce1d2cd05b..49b9880357 100644 --- a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_convergence.jl +++ b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_convergence.jl @@ -10,7 +10,7 @@ gamma = 2.0 equations_euler = CompressibleEulerEquations2D(gamma) polydeg = 3 -solver_euler = DGSEM(polydeg, flux_hll) +solver_euler = DGSEM(polydeg, FluxHLL(min_max_speed_naive)) coordinates_min = (0.0, 0.0) coordinates_max = (2.0, 2.0) diff --git a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_jeans_instability.jl b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_jeans_instability.jl index f081f6bb91..7461198fbb 100644 --- a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_jeans_instability.jl +++ b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_jeans_instability.jl @@ -66,7 +66,7 @@ gamma = 5 / 3 equations_euler = CompressibleEulerEquations2D(gamma) polydeg = 3 -solver_euler = DGSEM(polydeg, flux_hll) +solver_euler = DGSEM(polydeg, FluxHLL(min_max_speed_naive)) coordinates_min = (0.0, 0.0) coordinates_max = (1.0, 1.0) diff --git 
a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_sedov_blast_wave.jl b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_sedov_blast_wave.jl index b7be232022..bc7ceb97c8 100644 --- a/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_sedov_blast_wave.jl +++ b/examples/paper_self_gravitating_gas_dynamics/elixir_eulergravity_sedov_blast_wave.jl @@ -85,7 +85,7 @@ function boundary_condition_sedov_self_gravity(u_inner, orientation, direction, end boundary_conditions = boundary_condition_sedov_self_gravity -surface_flux = flux_hll +surface_flux = FluxHLL(min_max_speed_naive) volume_flux = flux_chandrashekar polydeg = 3 basis = LobattoLegendreBasis(polydeg) diff --git a/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl b/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl index 98a9a5521a..cd10315945 100644 --- a/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl +++ b/examples/t8code_2d_dgsem/elixir_eulergravity_convergence.jl @@ -9,7 +9,7 @@ gamma = 2.0 equations_euler = CompressibleEulerEquations2D(gamma) polydeg = 3 -solver_euler = DGSEM(polydeg, flux_hll) +solver_euler = DGSEM(polydeg, FluxHLL(min_max_speed_naive)) coordinates_min = (0.0, 0.0) coordinates_max = (2.0, 2.0) diff --git a/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_nonperiodic.jl b/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_nonperiodic.jl index e55fffc101..9ed02c0e37 100644 --- a/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_nonperiodic.jl +++ b/examples/tree_1d_dgsem/elixir_shallowwater_well_balanced_nonperiodic.jl @@ -26,7 +26,9 @@ boundary_condition = BoundaryConditionDirichlet(initial_condition) # Get the DG approximation space volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -solver = DGSEM(polydeg = 4, surface_flux = (flux_hll, flux_nonconservative_fjordholm_etal), +solver = DGSEM(polydeg = 4, + surface_flux = (flux_hll, + flux_nonconservative_fjordholm_etal), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) ############################################################################### diff --git a/examples/tree_3d_dgsem/elixir_eulergravity_convergence.jl b/examples/tree_3d_dgsem/elixir_eulergravity_convergence.jl index 21ef661d0b..0a8c427bf8 100644 --- a/examples/tree_3d_dgsem/elixir_eulergravity_convergence.jl +++ b/examples/tree_3d_dgsem/elixir_eulergravity_convergence.jl @@ -10,7 +10,7 @@ equations_euler = CompressibleEulerEquations3D(gamma) initial_condition = initial_condition_eoc_test_coupled_euler_gravity polydeg = 3 -solver_euler = DGSEM(polydeg, flux_hll) +solver_euler = DGSEM(polydeg, FluxHLL(min_max_speed_naive)) coordinates_min = (0.0, 0.0, 0.0) coordinates_max = (2.0, 2.0, 2.0) diff --git a/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl b/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl index df1a69192c..38e1279e22 100644 --- a/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl +++ b/examples/unstructured_2d_dgsem/elixir_shallowwater_dirichlet.jl @@ -30,7 +30,9 @@ boundary_condition = Dict(:OuterCircle => boundary_condition_constant) # Get the DG approximation space volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal) -solver = DGSEM(polydeg = 4, surface_flux = (flux_hll, flux_nonconservative_fjordholm_etal), +solver = DGSEM(polydeg = 4, + surface_flux = (flux_hll, + flux_nonconservative_fjordholm_etal), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) 
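As in the elixir updates above, a setup that should keep reproducing results obtained with the old wave speed estimate can request it explicitly in the surface flux tuple. A minimal sketch for a shallow water solver; the flux choices are illustrative and not taken from the elixir:

using Trixi

volume_flux = (flux_wintermeyer_etal, flux_nonconservative_wintermeyer_etal)
# Explicitly request the naive estimate instead of relying on the new Davis default
surface_flux = (FluxHLL(min_max_speed_naive), flux_nonconservative_fjordholm_etal)

solver = DGSEM(polydeg = 4, surface_flux = surface_flux,
               volume_integral = VolumeIntegralFluxDifferencing(volume_flux))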
############################################################################### diff --git a/src/equations/numerical_fluxes.jl b/src/equations/numerical_fluxes.jl index 87fcb41224..6794c71a32 100644 --- a/src/equations/numerical_fluxes.jl +++ b/src/equations/numerical_fluxes.jl @@ -222,12 +222,12 @@ See [`FluxLaxFriedrichs`](@ref). const flux_lax_friedrichs = FluxLaxFriedrichs() """ - FluxHLL(min_max_speed=min_max_speed_naive) + FluxHLL(min_max_speed=min_max_speed_davis) Create an HLL (Harten, Lax, van Leer) numerical flux where the minimum and maximum wave speeds are estimated as `λ_min, λ_max = min_max_speed(u_ll, u_rr, orientation_or_normal_direction, equations)`, -defaulting to [`min_max_speed_naive`](@ref). +defaulting to [`min_max_speed_davis`](@ref). Original paper: - Amiram Harten, Peter D. Lax, Bram van Leer (1983) On Upstream Differencing and Godunov-Type Schemes for Hyperbolic Conservation Laws @@ -237,7 +237,7 @@ struct FluxHLL{MinMaxSpeed} min_max_speed::MinMaxSpeed end -FluxHLL() = FluxHLL(min_max_speed_naive) +FluxHLL() = FluxHLL(min_max_speed_davis) """ min_max_speed_naive(u_ll, u_rr, orientation::Integer, equations) @@ -246,10 +246,16 @@ FluxHLL() = FluxHLL(min_max_speed_naive) Simple and fast estimate(!) of the minimal and maximal wave speed of the Riemann problem with left and right states `u_ll, u_rr`, usually based only on the local wave speeds associated to `u_ll` and `u_rr`. +Slightly more diffusive than [`min_max_speed_davis`](@ref). - Amiram Harten, Peter D. Lax, Bram van Leer (1983) On Upstream Differencing and Godunov-Type Schemes for Hyperbolic Conservation Laws [DOI: 10.1137/1025002](https://doi.org/10.1137/1025002) +See eq. (10.37) from +- Eleuterio F. Toro (2009) + Riemann Solvers and Numerical Methods for Fluid Dynamics: A Practical Introduction + [DOI: 10.1007/b79761](https://doi.org/10.1007/b79761) + See also [`FluxHLL`](@ref), [`min_max_speed_davis`](@ref), [`min_max_speed_einfeldt`](@ref). """ function min_max_speed_naive end @@ -266,6 +272,10 @@ left and right states `u_ll, u_rr`, usually based only on the local wave speeds Simplified Second-Order Godunov-Type Methods [DOI: 10.1137/0909030](https://doi.org/10.1137/0909030) +See eq. (10.38) from +- Eleuterio F. Toro (2009) + Riemann Solvers and Numerical Methods for Fluid Dynamics: A Practical Introduction + [DOI: 10.1007/b79761](https://doi.org/10.1007/b79761) See also [`FluxHLL`](@ref), [`min_max_speed_naive`](@ref), [`min_max_speed_einfeldt`](@ref). 
""" function min_max_speed_davis end diff --git a/test/test_dgmulti_1d.jl b/test/test_dgmulti_1d.jl index 0363086341..e470de71ef 100644 --- a/test/test_dgmulti_1d.jl +++ b/test/test_dgmulti_1d.jl @@ -128,14 +128,12 @@ end @trixi_testset "elixir_euler_fdsbp_periodic.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_fdsbp_periodic.jl"), l2=[ - 9.146929180585711e-7, - 1.8997616878017292e-6, - 3.991417702211889e-6, + 9.146929178341782e-7, 1.8997616876521201e-6, + 3.991417701005622e-6, ], linf=[ - 1.7321089884614338e-6, - 3.3252888855805907e-6, - 6.5252787737613005e-6, + 1.7321089882393892e-6, 3.3252888869128583e-6, + 6.525278767988141e-6, ]) show(stdout, semi.solver.basis) show(stdout, MIME"text/plain"(), semi.solver.basis) diff --git a/test/test_dgmulti_2d.jl b/test/test_dgmulti_2d.jl index 892c8ed37f..ab6b505e20 100644 --- a/test/test_dgmulti_2d.jl +++ b/test/test_dgmulti_2d.jl @@ -17,6 +17,7 @@ isdir(outdir) && rm(outdir, recursive = true) @trixi_testset "elixir_euler_weakform.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_weakform.jl"), cells_per_dimension=(4, 4), + surface_integral=SurfaceIntegralWeakForm(FluxHLL(min_max_speed_naive)), # division by 2.0 corresponds to normalization by the square root of the size of the domain l2=[ 0.0013536930300254945, @@ -44,6 +45,7 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_weakform.jl"), cells_per_dimension=(4, 4), approximation_type=SBP(), + surface_integral=SurfaceIntegralWeakForm(FluxHLL(min_max_speed_naive)), # division by 2.0 corresponds to normalization by the square root of the size of the domain l2=[ 0.0074706882014934735, @@ -71,6 +73,7 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_weakform.jl"), cells_per_dimension=(4, 4), element_type=Quad(), + surface_integral=SurfaceIntegralWeakForm(FluxHLL(min_max_speed_naive)), # division by 2.0 corresponds to normalization by the square root of the size of the domain l2=[ 0.00031892254415307093, @@ -184,16 +187,12 @@ end @trixi_testset "elixir_euler_bilinear.jl (Bilinear quadrilateral elements, SBP, flux differencing)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_bilinear.jl"), l2=[ - 1.0259435706215337e-5, - 9.014090233720625e-6, - 9.014090233223014e-6, - 2.738953587401793e-5, + 1.0259432774540821e-5, 9.014087689495575e-6, + 9.01408768888544e-6, 2.738953324859446e-5, ], linf=[ - 7.362609083649829e-5, - 6.874188055272512e-5, - 6.874188052830021e-5, - 0.0001912435192696904, + 7.362605996297233e-5, 6.874189724781488e-5, + 6.874189703509614e-5, 0.00019124355334110277, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -208,16 +207,12 @@ end @trixi_testset "elixir_euler_curved.jl (Quadrilateral elements, SBP, flux differencing)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_curved.jl"), l2=[ - 1.720476068165337e-5, - 1.592168205710526e-5, - 1.592168205812963e-5, - 4.894094865697305e-5, + 1.7204593127904542e-5, 1.5921547179522804e-5, + 1.5921547180107928e-5, 4.894071422525737e-5, ], linf=[ - 0.00010525416930584619, - 0.00010003778091061122, - 0.00010003778085621029, - 0.00036426282101720275, + 0.00010525416937667842, 0.00010003778102718464, + 0.00010003778071832059, 0.0003642628211952825, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -232,6 +227,7 @@ end @trixi_testset "elixir_euler_curved.jl (Quadrilateral elements, GaussSBP, flux differencing)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, 
"elixir_euler_curved.jl"), approximation_type=GaussSBP(), + surface_integral=SurfaceIntegralWeakForm(FluxHLL(min_max_speed_naive)), l2=[ 3.4666312079259457e-6, 3.4392774480368986e-6, @@ -259,6 +255,7 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_curved.jl"), element_type=Tri(), approximation_type=Polynomial(), volume_integral=VolumeIntegralWeakForm(), + surface_integral=SurfaceIntegralWeakForm(FluxHLL(min_max_speed_naive)), l2=[ 7.905498158659466e-6, 8.731690809663625e-6, @@ -330,16 +327,12 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_weakform_periodic.jl"), # division by 2.0 corresponds to normalization by the square root of the size of the domain l2=[ - 0.0014986508075708323, - 0.001528523420746786, - 0.0015285234207473158, - 0.004846505183839211, - ] ./ 2.0, + 0.0007492755162295128, 0.0007641875305302599, + 0.0007641875305306243, 0.0024232389721009447, + ], linf=[ - 0.0015062108658376872, - 0.0019373508504645365, - 0.0019373508504538783, - 0.004742686826709086, + 0.0015060064614331736, 0.0019371156800773726, + 0.0019371156800769285, 0.004742431684202408, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -354,16 +347,12 @@ end @trixi_testset "elixir_euler_triangulate_pkg_mesh.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_triangulate_pkg_mesh.jl"), l2=[ - 2.344080455438114e-6, - 1.8610038753097983e-6, - 2.4095165666095305e-6, - 6.373308158814308e-6, + 2.344076909832665e-6, 1.8610002398709756e-6, + 2.4095132179484066e-6, 6.37330249340445e-6, ], linf=[ - 2.5099852761334418e-5, - 2.2683684021362893e-5, - 2.6180448559287584e-5, - 5.5752932611508044e-5, + 2.509979394305084e-5, 2.2683711321080935e-5, + 2.6180377720841363e-5, 5.575278031910713e-5, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -435,16 +424,12 @@ end "elixir_euler_rayleigh_taylor_instability.jl"), cells_per_dimension=(8, 8), tspan=(0.0, 0.2), l2=[ - 0.0709665896982514, - 0.005182828752164663, - 0.013832655585206478, - 0.03247013800580221, + 0.07097806723891838, 0.005168550941966817, + 0.013820912272220933, 0.03243357220022434, ], linf=[ - 0.4783963902824797, - 0.022527207050681054, - 0.040307056293369226, - 0.0852365428206836, + 0.4783395896753895, 0.02244629340135818, + 0.04023357731088538, 0.08515807256615027, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -604,16 +589,12 @@ end @trixi_testset "elixir_euler_fdsbp_periodic.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_fdsbp_periodic.jl"), l2=[ - 1.3333320340010056e-6, - 2.044834627970641e-6, - 2.044834627855601e-6, - 5.282189803559564e-6, + 1.333332033888785e-6, 2.044834627786368e-6, + 2.0448346278315884e-6, 5.282189803437435e-6, ], linf=[ - 2.7000151718858945e-6, - 3.988595028259212e-6, - 3.9885950273710336e-6, - 8.848583042286862e-6, + 2.7000151703315822e-6, 3.988595025372632e-6, + 3.9885950240403645e-6, 8.848583036513702e-6, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -628,6 +609,7 @@ end @trixi_testset "elixir_euler_fdsbp_periodic.jl (arbitrary reference domain)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_fdsbp_periodic.jl"), xmin=-200.0, xmax=100.0, #= parameters for reference interval =# + surface_flux=FluxHLL(min_max_speed_naive), l2=[ 1.333332034149886e-6, 2.0448346280892024e-6, @@ -659,6 +641,7 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, 
"elixir_euler_fdsbp_periodic.jl"), approximation_type=D, coordinates_min=(-3.0, -4.0), coordinates_max=(0.0, -1.0), + surface_flux=FluxHLL(min_max_speed_naive), l2=[ 0.07318831033918516, 0.10039910610067465, @@ -691,6 +674,7 @@ end global D = SummationByPartsOperators.couple_continuously(D_local, mesh_local) @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_fdsbp_periodic.jl"), approximation_type=D, + surface_flux=FluxHLL(min_max_speed_naive), l2=[ 1.5440402410017893e-5, 1.4913189903083485e-5, diff --git a/test/test_dgmulti_3d.jl b/test/test_dgmulti_3d.jl index 3a1db25548..fa70b11447 100644 --- a/test/test_dgmulti_3d.jl +++ b/test/test_dgmulti_3d.jl @@ -17,20 +17,15 @@ isdir(outdir) && rm(outdir, recursive = true) # 3d tet/hex tests @trixi_testset "elixir_euler_weakform.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_weakform.jl"), - # division by sqrt(8.0) corresponds to normalization by the square root of the size of the domain l2=[ - 0.0010029534292051608, - 0.0011682205957721673, - 0.001072975385793516, - 0.000997247778892257, - 0.0039364354651358294, - ] ./ sqrt(8), + 0.000354593110864001, 0.00041301573702385284, + 0.00037934556184883277, 0.0003525767114354012, + 0.0013917457634530887, + ], linf=[ - 0.003660737033303718, - 0.005625620600749226, - 0.0030566354814669516, - 0.0041580358824311325, - 0.019326660236036464, + 0.0036608123230692513, 0.005625540942772123, + 0.0030565781898950206, 0.004158099048202857, + 0.01932716837214299, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -74,6 +69,7 @@ end @trixi_testset "elixir_euler_weakform.jl (Hexahedral elements)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_weakform.jl"), element_type=Hex(), + surface_integral=SurfaceIntegralWeakForm(FluxHLL(min_max_speed_naive)), # division by sqrt(8.0) corresponds to normalization by the square root of the size of the domain l2=[ 0.00030580190715769566, @@ -102,18 +98,13 @@ end @trixi_testset "elixir_euler_curved.jl (Hex elements, SBP, flux differencing)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_curved.jl"), l2=[ - 0.018354883045936066, - 0.024412704052042846, - 0.024408520416087945, - 0.01816314570880129, - 0.039342805507972006, + 0.01835488304593566, 0.024412704052042534, + 0.02440852041608929, 0.018163145708800853, + 0.03934280550797125, ], linf=[ - 0.14862225990775757, - 0.28952368161864683, - 0.2912054484817035, - 0.1456603133854122, - 0.3315354586775472, + 0.14862225990793032, 0.2895236816183626, 0.291205448481636, + 0.14566031338563246, 0.33153545867790246, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -129,18 +120,14 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_curved.jl"), approximation_type=GaussSBP(), l2=[ - 0.002631131519508634, - 0.0029144224044954105, - 0.002913889110662827, - 0.002615140832314194, - 0.006881528610614373, + 0.0026311315195097097, 0.002914422404496567, + 0.0029138891106640368, 0.002615140832315232, + 0.006881528610616624, ], linf=[ - 0.020996114874140215, - 0.021314522450134543, - 0.021288322783006297, - 0.020273381695435244, - 0.052598740390024545, + 0.02099611487415931, 0.021314522450152307, + 0.021288322783027613, 0.020273381695449455, + 0.05259874039006007, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -154,20 +141,15 @@ end @trixi_testset "elixir_euler_weakform_periodic.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, 
"elixir_euler_weakform_periodic.jl"), - # division by sqrt(8.0) corresponds to normalization by the square root of the size of the domain l2=[ - 0.0010317074322517949, - 0.0012277090547035293, - 0.0011273991123913515, - 0.0010418496196130177, - 0.004058878478404962, - ] ./ sqrt(8), + 0.00036475807571383924, 0.00043404536371780537, + 0.0003985850214093045, 0.0003683451584072326, + 0.00143503620472638, + ], linf=[ - 0.003227752881827861, - 0.005620317864620361, - 0.0030514833972379307, - 0.003987027618439498, - 0.019282224709831652, + 0.0032278615418719347, 0.005620238272054934, + 0.0030514261010661237, 0.0039871165455998, + 0.019282771780667396, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -182,6 +164,7 @@ end @trixi_testset "elixir_euler_weakform_periodic.jl (Hexahedral elements)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_weakform_periodic.jl"), element_type=Hex(), + surface_integral=SurfaceIntegralWeakForm(FluxHLL(min_max_speed_naive)), # division by sqrt(8.0) corresponds to normalization by the square root of the size of the domain l2=[ 0.00034230612468547436, diff --git a/test/test_parabolic_3d.jl b/test/test_parabolic_3d.jl index 1eaa9f51a5..863daeeaf3 100644 --- a/test/test_parabolic_3d.jl +++ b/test/test_parabolic_3d.jl @@ -400,6 +400,7 @@ end @test_trixi_include(joinpath(examples_dir(), "p4est_3d_dgsem", "elixir_navierstokes_taylor_green_vortex.jl"), initial_refinement_level=2, tspan=(0.0, 0.25), + surface_flux=FluxHLL(min_max_speed_naive), l2=[ 0.0001547509861140407, 0.015637861347119624, diff --git a/test/test_special_elixirs.jl b/test/test_special_elixirs.jl index ba670a6025..277ade9bd5 100644 --- a/test/test_special_elixirs.jl +++ b/test/test_special_elixirs.jl @@ -286,7 +286,8 @@ end equations = CompressibleEulerEquations1D(1.4) mesh = TreeMesh((-1.0,), (1.0,), initial_refinement_level = 3, n_cells_max = 10^4) - solver = DGSEM(3, flux_hll, VolumeIntegralFluxDifferencing(flux_ranocha)) + solver = DGSEM(3, FluxHLL(min_max_speed_naive), + VolumeIntegralFluxDifferencing(flux_ranocha)) initial_condition = (x, t, equations) -> begin rho = 2 + sinpi(k * sum(x)) v1 = 0.1 diff --git a/test/test_structured_2d.jl b/test/test_structured_2d.jl index f5fb169033..64a1faf05b 100644 --- a/test/test_structured_2d.jl +++ b/test/test_structured_2d.jl @@ -606,16 +606,12 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_rayleigh_taylor_instability.jl"), l2=[ - 0.06365630381017849, - 0.007166887387738937, - 0.002878708825497772, - 0.010247678114070121, + 0.06365630515019809, 0.007166887172039836, + 0.0028787103533600804, 0.010247678008197966, ], linf=[ - 0.4799214336153155, - 0.024595483032220266, - 0.02059808120543466, - 0.03190756362943725, + 0.47992143569849377, 0.02459548251933757, + 0.02059810091623976, 0.0319077000843877, ], cells_per_dimension=(8, 8), tspan=(0.0, 0.3)) @@ -659,14 +655,12 @@ end @trixi_testset "elixir_eulerpolytropic_convergence.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_convergence.jl"), l2=[ - 0.0016688820596537988, - 0.0025921681885685425, - 0.003280950351435014, + 0.00166898321776379, 0.00259202637930991, + 0.0032810744946276406, ], linf=[ - 0.010994679664394269, - 0.01331197845637, - 0.020080117011346488, + 0.010994883201888683, 0.013309526619369905, + 0.020080326611175536, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -678,18 +672,19 @@ end end end -@trixi_testset 
"elixir_eulerpolytropic_convergence.jl: HLL(Davis)" begin - @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_convergence.jl"), +@trixi_testset "elixir_eulerpolytropic_convergence.jl with FluxHLL(min_max_speed_naive)" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, + "elixir_eulerpolytropic_convergence.jl"), solver=DGSEM(polydeg = 3, - surface_flux = FluxHLL(min_max_speed_davis), + surface_flux = FluxHLL(min_max_speed_naive), volume_integral = VolumeIntegralFluxDifferencing(volume_flux)), l2=[ - 0.0016689832177644243, 0.0025920263793104445, - 0.003281074494629298, + 0.001668882059653298, 0.002592168188567654, + 0.0032809503514328307, ], linf=[ - 0.01099488320190023, 0.013309526619350365, - 0.02008032661117909, + 0.01099467966437917, 0.013311978456333584, + 0.020080117011337606, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -727,14 +722,12 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_isothermal_wave.jl"), l2=[ - 0.004998778491726366, - 0.004998916000294425, - 9.259136963058664e-17, + 0.004998778512795407, 0.004998916021367992, + 8.991558055435833e-17, ], linf=[ - 0.010001103673834888, - 0.010051165098399503, - 7.623942913643681e-16, + 0.010001103632831354, 0.010051165055185603, + 7.60697457718599e-16, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -749,14 +742,12 @@ end @trixi_testset "elixir_eulerpolytropic_wave.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulerpolytropic_wave.jl"), l2=[ - 0.23642682112204072, - 0.20904264390331334, - 8.174982691297391e-17, + 0.23642871172548174, 0.2090519382039672, + 8.778842676292274e-17, ], linf=[ - 0.4848250368349989, - 0.253350873815695, - 4.984552457753618e-16, + 0.4852276879687425, 0.25327870807625175, + 5.533921691832115e-16, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_threaded.jl b/test/test_threaded.jl index a8a1b1b425..7365dcef21 100644 --- a/test/test_threaded.jl +++ b/test/test_threaded.jl @@ -394,10 +394,10 @@ end "elixir_euler_curved.jl"), alg=RDPK3SpFSAL49(thread = OrdinaryDiffEq.True()), l2=[ - 1.720476068165337e-5, - 1.592168205710526e-5, - 1.592168205812963e-5, - 4.894094865697305e-5, + 1.7204593127904542e-5, + 1.5921547179522804e-5, + 1.5921547180107928e-5, + 4.894071422525737e-5, ], linf=[ 0.00010525416930584619, @@ -420,16 +420,16 @@ end @test_trixi_include(joinpath(examples_dir(), "dgmulti_2d", "elixir_euler_triangulate_pkg_mesh.jl"), l2=[ - 2.344080455438114e-6, - 1.8610038753097983e-6, - 2.4095165666095305e-6, - 6.373308158814308e-6, + 2.344076909832665e-6, + 1.8610002398709756e-6, + 2.4095132179484066e-6, + 6.37330249340445e-6, ], linf=[ - 2.5099852761334418e-5, - 2.2683684021362893e-5, - 2.6180448559287584e-5, - 5.5752932611508044e-5, + 2.509979394305084e-5, + 2.2683711321080935e-5, + 2.6180377720841363e-5, + 5.575278031910713e-5, ]) # Ensure that we do not have excessive memory allocations diff --git a/test/test_tree_1d_euler.jl b/test/test_tree_1d_euler.jl index 39a1f6e30b..f26500b411 100644 --- a/test/test_tree_1d_euler.jl +++ b/test/test_tree_1d_euler.jl @@ -221,11 +221,11 @@ end @trixi_testset "elixir_euler_ec.jl with flux_hll" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_ec.jl"), - l2=[0.07852272782240548, 0.10209790867523805, 0.293873048809011], + l2=[0.07855251823583848, 0.10213903748267686, 0.293985892532479], linf=[ - 0.19244768908604093, - 0.2515941686151897, - 
0.7258000837553769, + 0.192621556068018, + 0.25184744005299536, + 0.7264977555504792, ], maxiters=10, surface_flux=flux_hll, diff --git a/test/test_tree_1d_eulergravity.jl b/test/test_tree_1d_eulergravity.jl index 9ab5b287d0..17bc0c71a7 100644 --- a/test/test_tree_1d_eulergravity.jl +++ b/test/test_tree_1d_eulergravity.jl @@ -13,14 +13,12 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") @trixi_testset "elixir_eulergravity_convergence.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_eulergravity_convergence.jl"), l2=[ - 0.0002170799126638106, - 0.0002913792848717502, - 0.0006112320856262327, + 0.00021708496949694728, 0.0002913795242132917, + 0.0006112500956552259, ], linf=[ - 0.0004977401033188222, - 0.0013594223337776157, - 0.002041891084400227, + 0.0004977733237385706, 0.0013594226727522418, + 0.0020418739554664, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_tree_1d_shallowwater.jl b/test/test_tree_1d_shallowwater.jl index f9be63b87f..41ad5c32bb 100644 --- a/test/test_tree_1d_shallowwater.jl +++ b/test/test_tree_1d_shallowwater.jl @@ -143,17 +143,18 @@ end @trixi_testset "elixir_shallowwater_source_terms.jl with flux_hll" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), l2=[ - 0.0022758146627220154, - 0.015864082886204556, + 0.002275023323848826, + 0.015861093821754046, 4.436491725585346e-5, ], linf=[ - 0.008457195427364006, - 0.057201667446161064, + 0.008461451098266792, + 0.05722331401673486, 9.098379777405796e-5, ], tspan=(0.0, 0.025), - surface_flux=(flux_hll, flux_nonconservative_fjordholm_etal)) + surface_flux=(flux_hll, + flux_nonconservative_fjordholm_etal)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -228,7 +229,7 @@ end 0.05720939349382359, 9.098379777405796e-5, ], - surface_flux=(FluxHydrostaticReconstruction(flux_hll, + surface_flux=(FluxHydrostaticReconstruction(FluxHLL(min_max_speed_naive), hydrostatic_reconstruction_audusse_etal), flux_nonconservative_audusse_etal), tspan=(0.0, 0.025)) @@ -255,7 +256,9 @@ end 3.469453422316143e-15, 3.844551077492042e-8, ], - tspan=(0.0, 0.25)) + tspan=(0.0, 0.25), + surface_flux=(FluxHLL(min_max_speed_naive), + flux_nonconservative_fjordholm_etal),) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -280,6 +283,8 @@ end 3.844551077492042e-8, ], tspan=(0.0, 0.25), + surface_flux=(FluxHLL(min_max_speed_naive), + flux_nonconservative_fjordholm_etal), boundary_condition=boundary_condition_slip_wall) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) diff --git a/test/test_tree_2d_mhd.jl b/test/test_tree_2d_mhd.jl index 1f8458075a..66b47138a4 100644 --- a/test/test_tree_2d_mhd.jl +++ b/test/test_tree_2d_mhd.jl @@ -183,7 +183,7 @@ end end end -@trixi_testset "elixir_mhd_orszag_tang.jl with flux_hll" begin +@trixi_testset "elixir_mhd_orszag_tang.jl with flux_hlle" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_orszag_tang.jl"), l2=[ 0.10806619664693064, diff --git a/test/test_tree_2d_shallowwater.jl b/test/test_tree_2d_shallowwater.jl index 93a8cb6366..0174264473 100644 --- a/test/test_tree_2d_shallowwater.jl +++ b/test/test_tree_2d_shallowwater.jl @@ -195,6 +195,33 @@ end end @trixi_testset "elixir_shallowwater_source_terms.jl with flux_hll" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), + l2=[ + 
0.0018952610547425214, + 0.016943425162728183, + 0.017556784292859465, + 6.274146767717414e-5, + ], + linf=[ + 0.0151635341334182, + 0.07967467926956129, + 0.08400050790965174, + 0.0001819675955490041, + ], + tspan=(0.0, 0.025), + surface_flux=(flux_hll, + flux_nonconservative_fjordholm_etal)) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "elixir_shallowwater_source_terms.jl with FluxHLL(min_max_speed_naive)" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), l2=[ 0.0018957692481057034, @@ -209,7 +236,8 @@ end 0.0001819675955490041, ], tspan=(0.0, 0.025), - surface_flux=(flux_hll, flux_nonconservative_fjordholm_etal)) + surface_flux=(FluxHLL(min_max_speed_naive), + flux_nonconservative_fjordholm_etal)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let diff --git a/test/test_tree_3d_euler.jl b/test/test_tree_3d_euler.jl index 02e657e001..e9e2b82fec 100644 --- a/test/test_tree_3d_euler.jl +++ b/test/test_tree_3d_euler.jl @@ -92,18 +92,14 @@ end @trixi_testset "elixir_euler_convergence.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_convergence.jl"), l2=[ - 0.0003637241020254405, - 0.0003955570866382718, - 0.0003955570866383613, - 0.00039555708663834417, - 0.0007811613481640202, + 0.0003637241020254673, 0.00039555708663848046, + 0.00039555708663832644, 0.0003955570866385083, + 0.0007811613481643962, ], linf=[ - 0.0024000660244674066, - 0.0029635410025339315, - 0.0029635410025292686, - 0.002963541002525938, - 0.007191437359396424, + 0.0024000660244567484, 0.002963541002521053, + 0.0029635410025201647, 0.002963541002522385, + 0.007191437359379549, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -462,6 +458,7 @@ end 2.397746252817731, ], maxiters=5, max_level=6, + surface_flux=FluxHLL(min_max_speed_naive), coverage_override=(maxiters = 2, initial_refinement_level = 1, base_level = 1, max_level = 3)) # Ensure that we do not have excessive memory allocations diff --git a/test/test_tree_3d_mhd.jl b/test/test_tree_3d_mhd.jl index e75685f0b4..74107d462d 100644 --- a/test/test_tree_3d_mhd.jl +++ b/test/test_tree_3d_mhd.jl @@ -184,9 +184,9 @@ end end end -@trixi_testset "elixir_mhd_alfven_wave.jl with Orszag-Tang setup + flux_hll" begin +@trixi_testset "elixir_mhd_alfven_wave.jl with Orszag-Tang setup + flux_hlle" begin # OBS! 
This setup does not make much sense and is only used to exercise all components of the - # flux_hll implementation + # flux_hlle implementation @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_mhd_alfven_wave.jl"), l2=[ 0.004391143689111404, diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index 04eb9f679a..87d677e162 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -17,16 +17,12 @@ isdir(outdir) && rm(outdir, recursive = true) @trixi_testset "elixir_euler_periodic.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_periodic.jl"), l2=[ - 0.00010978828464875207, - 0.00013010359527356914, - 0.00013010359527326057, - 0.0002987656724828824, + 0.0001099216141882387, 0.0001303795774982892, + 0.00013037957749794242, 0.0002993727892598759, ], linf=[ - 0.00638626102818618, - 0.009804042508242183, - 0.009804042508253286, - 0.02183139311614468, + 0.006407280810928562, 0.009836067015418948, + 0.009836067015398076, 0.021903519038095176, ]) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -41,16 +37,12 @@ end @trixi_testset "elixir_euler_free_stream.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_free_stream.jl"), l2=[ - 3.3937971107485363e-14, - 2.447586447887882e-13, - 1.4585205789296455e-13, - 4.716993468962946e-13, + 3.3937365073416665e-14, 2.44759188939065e-13, + 1.4585198700082895e-13, 4.716940764877479e-13, ], linf=[ - 8.804734719092266e-12, - 6.261270668606045e-11, - 2.93670088247211e-11, - 1.205400224080222e-10, + 8.804956763697191e-12, 6.261199891888225e-11, + 2.936639820205755e-11, 1.20543575121701e-10, ], tspan=(0.0, 0.1), atol=3.0e-13) @@ -78,7 +70,8 @@ end 0.29339040847600434, 0.5915610037764794, ], - tspan=(0.0, 0.25)) + tspan=(0.0, 0.25), + surface_flux=FluxHLL(min_max_speed_naive)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -407,15 +400,15 @@ end @trixi_testset "elixir_shallowwater_source_terms.jl with FluxHydrostaticReconstruction" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), l2=[ - 0.0011197139793938152, - 0.015430259691310781, - 0.017081031802719724, + 0.001119678684752799, + 0.015429108794630785, + 0.01708275441241111, 5.089218476758271e-6, ], linf=[ - 0.014300809338967824, - 0.12783372461225184, - 0.17625472321992852, + 0.014299564388827513, + 0.12785126473870534, + 0.17626788561725526, 2.6407324614341476e-5, ], surface_flux=(FluxHydrostaticReconstruction(flux_hll, @@ -464,18 +457,19 @@ end @trixi_testset "elixir_shallowwater_source_terms.jl with flux_hll" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_source_terms.jl"), l2=[ - 0.0011197139793938727, - 0.015430259691311309, - 0.017081031802719554, + 0.0011196786847528799, + 0.015429108794631075, + 0.017082754412411742, 5.089218476759981e-6, ], linf=[ - 0.014300809338967824, - 0.12783372461224918, - 0.17625472321993918, + 0.014299564388830177, + 0.12785126473870667, + 0.17626788561728546, 2.6407324614341476e-5, ], - surface_flux=(flux_hll, flux_nonconservative_fjordholm_etal), + surface_flux=(flux_hll, + flux_nonconservative_fjordholm_etal), tspan=(0.0, 0.025)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) @@ -490,16 +484,12 @@ end @trixi_testset "elixir_shallowwater_dirichlet.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_dirichlet.jl"), l2=[ - 1.1577518608940115e-5, - 4.867189932537344e-13, - 
4.647273240470541e-13, - 1.1577518608933468e-5, + 1.1577518608938916e-5, 4.859252379740366e-13, + 4.639600837197925e-13, 1.1577518608952174e-5, ], linf=[ - 8.394063878602864e-5, - 1.1469760027632646e-10, - 1.1146619484429974e-10, - 8.394063879602065e-5, + 8.3940638787805e-5, 1.1446362498574484e-10, + 1.1124515748367981e-10, 8.39406387962427e-5, ], tspan=(0.0, 2.0)) # Ensure that we do not have excessive memory allocations @@ -516,16 +506,12 @@ end @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_shallowwater_wall_bc_shockcapturing.jl"), l2=[ - 0.04444388691670699, - 0.1527771788033111, - 0.1593763537203512, - 6.225080476986749e-8, + 0.0442113635677511, 0.1537465759364839, 0.16003586586203947, + 6.225080477067782e-8, ], linf=[ - 0.6526506870169639, - 1.980765893182952, - 2.4807635459119757, - 3.982097158683473e-7, + 0.6347820607387928, 2.0078125433846736, 2.530726684667019, + 3.982097165344811e-7, ], tspan=(0.0, 0.05)) # Ensure that we do not have excessive memory allocations @@ -609,12 +595,12 @@ end 1.0066867437607972e-13, 6.889210012578449e-14, 1.568290814572709e-13], - linf=[2.353373051988683e-10, - 2.801543719233024e-11, - 3.930469838486772e-11, + linf=[5.6139981552405516e-11, + 2.842849566864203e-11, + 1.8290174930157832e-11, 4.61017890529547e-11], tspan=(0.0, 0.1), - atol=1.0e-11) + atol=1.0e-10) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let From c7693aaf06587de194147c9c70888c124ed2daf3 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 23 Feb 2024 13:47:12 +0100 Subject: [PATCH 26/58] set version to v0.7.0; closes #1726 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 6b27e6e999..2fb8e19682 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.6.11-pre" +version = "0.7.0" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 27d91b3ceeb15e1079b815268738b365635c6cab Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 23 Feb 2024 13:47:49 +0100 Subject: [PATCH 27/58] set development version to v0.7.1-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 2fb8e19682..800c7b4c0f 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.7.0" +version = "0.7.1-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 4ee60e7973ff78d553dad65f7f4760443891bdb6 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Sat, 24 Feb 2024 05:57:09 +0100 Subject: [PATCH 28/58] CompatHelper: bump compat for Trixi to 0.7 for package benchmark, (keep existing compat) (#1853) Co-authored-by: CompatHelper Julia --- benchmark/Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/benchmark/Project.toml b/benchmark/Project.toml index e94144cfd1..51d271e65f 100644 --- a/benchmark/Project.toml +++ b/benchmark/Project.toml @@ -8,4 +8,4 @@ Trixi = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" BenchmarkTools = "0.5, 0.7, 1.0" OrdinaryDiffEq = "5.65, 6" PkgBenchmark = "0.2.10" -Trixi = "0.4, 0.5, 0.6" +Trixi = "0.4, 0.5, 0.6, 0.7" From e205b0637d72341041f59cc740933a2bc5835687 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Fri, 1 Mar 2024 17:30:44 +0100 Subject: [PATCH 29/58] Bump crate-ci/typos from 1.18.0 to 1.18.2 (#1856) Bumps [crate-ci/typos](https://github.com/crate-ci/typos) from 1.18.0 to 1.18.2. - [Release notes](https://github.com/crate-ci/typos/releases) - [Changelog](https://github.com/crate-ci/typos/blob/master/CHANGELOG.md) - [Commits](https://github.com/crate-ci/typos/compare/v1.18.0...v1.18.2) --- updated-dependencies: - dependency-name: crate-ci/typos dependency-type: direct:production update-type: version-update:semver-patch ... Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- .github/workflows/SpellCheck.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/SpellCheck.yml b/.github/workflows/SpellCheck.yml index b242b6e811..87e34cb50f 100644 --- a/.github/workflows/SpellCheck.yml +++ b/.github/workflows/SpellCheck.yml @@ -10,4 +10,4 @@ jobs: - name: Checkout Actions Repository uses: actions/checkout@v4 - name: Check spelling - uses: crate-ci/typos@v1.18.0 + uses: crate-ci/typos@v1.18.2 From 8cdb93892dead6e33ae581e60fd8199aec4ee19e Mon Sep 17 00:00:00 2001 From: Andrew Winters Date: Wed, 6 Mar 2024 13:24:50 +0100 Subject: [PATCH 30/58] Upwind SBP on curved meshes (#1857) * baseline implementation of the curvilinear USBP for testing * working version of curvilinear upwind solver. 
Needs significant cleanup of debugging statements and different variants of the rotated flux vector splittings * cleanup of the fdsbp_2d file * clean-up FVS routines in the compressible Euler file * cleanup and remove unnecessary containers * add tests for the new solver * remove extra space * run formatter * Apply suggestions from code review Co-authored-by: Hendrik Ranocha * add specialized calc_metric_terms function for upwind type * revert change to the surface integral function * add reference for curvilinear van Leer splitting * new splitting_drikakis_tsangaris in Cartesian and generalized coordinates * added test for Cartesian splitting_drikakis_tsangaris * run formatter * Update src/equations/compressible_euler_2d.jl * remove orientation_or_normal from Steger-Warming --------- Co-authored-by: Hendrik Ranocha --- .../elixir_euler_free_stream_upwind.jl | 86 ++++++ .../elixir_euler_source_terms_upwind.jl | 87 ++++++ src/Trixi.jl | 3 +- src/equations/compressible_euler_2d.jl | 290 +++++++++++++++++- src/equations/numerical_fluxes.jl | 12 +- src/solvers/dgsem_unstructured/dg_2d.jl | 2 +- src/solvers/fdsbp_tree/fdsbp_2d.jl | 2 +- .../fdsbp_unstructured/containers_2d.jl | 10 +- src/solvers/fdsbp_unstructured/fdsbp_2d.jl | 99 +++++- test/test_tree_2d_fdsbp.jl | 26 ++ test/test_unstructured_2d.jl | 70 +++++ 11 files changed, 659 insertions(+), 28 deletions(-) create mode 100644 examples/unstructured_2d_fdsbp/elixir_euler_free_stream_upwind.jl create mode 100644 examples/unstructured_2d_fdsbp/elixir_euler_source_terms_upwind.jl diff --git a/examples/unstructured_2d_fdsbp/elixir_euler_free_stream_upwind.jl b/examples/unstructured_2d_fdsbp/elixir_euler_free_stream_upwind.jl new file mode 100644 index 0000000000..2a1956f9d1 --- /dev/null +++ b/examples/unstructured_2d_fdsbp/elixir_euler_free_stream_upwind.jl @@ -0,0 +1,86 @@ +# !!! warning "Experimental implementation (upwind SBP)" +# This is an experimental feature and may change in future releases. + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +initial_condition = initial_condition_constant + +# Boundary conditions for free-stream preservation test +boundary_condition_free_stream = BoundaryConditionDirichlet(initial_condition) + +boundary_conditions = Dict(:outerCircle => boundary_condition_free_stream, + :cone1 => boundary_condition_free_stream, + :cone2 => boundary_condition_free_stream, + :iceCream => boundary_condition_free_stream) + +############################################################################### +# Get the Upwind FDSBP approximation space + +# TODO: FDSBP +# Note, one must set `xmin=-1` and `xmax=1` due to the reuse +# of interpolation routines from `calc_node_coordinates!` to create +# the physical coordinates in the mappings. 
+D_upw = upwind_operators(SummationByPartsOperators.Mattsson2017, + derivative_order = 1, + accuracy_order = 8, + xmin = -1.0, xmax = 1.0, + N = 17) + +flux_splitting = splitting_vanleer_haenel +solver = FDSBP(D_upw, + surface_integral = SurfaceIntegralStrongForm(FluxUpwind(flux_splitting)), + volume_integral = VolumeIntegralUpwind(flux_splitting)) + +############################################################################### +# Get the curved quad mesh from a file (downloads the file if not available locally) + +# Mesh with second-order boundary polynomials requires an upwind SBP operator +# with (at least) 4th order boundary closure to guarantee the approximation is +# free-stream preserving +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/ec9a345f09199ebe471d35d5c1e4e08f/raw/15975943d8642e42f8292235314b6f1b30aa860d/mesh_inner_outer_boundaries.mesh", + joinpath(@__DIR__, "mesh_inner_outer_boundaries.mesh")) + +mesh = UnstructuredMesh2D(mesh_file) + +############################################################################### +# create the semi discretization object + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 5.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 1000, + save_initial_solution = true, + save_final_solution = true) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + save_solution, + alive_callback) + +############################################################################### +# run the simulation + +# set small tolerances for the free-stream preservation test +sol = solve(ode, SSPRK43(), abstol = 1.0e-12, reltol = 1.0e-12, + save_everystep = false, callback = callbacks) + +summary_callback() # print the timer summary diff --git a/examples/unstructured_2d_fdsbp/elixir_euler_source_terms_upwind.jl b/examples/unstructured_2d_fdsbp/elixir_euler_source_terms_upwind.jl new file mode 100644 index 0000000000..9bd2afa574 --- /dev/null +++ b/examples/unstructured_2d_fdsbp/elixir_euler_source_terms_upwind.jl @@ -0,0 +1,87 @@ +# !!! warning "Experimental implementation (upwind SBP)" +# This is an experimental feature and may change in future releases. + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +initial_condition = initial_condition_convergence_test + +source_term = source_terms_convergence_test + +boundary_condition_eoc = BoundaryConditionDirichlet(initial_condition) + +boundary_conditions = Dict(:Top => boundary_condition_eoc, + :Bottom => boundary_condition_eoc, + :Right => boundary_condition_eoc, + :Left => boundary_condition_eoc) + +############################################################################### +# Get the Upwind FDSBP approximation space + +# TODO: FDSBP +# Note, one must set `xmin=-1` and `xmax=1` due to the reuse +# of interpolation routines from `calc_node_coordinates!` to create +# the physical coordinates in the mappings. 
+D_upw = upwind_operators(SummationByPartsOperators.Mattsson2017, + derivative_order = 1, + accuracy_order = 4, + xmin = -1.0, xmax = 1.0, + N = 9) + +flux_splitting = splitting_drikakis_tsangaris +solver = FDSBP(D_upw, + surface_integral = SurfaceIntegralStrongForm(FluxUpwind(flux_splitting)), + volume_integral = VolumeIntegralUpwind(flux_splitting)) + +############################################################################### +# Get the curved quad mesh from a file (downloads the file if not available locally) + +# Mesh with first-order boundary polynomials requires an upwind SBP operator +# with (at least) 2nd order boundary closure to guarantee the approximation is +# free-stream preserving +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/a4f4743008bf3233957a9ea6ac7a62e0/raw/8b36cc6649153fe0a5723b200368a210a1d74eaf/mesh_refined_box.mesh", + joinpath(@__DIR__, "mesh_refined_box.mesh")) + +mesh = UnstructuredMesh2D(mesh_file) + +############################################################################### +# create the semidiscretization object + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_term, + boundary_conditions = boundary_conditions) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +save_solution = SaveSolutionCallback(interval = 1000, + save_initial_solution = true, + save_final_solution = true) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + save_solution, + alive_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, SSPRK43(), abstol = 1.0e-6, reltol = 1.0e-6, + save_everystep = false, callback = callbacks) + +summary_callback() # print the timer summary diff --git a/src/Trixi.jl b/src/Trixi.jl index 5f8cd9cae8..da7359999c 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -191,7 +191,8 @@ export flux, flux_central, flux_lax_friedrichs, flux_hll, flux_hllc, flux_hlle, FluxUpwind export splitting_steger_warming, splitting_vanleer_haenel, - splitting_coirier_vanleer, splitting_lax_friedrichs + splitting_coirier_vanleer, splitting_lax_friedrichs, + splitting_drikakis_tsangaris export initial_condition_constant, initial_condition_gauss, diff --git a/src/equations/compressible_euler_2d.jl b/src/equations/compressible_euler_2d.jl index f5a632723c..43f15a3cfb 100644 --- a/src/equations/compressible_euler_2d.jl +++ b/src/equations/compressible_euler_2d.jl @@ -689,7 +689,9 @@ end orientation::Integer, equations::CompressibleEulerEquations2D) -Splitting of the compressible Euler flux of Steger and Warming. +Splitting of the compressible Euler flux of Steger and Warming. For +curvilinear coordinates use the improved Steger-Warming-type splitting +[`splitting_drikakis_tsangaris`](@ref). 
Returns a tuple of the fluxes "minus" (associated with waves going into the negative axis direction) and "plus" (associated with waves going into the @@ -809,6 +811,174 @@ end return SVector(f1m, f2m, f3m, f4m) end +""" + splitting_drikakis_tsangaris(u, orientation_or_normal_direction, + equations::CompressibleEulerEquations2D) + splitting_drikakis_tsangaris(u, which::Union{Val{:minus}, Val{:plus}} + orientation_or_normal_direction, + equations::CompressibleEulerEquations2D) + +Improved variant of the Steger-Warming flux vector splitting +[`splitting_steger_warming`](@ref) for generalized coordinates. +This splitting also reformulates the energy +flux as in Hänel et al. to obtain conservation of the total temperature +for inviscid flows. + +Returns a tuple of the fluxes "minus" (associated with waves going into the +negative axis direction) and "plus" (associated with waves going into the +positive axis direction). If only one of the fluxes is required, use the +function signature with argument `which` set to `Val{:minus}()` or `Val{:plus}()`. + +!!! warning "Experimental implementation (upwind SBP)" + This is an experimental feature and may change in future releases. + +## References + +- D. Drikakis and S. Tsangaris (1993) + On the solution of the compressible Navier-Stokes equations using + improved flux vector splitting methods + [DOI: 10.1016/0307-904X(93)90054-K](https://doi.org/10.1016/0307-904X(93)90054-K) +- D. Hänel, R. Schwane and G. Seider (1987) + On the accuracy of upwind schemes for the solution of the Navier-Stokes equations + [DOI: 10.2514/6.1987-1105](https://doi.org/10.2514/6.1987-1105) +""" +@inline function splitting_drikakis_tsangaris(u, orientation_or_normal_direction, + equations::CompressibleEulerEquations2D) + fm = splitting_drikakis_tsangaris(u, Val{:minus}(), orientation_or_normal_direction, + equations) + fp = splitting_drikakis_tsangaris(u, Val{:plus}(), orientation_or_normal_direction, + equations) + return fm, fp +end + +@inline function splitting_drikakis_tsangaris(u, ::Val{:plus}, orientation::Integer, + equations::CompressibleEulerEquations2D) + rho, rho_v1, rho_v2, rho_e = u + v1 = rho_v1 / rho + v2 = rho_v2 / rho + p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2)) + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + + if orientation == 1 + lambda1 = v1 + a + lambda2 = v1 - a + + lambda1_p = positive_part(lambda1) # Same as (lambda_i + abs(lambda_i)) / 2, but faster :) + lambda2_p = positive_part(lambda2) + + rhoa_2gamma = 0.5 * rho * a / equations.gamma + f1p = 0.5 * rho * (lambda1_p + lambda2_p) + f2p = f1p * v1 + rhoa_2gamma * (lambda1_p - lambda2_p) + f3p = f1p * v2 + f4p = f1p * H + else # orientation == 2 + lambda1 = v2 + a + lambda2 = v2 - a + + lambda1_p = positive_part(lambda1) # Same as (lambda_i + abs(lambda_i)) / 2, but faster :) + lambda2_p = positive_part(lambda2) + + rhoa_2gamma = 0.5 * rho * a / equations.gamma + f1p = 0.5 * rho * (lambda1_p + lambda2_p) + f2p = f1p * v1 + f3p = f1p * v2 + rhoa_2gamma * (lambda1_p - lambda2_p) + f4p = f1p * H + end + return SVector(f1p, f2p, f3p, f4p) +end + +@inline function splitting_drikakis_tsangaris(u, ::Val{:minus}, orientation::Integer, + equations::CompressibleEulerEquations2D) + rho, rho_v1, rho_v2, rho_e = u + v1 = rho_v1 / rho + v2 = rho_v2 / rho + p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2)) + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + + if orientation == 1 + lambda1 = v1 + a + lambda2 = v1 - a + + lambda1_m = 
negative_part(lambda1) # Same as (lambda_i - abs(lambda_i)) / 2, but faster :) + lambda2_m = negative_part(lambda2) + + rhoa_2gamma = 0.5 * rho * a / equations.gamma + f1m = 0.5 * rho * (lambda1_m + lambda2_m) + f2m = f1m * v1 + rhoa_2gamma * (lambda1_m - lambda2_m) + f3m = f1m * v2 + f4m = f1m * H + else # orientation == 2 + lambda1 = v2 + a + lambda2 = v2 - a + + lambda1_m = negative_part(lambda1) # Same as (lambda_i - abs(lambda_i)) / 2, but faster :) + lambda2_m = negative_part(lambda2) + + rhoa_2gamma = 0.5 * rho * a / equations.gamma + f1m = 0.5 * rho * (lambda1_m + lambda2_m) + f2m = f1m * v1 + f3m = f1m * v2 + rhoa_2gamma * (lambda1_m - lambda2_m) + f4m = f1m * H + end + return SVector(f1m, f2m, f3m, f4m) +end + +@inline function splitting_drikakis_tsangaris(u, ::Val{:plus}, + normal_direction::AbstractVector, + equations::CompressibleEulerEquations2D) + rho, rho_v1, rho_v2, rho_e = u + v1 = rho_v1 / rho + v2 = rho_v2 / rho + p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2)) + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + + v_n = normal_direction[1] * v1 + normal_direction[2] * v2 + + lambda1 = v_n + a + lambda2 = v_n - a + + lambda1_p = positive_part(lambda1) # Same as (lambda_i + abs(lambda_i)) / 2, but faster :) + lambda2_p = positive_part(lambda2) + + rhoa_2gamma = 0.5 * rho * a / equations.gamma + f1p = 0.5 * rho * (lambda1_p + lambda2_p) + f2p = f1p * v1 + rhoa_2gamma * normal_direction[1] * (lambda1_p - lambda2_p) + f3p = f1p * v2 + rhoa_2gamma * normal_direction[2] * (lambda1_p - lambda2_p) + f4p = f1p * H + + return SVector(f1p, f2p, f3p, f4p) +end + +@inline function splitting_drikakis_tsangaris(u, ::Val{:minus}, + normal_direction::AbstractVector, + equations::CompressibleEulerEquations2D) + rho, rho_v1, rho_v2, rho_e = u + v1 = rho_v1 / rho + v2 = rho_v2 / rho + p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2)) + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + + v_n = normal_direction[1] * v1 + normal_direction[2] * v2 + + lambda1 = v_n + a + lambda2 = v_n - a + + lambda1_m = negative_part(lambda1) # Same as (lambda_i - abs(lambda_i)) / 2, but faster :) + lambda2_m = negative_part(lambda2) + + rhoa_2gamma = 0.5 * rho * a / equations.gamma + f1m = 0.5 * rho * (lambda1_m + lambda2_m) + f2m = f1m * v1 + rhoa_2gamma * normal_direction[1] * (lambda1_m - lambda2_m) + f3m = f1m * v2 + rhoa_2gamma * normal_direction[2] * (lambda1_m - lambda2_m) + f4m = f1m * H + + return SVector(f1m, f2m, f3m, f4m) +end + """ FluxLMARS(c)(u_ll, u_rr, orientation_or_normal_direction, equations::CompressibleEulerEquations2D) @@ -902,10 +1072,10 @@ end end """ - splitting_vanleer_haenel(u, orientation::Integer, + splitting_vanleer_haenel(u, orientation_or_normal_direction, equations::CompressibleEulerEquations2D) splitting_vanleer_haenel(u, which::Union{Val{:minus}, Val{:plus}} - orientation::Integer, + orientation_or_normal_direction, equations::CompressibleEulerEquations2D) Splitting of the compressible Euler flux from van Leer. This splitting further @@ -913,7 +1083,8 @@ contains a reformulation due to Hänel et al. where the energy flux uses the enthalpy. The pressure splitting is independent from the splitting of the convective terms. As such there are many pressure splittings suggested across the literature. We implement the 'p4' variant suggested by Liou and Steffen as -it proved the most robust in practice. +it proved the most robust in practice. 
For details on the curvilinear variant +of this flux vector splitting see Anderson et al. Returns a tuple of the fluxes "minus" (associated with waves going into the negative axis direction) and "plus" (associated with waves going into the @@ -934,11 +1105,16 @@ function signature with argument `which` set to `Val{:minus}()` or `Val{:plus}() - Meng-Sing Liou and Chris J. Steffen, Jr. (1991) High-Order Polynomial Expansions (HOPE) for Flux-Vector Splitting [NASA Technical Memorandum](https://ntrs.nasa.gov/citations/19910016425) +- W. Kyle Anderson, James L. Thomas, and Bram van Leer (1986) + Comparison of Finite Volume Flux Vector Splittings for the Euler Equations + [DOI: 10.2514/3.9465](https://doi.org/10.2514/3.9465) """ -@inline function splitting_vanleer_haenel(u, orientation::Integer, +@inline function splitting_vanleer_haenel(u, orientation_or_normal_direction, equations::CompressibleEulerEquations2D) - fm = splitting_vanleer_haenel(u, Val{:minus}(), orientation, equations) - fp = splitting_vanleer_haenel(u, Val{:plus}(), orientation, equations) + fm = splitting_vanleer_haenel(u, Val{:minus}(), orientation_or_normal_direction, + equations) + fp = splitting_vanleer_haenel(u, Val{:plus}(), orientation_or_normal_direction, + equations) return fm, fp end @@ -1002,11 +1178,57 @@ end return SVector(f1m, f2m, f3m, f4m) end +@inline function splitting_vanleer_haenel(u, ::Val{:plus}, + normal_direction::AbstractVector, + equations::CompressibleEulerEquations2D) + rho, rho_v1, rho_v2, rho_e = u + v1 = rho_v1 / rho + v2 = rho_v2 / rho + p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2)) + + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + + v_n = normal_direction[1] * v1 + normal_direction[2] * v2 + M = v_n / a + p_plus = 0.5 * (1 + equations.gamma * M) * p + + f1p = 0.25 * rho * a * (M + 1)^2 + f2p = f1p * v1 + normal_direction[1] * p_plus + f3p = f1p * v2 + normal_direction[2] * p_plus + f4p = f1p * H + + return SVector(f1p, f2p, f3p, f4p) +end + +@inline function splitting_vanleer_haenel(u, ::Val{:minus}, + normal_direction::AbstractVector, + equations::CompressibleEulerEquations2D) + rho, rho_v1, rho_v2, rho_e = u + v1 = rho_v1 / rho + v2 = rho_v2 / rho + p = (equations.gamma - 1) * (rho_e - 0.5 * (rho_v1 * v1 + rho_v2 * v2)) + + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + + v_n = normal_direction[1] * v1 + normal_direction[2] * v2 + M = v_n / a + p_minus = 0.5 * (1 - equations.gamma * M) * p + + f1m = -0.25 * rho * a * (M - 1)^2 + f2m = f1m * v1 + normal_direction[1] * p_minus + f3m = f1m * v2 + normal_direction[2] * p_minus + f4m = f1m * H + + return SVector(f1m, f2m, f3m, f4m) +end + """ - splitting_lax_friedrichs(u, orientation::Integer, + splitting_lax_friedrichs(u, orientation_or_normal_direction, equations::CompressibleEulerEquations2D) splitting_lax_friedrichs(u, which::Union{Val{:minus}, Val{:plus}} - orientation::Integer, + orientation_or_normal_direction, equations::CompressibleEulerEquations2D) Naive local Lax-Friedrichs style flux splitting of the form `f⁺ = 0.5 (f + λ u)` @@ -1021,10 +1243,12 @@ function signature with argument `which` set to `Val{:minus}()` or `Val{:plus}() !!! warning "Experimental implementation (upwind SBP)" This is an experimental feature and may change in future releases. 
""" -@inline function splitting_lax_friedrichs(u, orientation::Integer, +@inline function splitting_lax_friedrichs(u, orientation_or_normal_direction, equations::CompressibleEulerEquations2D) - fm = splitting_lax_friedrichs(u, Val{:minus}(), orientation, equations) - fp = splitting_lax_friedrichs(u, Val{:plus}(), orientation, equations) + fm = splitting_lax_friedrichs(u, Val{:minus}(), orientation_or_normal_direction, + equations) + fp = splitting_lax_friedrichs(u, Val{:plus}(), orientation_or_normal_direction, + equations) return fm, fp end @@ -1082,6 +1306,48 @@ end return SVector(f1m, f2m, f3m, f4m) end +@inline function splitting_lax_friedrichs(u, ::Val{:plus}, + normal_direction::AbstractVector, + equations::CompressibleEulerEquations2D) + rho_e = last(u) + rho, v1, v2, p = cons2prim(u, equations) + + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + lambda = 0.5 * (sqrt(v1^2 + v2^2) + a) + + v_normal = v1 * normal_direction[1] + v2 * normal_direction[2] + rho_v_normal = rho * v_normal + + f1p = 0.5 * rho_v_normal + lambda * u[1] + f2p = 0.5 * rho_v_normal * v1 + 0.5 * p * normal_direction[1] + lambda * u[2] + f3p = 0.5 * rho_v_normal * v2 + 0.5 * p * normal_direction[2] + lambda * u[3] + f4p = 0.5 * rho_v_normal * H + lambda * u[4] + + return SVector(f1p, f2p, f3p, f4p) +end + +@inline function splitting_lax_friedrichs(u, ::Val{:minus}, + normal_direction::AbstractVector, + equations::CompressibleEulerEquations2D) + rho_e = last(u) + rho, v1, v2, p = cons2prim(u, equations) + + a = sqrt(equations.gamma * p / rho) + H = (rho_e + p) / rho + lambda = 0.5 * (sqrt(v1^2 + v2^2) + a) + + v_normal = v1 * normal_direction[1] + v2 * normal_direction[2] + rho_v_normal = rho * v_normal + + f1m = 0.5 * rho_v_normal - lambda * u[1] + f2m = 0.5 * rho_v_normal * v1 + 0.5 * p * normal_direction[1] - lambda * u[2] + f3m = 0.5 * rho_v_normal * v2 + 0.5 * p * normal_direction[2] - lambda * u[3] + f4m = 0.5 * rho_v_normal * H - lambda * u[4] + + return SVector(f1m, f2m, f3m, f4m) +end + # Calculate maximum wave speed for local Lax-Friedrichs-type dissipation as the # maximum velocity magnitude plus the maximum speed of sound @inline function max_abs_speed_naive(u_ll, u_rr, orientation::Integer, diff --git a/src/equations/numerical_fluxes.jl b/src/equations/numerical_fluxes.jl index 6794c71a32..e3e798381a 100644 --- a/src/equations/numerical_fluxes.jl +++ b/src/equations/numerical_fluxes.jl @@ -415,7 +415,8 @@ flux vector splitting. The [`SurfaceIntegralUpwind`](@ref) with a given `splitting` is equivalent to the [`SurfaceIntegralStrongForm`](@ref) with `FluxUpwind(splitting)` -as numerical flux (up to floating point differences). +as numerical flux (up to floating point differences). Note, that +[`SurfaceIntegralUpwind`](@ref) is only available on [`TreeMesh`](@ref). !!! warning "Experimental implementation (upwind SBP)" This is an experimental feature and may change in future releases. 
@@ -431,5 +432,14 @@ end return fm + fp end +@inline function (numflux::FluxUpwind)(u_ll, u_rr, + normal_direction::AbstractVector, + equations::AbstractEquations{2}) + @unpack splitting = numflux + f_tilde_m = splitting(u_rr, Val{:minus}(), normal_direction, equations) + f_tilde_p = splitting(u_ll, Val{:plus}(), normal_direction, equations) + return f_tilde_m + f_tilde_p +end + Base.show(io::IO, f::FluxUpwind) = print(io, "FluxUpwind(", f.splitting, ")") end # @muladd diff --git a/src/solvers/dgsem_unstructured/dg_2d.jl b/src/solvers/dgsem_unstructured/dg_2d.jl index b12a96c4c3..988e995d6b 100644 --- a/src/solvers/dgsem_unstructured/dg_2d.jl +++ b/src/solvers/dgsem_unstructured/dg_2d.jl @@ -77,7 +77,7 @@ function rhs!(du, u, t, end # Apply Jacobian from mapping to reference element - # Note! this routine is reused from dg_curved/dg_2d.jl + # Note! this routine is reused from dgsem_structured/dg_2d.jl @trixi_timeit timer() "Jacobian" apply_jacobian!(du, mesh, equations, dg, cache) # Calculate source terms diff --git a/src/solvers/fdsbp_tree/fdsbp_2d.jl b/src/solvers/fdsbp_tree/fdsbp_2d.jl index 09d18cecd7..36afbbc022 100644 --- a/src/solvers/fdsbp_tree/fdsbp_2d.jl +++ b/src/solvers/fdsbp_tree/fdsbp_2d.jl @@ -19,7 +19,7 @@ function create_cache(mesh::Union{TreeMesh{2}, UnstructuredMesh2D}, equations, return (; f_threaded) end -function create_cache(mesh::TreeMesh{2}, equations, +function create_cache(mesh::Union{TreeMesh{2}, UnstructuredMesh2D}, equations, volume_integral::VolumeIntegralUpwind, dg, uEltype) u_node = SVector{nvariables(equations), uEltype}(ntuple(_ -> zero(uEltype), Val{nvariables(equations)}())) diff --git a/src/solvers/fdsbp_unstructured/containers_2d.jl b/src/solvers/fdsbp_unstructured/containers_2d.jl index 3857c2d8a2..f68b1e00f5 100644 --- a/src/solvers/fdsbp_unstructured/containers_2d.jl +++ b/src/solvers/fdsbp_unstructured/containers_2d.jl @@ -9,7 +9,7 @@ #! format: noindent # initialize all the values in the container of a general FD block (either straight sided or curved) -# OBS! Requires the SBP derivative matrix in order to compute metric terms that are free-stream preserving +# OBS! Requires the SBP derivative matrix in order to compute metric terms. function init_element!(elements, element, basis::AbstractDerivativeOperator, corners_or_surface_curves) calc_node_coordinates!(elements.node_coordinates, element, get_nodes(basis), @@ -29,9 +29,15 @@ function init_element!(elements, element, basis::AbstractDerivativeOperator, return elements end +# Specialization to pass the central differencing matrix from an upwind SBP operator +function calc_metric_terms!(jacobian_matrix, element, + D_SBP::SummationByPartsOperators.UpwindOperators, + node_coordinates) + calc_metric_terms!(jacobian_matrix, element, D_SBP.central, node_coordinates) +end + # construct the metric terms for a FDSBP element "block". Directly use the derivative matrix # applied to the node coordinates. 
-# TODO: FD; How to make this work for the upwind solver because basis has three available derivative matrices function calc_metric_terms!(jacobian_matrix, element, D_SBP::AbstractDerivativeOperator, node_coordinates) diff --git a/src/solvers/fdsbp_unstructured/fdsbp_2d.jl b/src/solvers/fdsbp_unstructured/fdsbp_2d.jl index b459f4c42c..c35772cdf1 100644 --- a/src/solvers/fdsbp_unstructured/fdsbp_2d.jl +++ b/src/solvers/fdsbp_unstructured/fdsbp_2d.jl @@ -25,8 +25,6 @@ function create_cache(mesh::UnstructuredMesh2D, equations, dg::FDSBP, RealT, uEl return cache end -# TODO: FD; Upwind versions of surface / volume integral - # 2D volume integral contributions for `VolumeIntegralStrongForm` # OBS! This is the standard (not de-aliased) form of the volume integral. # So it is not provably stable for variable coefficients due to the the metric terms. @@ -86,6 +84,91 @@ end return nothing end +# 2D volume integral contributions for `VolumeIntegralUpwind`. +# Note that the plus / minus notation of the operators does not refer to the +# upwind / downwind directions of the fluxes. +# Instead, the plus / minus refers to the direction of the biasing within +# the finite difference stencils. Thus, the D^- operator acts on the positive +# part of the flux splitting f^+ and the D^+ operator acts on the negative part +# of the flux splitting f^-. +function calc_volume_integral!(du, u, + mesh::UnstructuredMesh2D, + nonconservative_terms::False, equations, + volume_integral::VolumeIntegralUpwind, + dg::FDSBP, cache) + # Assume that + # dg.basis isa SummationByPartsOperators.UpwindOperators + D_minus = dg.basis.minus # Upwind SBP D^- derivative operator + D_plus = dg.basis.plus # Upwind SBP D^+ derivative operator + @unpack f_minus_plus_threaded, f_minus_threaded, f_plus_threaded = cache + @unpack splitting = volume_integral + @unpack contravariant_vectors = cache.elements + + # SBP operators from SummationByPartsOperators.jl implement the basic interface + # of matrix-vector multiplication. Thus, we pass an "array of structures", + # packing all variables per node in an `SVector`. + if nvariables(equations) == 1 + # `reinterpret(reshape, ...)` removes the leading dimension only if more + # than one variable is used. + u_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(u)}, u), + nnodes(dg), nnodes(dg), nelements(dg, cache)) + du_vectors = reshape(reinterpret(SVector{nvariables(equations), eltype(du)}, + du), + nnodes(dg), nnodes(dg), nelements(dg, cache)) + else + u_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(u)}, u) + du_vectors = reinterpret(reshape, SVector{nvariables(equations), eltype(du)}, + du) + end + + # Use the tensor product structure to compute the discrete derivatives of + # the fluxes line-by-line and add them to `du` for each element. + @threaded for element in eachelement(dg, cache) + # f_minus_plus_element wraps the storage provided by f_minus_element and + # f_plus_element such that we can use a single assignment below. + # f_minus_element and f_plus_element are updated whenever we update + # `f_minus_plus_element[i, j] = ...` below. + f_minus_plus_element = f_minus_plus_threaded[Threads.threadid()] + f_minus_element = f_minus_threaded[Threads.threadid()] + f_plus_element = f_plus_threaded[Threads.threadid()] + u_element = view(u_vectors, :, :, element) + + # x direction + # We use flux vector splittings in the directions of the contravariant + # basis vectors. Thus, we do not use a broadcasting operation like + # @. 
f_minus_plus_element = splitting(u_element, 1, equations) + # in the Cartesian case but loop over all nodes. + for j in eachnode(dg), i in eachnode(dg) + # contravariant vectors computed with central D matrix + Ja1 = get_contravariant_vector(1, contravariant_vectors, i, j, element) + f_minus_plus_element[i, j] = splitting(u_element[i, j], Ja1, equations) + end + + for j in eachnode(dg) + mul!(view(du_vectors, :, j, element), D_minus, view(f_plus_element, :, j), + one(eltype(du)), one(eltype(du))) + mul!(view(du_vectors, :, j, element), D_plus, view(f_minus_element, :, j), + one(eltype(du)), one(eltype(du))) + end + + # y direction + for j in eachnode(dg), i in eachnode(dg) + # contravariant vectors computed with central D matrix + Ja2 = get_contravariant_vector(2, contravariant_vectors, i, j, element) + f_minus_plus_element[i, j] = splitting(u_element[i, j], Ja2, equations) + end + + for i in eachnode(dg) + mul!(view(du_vectors, i, :, element), D_minus, view(f_plus_element, i, :), + one(eltype(du)), one(eltype(du))) + mul!(view(du_vectors, i, :, element), D_plus, view(f_minus_element, i, :), + one(eltype(du)), one(eltype(du))) + end + end + + return nothing +end + # Note! The local side numbering for the unstructured quadrilateral element implementation differs # from the structured TreeMesh or StructuredMesh local side numbering: # @@ -114,8 +197,7 @@ function calc_surface_integral!(du, u, mesh::UnstructuredMesh2D, # surface at -x u_node = get_node_vars(u, equations, dg, 1, l, element) # compute internal flux in normal direction on side 4 - outward_direction = get_node_coords(normal_directions, equations, dg, l, 4, - element) + outward_direction = get_surface_normal(normal_directions, l, 4, element) f_node = flux(u_node, outward_direction, equations) f_num = get_node_vars(surface_flux_values, equations, dg, l, 4, element) multiply_add_to_node_vars!(du, inv_weight_left, (f_num - f_node), @@ -124,8 +206,7 @@ function calc_surface_integral!(du, u, mesh::UnstructuredMesh2D, # surface at +x u_node = get_node_vars(u, equations, dg, nnodes(dg), l, element) # compute internal flux in normal direction on side 2 - outward_direction = get_node_coords(normal_directions, equations, dg, l, 2, - element) + outward_direction = get_surface_normal(normal_directions, l, 2, element) f_node = flux(u_node, outward_direction, equations) f_num = get_node_vars(surface_flux_values, equations, dg, l, 2, element) multiply_add_to_node_vars!(du, inv_weight_right, (f_num - f_node), @@ -134,8 +215,7 @@ function calc_surface_integral!(du, u, mesh::UnstructuredMesh2D, # surface at -y u_node = get_node_vars(u, equations, dg, l, 1, element) # compute internal flux in normal direction on side 1 - outward_direction = get_node_coords(normal_directions, equations, dg, l, 1, - element) + outward_direction = get_surface_normal(normal_directions, l, 1, element) f_node = flux(u_node, outward_direction, equations) f_num = get_node_vars(surface_flux_values, equations, dg, l, 1, element) multiply_add_to_node_vars!(du, inv_weight_left, (f_num - f_node), @@ -144,8 +224,7 @@ function calc_surface_integral!(du, u, mesh::UnstructuredMesh2D, # surface at +y u_node = get_node_vars(u, equations, dg, l, nnodes(dg), element) # compute internal flux in normal direction on side 3 - outward_direction = get_node_coords(normal_directions, equations, dg, l, 3, - element) + outward_direction = get_surface_normal(normal_directions, l, 3, element) f_node = flux(u_node, outward_direction, equations) f_num = get_node_vars(surface_flux_values, equations, 
dg, l, 3, element) multiply_add_to_node_vars!(du, inv_weight_right, (f_num - f_node), diff --git a/test/test_tree_2d_fdsbp.jl b/test/test_tree_2d_fdsbp.jl index c0844ee5db..d477cab056 100644 --- a/test/test_tree_2d_fdsbp.jl +++ b/test/test_tree_2d_fdsbp.jl @@ -102,6 +102,32 @@ end end end + @trixi_testset "elixir_euler_convergence.jl with Drikakis-Tsangaris splitting" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_convergence.jl"), + l2=[ + 1.708838999643608e-6, + 1.7437997854485807e-6, + 1.7437997854741082e-6, + 5.457223460116349e-6, + ], + linf=[ + 9.796504911285808e-6, + 9.614745899888533e-6, + 9.614745899444443e-6, + 4.02610718399643e-5, + ], + tspan=(0.0, 0.1), flux_splitting=splitting_drikakis_tsangaris) + + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end + end + @trixi_testset "elixir_euler_kelvin_helmholtz_instability.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_kelvin_helmholtz_instability.jl"), diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index 87d677e162..8a62dcaec3 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -610,6 +610,76 @@ end @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end end + +@trixi_testset "FDSBP (upwind): elixir_euler_source_terms_upwind.jl" begin + @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + "elixir_euler_source_terms_upwind.jl"), + l2=[4.085391175504837e-5, + 7.19179253772227e-5, + 7.191792537723135e-5, + 0.00021775241532855398], + linf=[0.0004054489124620808, + 0.0006164432358217731, + 0.0006164432358186644, + 0.001363103391379461], + tspan=(0.0, 0.05), + atol=1.0e-10) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "FDSBP (upwind): elixir_euler_source_terms_upwind.jl with LF splitting" begin + @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + "elixir_euler_source_terms_upwind.jl"), + l2=[3.8300267071890586e-5, + 5.295846741663533e-5, + 5.295846741663526e-5, + 0.00017564759295593478], + linf=[0.00018810716496542312, + 0.0003794187430412599, + 0.0003794187430412599, + 0.0009632958510650269], + tspan=(0.0, 0.025), + flux_splitting=splitting_lax_friedrichs, + atol=1.0e-10) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + +@trixi_testset "FDSBP (upwind): elixir_euler_free_stream_upwind.jl" begin + @test_trixi_include(joinpath(pkgdir(Trixi, "examples", "unstructured_2d_fdsbp"), + "elixir_euler_free_stream_upwind.jl"), + l2=[3.2114065566681054e-14, + 2.132488788134846e-14, + 2.106144937311659e-14, + 8.609642264224197e-13], + linf=[3.354871935812298e-11, + 7.006478730531285e-12, + 1.148153794261475e-11, + 9.041265514042607e-10], + tspan=(0.0, 0.05), + atol=1.0e-10) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end end # 
Clean up afterwards: delete Trixi.jl output directory From 253c358243ed2a8b6f63c422b53cf09a68188f68 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 6 Mar 2024 13:25:19 +0100 Subject: [PATCH 31/58] set version to v0.7.1 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 800c7b4c0f..53b859a422 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.7.1-pre" +version = "0.7.1" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From ca082c2eb1273611cf38e80c6d2dab04e8f8177f Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 6 Mar 2024 13:25:37 +0100 Subject: [PATCH 32/58] set development version to v0.7.2-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 53b859a422..e8eb7c788c 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.7.1" +version = "0.7.2-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 4bf61a0fd3abb21b457ea9d9ee2c19c835d018c3 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Wed, 6 Mar 2024 14:25:25 +0100 Subject: [PATCH 33/58] force new FFMPEG.jl version for tests (#1858) --- test/Project.toml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/test/Project.toml b/test/Project.toml index a376c2805e..1a042dab44 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -2,6 +2,7 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6" +FFMPEG = "c87230d0-a227-11e9-1b43-d7ebe4e7570a" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" @@ -15,6 +16,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" Aqua = "0.8" CairoMakie = "0.10" Downloads = "1" +FFMPEG = "0.4" ForwardDiff = "0.10.24" LinearAlgebra = "1" MPI = "0.20" From 3bed8285ca2bb3857050bde4fd1408d151cb760d Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Thu, 7 Mar 2024 10:41:56 +0100 Subject: [PATCH 34/58] Check BCs for periodicity for periodic Tree & Structured meshes (#1860) * Check BCs for periodicity for periodic meshes * default case for periodic bcs * fmt * specialize * error ant fmt * isperiodic TreeMesh * avoid if * shorten * shorten * Make meshes non-periodic * fix rti * shorten dispatch --------- Co-authored-by: Hendrik Ranocha --- .../elixir_advection_coupled.jl | 12 ++-- ...lixir_euler_rayleigh_taylor_instability.jl | 2 +- .../elixir_euler_warm_bubble.jl | 3 +- src/meshes/tree_mesh.jl | 3 + .../semidiscretization_hyperbolic.jl | 70 +++++++++++++++++++ ...semidiscretization_hyperbolic_parabolic.jl | 2 + 6 files changed, 86 insertions(+), 6 deletions(-) diff --git a/examples/structured_2d_dgsem/elixir_advection_coupled.jl b/examples/structured_2d_dgsem/elixir_advection_coupled.jl index 43b68f21b0..0002bb8d37 100644 --- a/examples/structured_2d_dgsem/elixir_advection_coupled.jl +++ b/examples/structured_2d_dgsem/elixir_advection_coupled.jl @@ -53,7 +53,8 @@ cells_per_dimension = (8, 8) coordinates_min1 = (-1.0, 0.0) # minimum coordinates 
(min(x), min(y)) coordinates_max1 = (0.0, 1.0) # maximum coordinates (max(x), max(y)) -mesh1 = StructuredMesh(cells_per_dimension, coordinates_min1, coordinates_max1) +mesh1 = StructuredMesh(cells_per_dimension, coordinates_min1, coordinates_max1, + periodicity = false) # Define the coupling functions coupling_function12 = (x, u, equations_other, equations_own) -> u @@ -84,7 +85,8 @@ semi1 = SemidiscretizationHyperbolic(mesh1, equations, initial_condition_converg coordinates_min2 = (0.0, 0.0) # minimum coordinates (min(x), min(y)) coordinates_max2 = (1.0, 1.0) # maximum coordinates (max(x), max(y)) -mesh2 = StructuredMesh(cells_per_dimension, coordinates_min2, coordinates_max2) +mesh2 = StructuredMesh(cells_per_dimension, coordinates_min2, coordinates_max2, + periodicity = false) # Define the coupling functions coupling_function21 = (x, u, equations_other, equations_own) -> u @@ -115,7 +117,8 @@ semi2 = SemidiscretizationHyperbolic(mesh2, equations, initial_condition_converg coordinates_min3 = (-1.0, -1.0) # minimum coordinates (min(x), min(y)) coordinates_max3 = (0.0, 0.0) # maximum coordinates (max(x), max(y)) -mesh3 = StructuredMesh(cells_per_dimension, coordinates_min3, coordinates_max3) +mesh3 = StructuredMesh(cells_per_dimension, coordinates_min3, coordinates_max3, + periodicity = false) # Define the coupling functions coupling_function34 = (x, u, equations_other, equations_own) -> u @@ -146,7 +149,8 @@ semi3 = SemidiscretizationHyperbolic(mesh3, equations, initial_condition_converg coordinates_min4 = (0.0, -1.0) # minimum coordinates (min(x), min(y)) coordinates_max4 = (1.0, 0.0) # maximum coordinates (max(x), max(y)) -mesh4 = StructuredMesh(cells_per_dimension, coordinates_min4, coordinates_max4) +mesh4 = StructuredMesh(cells_per_dimension, coordinates_min4, coordinates_max4, + periodicity = false) # Define the coupling functions coupling_function43 = (x, u, equations_other, equations_own) -> u diff --git a/examples/structured_2d_dgsem/elixir_euler_rayleigh_taylor_instability.jl b/examples/structured_2d_dgsem/elixir_euler_rayleigh_taylor_instability.jl index 6c254e8bd8..dd0cc339b2 100644 --- a/examples/structured_2d_dgsem/elixir_euler_rayleigh_taylor_instability.jl +++ b/examples/structured_2d_dgsem/elixir_euler_rayleigh_taylor_instability.jl @@ -69,7 +69,7 @@ mapping(xi, eta) = SVector(0.25 * 0.5 * (1.0 + xi), 0.5 * (1.0 + eta)) num_elements_per_dimension = 32 cells_per_dimension = (num_elements_per_dimension, num_elements_per_dimension * 4) -mesh = StructuredMesh(cells_per_dimension, mapping) +mesh = StructuredMesh(cells_per_dimension, mapping, periodicity = false) initial_condition = initial_condition_rayleigh_taylor_instability boundary_conditions = (x_neg = boundary_condition_slip_wall, diff --git a/examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl b/examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl index 05c09d5753..38b9386e94 100644 --- a/examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl +++ b/examples/structured_2d_dgsem/elixir_euler_warm_bubble.jl @@ -100,7 +100,8 @@ coordinates_min = (0.0, 0.0) coordinates_max = (20_000.0, 10_000.0) cells_per_dimension = (64, 32) -mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max) +mesh = StructuredMesh(cells_per_dimension, coordinates_min, coordinates_max, + periodicity = (true, false)) semi = SemidiscretizationHyperbolic(mesh, equations, warm_bubble_setup, solver, source_terms = warm_bubble_setup, diff --git a/src/meshes/tree_mesh.jl b/src/meshes/tree_mesh.jl index 05699d17d1..1092fc54cc 
100644 --- a/src/meshes/tree_mesh.jl +++ b/src/meshes/tree_mesh.jl @@ -228,5 +228,8 @@ function total_volume(mesh::TreeMesh) return mesh.tree.length_level_0^ndims(mesh) end +isperiodic(mesh::TreeMesh) = isperiodic(mesh.tree) +isperiodic(mesh::TreeMesh, dimension) = isperiodic(mesh.tree, dimension) + include("parallel_tree_mesh.jl") end # @muladd diff --git a/src/semidiscretization/semidiscretization_hyperbolic.jl b/src/semidiscretization/semidiscretization_hyperbolic.jl index 7ebd758de3..f61378a7dc 100644 --- a/src/semidiscretization/semidiscretization_hyperbolic.jl +++ b/src/semidiscretization/semidiscretization_hyperbolic.jl @@ -72,6 +72,8 @@ function SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver _boundary_conditions = digest_boundary_conditions(boundary_conditions, mesh, solver, cache) + check_periodicity_mesh_boundary_conditions(mesh, _boundary_conditions) + SemidiscretizationHyperbolic{typeof(mesh), typeof(equations), typeof(initial_condition), typeof(_boundary_conditions), typeof(source_terms), @@ -210,6 +212,74 @@ function digest_boundary_conditions(boundary_conditions::AbstractArray, mesh, so throw(ArgumentError("Please use a (named) tuple instead of an (abstract) array to supply multiple boundary conditions (to improve performance).")) end +# No checks for these meshes yet available +function check_periodicity_mesh_boundary_conditions(mesh::Union{P4estMesh, + UnstructuredMesh2D, + T8codeMesh, + DGMultiMesh}, + boundary_conditions) +end + +# No actions needed for periodic boundary conditions +function check_periodicity_mesh_boundary_conditions(mesh::Union{TreeMesh, + StructuredMesh}, + boundary_conditions::BoundaryConditionPeriodic) +end + +function check_periodicity_mesh_boundary_conditions_x(mesh, x_neg, x_pos) + if isperiodic(mesh, 1) && + (x_neg != BoundaryConditionPeriodic() || + x_pos != BoundaryConditionPeriodic()) + @error "For periodic mesh non-periodic boundary conditions in x-direction are supplied." + end +end + +function check_periodicity_mesh_boundary_conditions_y(mesh, y_neg, y_pos) + if isperiodic(mesh, 2) && + (y_neg != BoundaryConditionPeriodic() || + y_pos != BoundaryConditionPeriodic()) + @error "For periodic mesh non-periodic boundary conditions in y-direction are supplied." + end +end + +function check_periodicity_mesh_boundary_conditions_z(mesh, z_neg, z_pos) + if isperiodic(mesh, 3) && + (z_neg != BoundaryConditionPeriodic() || + z_pos != BoundaryConditionPeriodic()) + @error "For periodic mesh non-periodic boundary conditions in z-direction are supplied." 
+ end +end + +function check_periodicity_mesh_boundary_conditions(mesh::Union{TreeMesh{1}, + StructuredMesh{1}}, + boundary_conditions::Union{NamedTuple, + Tuple}) + check_periodicity_mesh_boundary_conditions_x(mesh, boundary_conditions[1], + boundary_conditions[2]) +end + +function check_periodicity_mesh_boundary_conditions(mesh::Union{TreeMesh{2}, + StructuredMesh{2}}, + boundary_conditions::Union{NamedTuple, + Tuple}) + check_periodicity_mesh_boundary_conditions_x(mesh, boundary_conditions[1], + boundary_conditions[2]) + check_periodicity_mesh_boundary_conditions_y(mesh, boundary_conditions[3], + boundary_conditions[4]) +end + +function check_periodicity_mesh_boundary_conditions(mesh::Union{TreeMesh{3}, + StructuredMesh{3}}, + boundary_conditions::Union{NamedTuple, + Tuple}) + check_periodicity_mesh_boundary_conditions_x(mesh, boundary_conditions[1], + boundary_conditions[2]) + check_periodicity_mesh_boundary_conditions_y(mesh, boundary_conditions[3], + boundary_conditions[4]) + check_periodicity_mesh_boundary_conditions_z(mesh, boundary_conditions[5], + boundary_conditions[6]) +end + function Base.show(io::IO, semi::SemidiscretizationHyperbolic) @nospecialize semi # reduce precompilation time diff --git a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl index 0f44941390..57724374ac 100644 --- a/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl +++ b/src/semidiscretization/semidiscretization_hyperbolic_parabolic.jl @@ -136,6 +136,8 @@ function SemidiscretizationHyperbolicParabolic(mesh, equations, equations_parabo _boundary_conditions_parabolic = digest_boundary_conditions(boundary_conditions_parabolic, mesh, solver, cache) + check_periodicity_mesh_boundary_conditions(mesh, _boundary_conditions) + cache_parabolic = (; create_cache_parabolic(mesh, equations, equations_parabolic, solver, solver_parabolic, RealT, From b8b34ea21cfa22f93daa131c7ef77d8ed8f3f789 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 7 Mar 2024 10:45:17 +0100 Subject: [PATCH 35/58] set version to v0.7.2 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index e8eb7c788c..9a6e8e0d9a 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.7.2-pre" +version = "0.7.2" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From b6bc7c8cae92cf6877fca90c5ef100771a7781a8 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 7 Mar 2024 10:45:30 +0100 Subject: [PATCH 36/58] set development version to v0.7.3-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 9a6e8e0d9a..6b44af4a3f 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.7.2" +version = "0.7.3-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From b11bc10d8ac68798ae2e53f9db3d5c4f824af5fa Mon Sep 17 00:00:00 2001 From: Benedict <135045760+bgeihe@users.noreply.github.com> Date: Thu, 7 Mar 2024 13:04:19 +0100 Subject: [PATCH 37/58] add warning (#1863) --- src/solvers/dgsem/basis_lobatto_legendre.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/solvers/dgsem/basis_lobatto_legendre.jl b/src/solvers/dgsem/basis_lobatto_legendre.jl index 6a92fd1c06..9e21b88dfa 100644 --- a/src/solvers/dgsem/basis_lobatto_legendre.jl +++ b/src/solvers/dgsem/basis_lobatto_legendre.jl @@ -12,6 +12,7 @@ Create a nodal Lobatto-Legendre basis for polynomials of degree `polydeg`. For the special case `polydeg=0` the DG method reduces to a finite volume method. Therefore, this function sets the center point of the cell as single node. +This exceptional case is currently only supported for TreeMesh! """ struct LobattoLegendreBasis{RealT <: Real, NNODES, VectorT <: AbstractVector{RealT}, From c4bf3df8a4d4e3920b0f774960d129a4af6ee287 Mon Sep 17 00:00:00 2001 From: Benedict <135045760+bgeihe@users.noreply.github.com> Date: Thu, 7 Mar 2024 13:06:20 +0100 Subject: [PATCH 38/58] Update parallelization.md (#1864) MPI support in T8code came with #1803 --- docs/src/parallelization.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/docs/src/parallelization.md b/docs/src/parallelization.md index f599eb5faf..2114f30fb8 100644 --- a/docs/src/parallelization.md +++ b/docs/src/parallelization.md @@ -69,8 +69,7 @@ installations. Follow the steps described [here](https://github.com/DLR-AMR/T8co [here](https://github.com/trixi-framework/P4est.jl/blob/main/README.md#installation) for the configuration. The paths that point to `libp4est.so` (and potentially to `libsc.so`) need to be the same for P4est.jl and T8code.jl. This could, e.g., be `libp4est.so` that usually can be found -in `lib/` or `local/lib/` in the installation directory of `t8code`. Note that the `T8codeMesh`, however, -does not support MPI yet. +in `lib/` or `local/lib/` in the installation directory of `t8code`. The preferences for [HDF5.jl](https://github.com/JuliaIO/HDF5.jl) always need to be set, even if you do not want to use `HDF5` from Trixi.jl, see also [issue #1079 in HDF5.jl](https://github.com/JuliaIO/HDF5.jl/issues/1079). 
To set the preferences for HDF5.jl, follow the instructions described From 1ca37cf2271806d203a832d3d99bfa2c14d3226f Mon Sep 17 00:00:00 2001 From: Benedict <135045760+bgeihe@users.noreply.github.com> Date: Fri, 8 Mar 2024 07:48:24 +0100 Subject: [PATCH 39/58] set capacity also when using MPI (#1862) Co-authored-by: Hendrik Ranocha --- src/meshes/mesh_io.jl | 1 + 1 file changed, 1 insertion(+) diff --git a/src/meshes/mesh_io.jl b/src/meshes/mesh_io.jl index 337e33e696..28e6efa8c5 100644 --- a/src/meshes/mesh_io.jl +++ b/src/meshes/mesh_io.jl @@ -74,6 +74,7 @@ function save_mesh_file(mesh::TreeMesh, output_directory, timestep, attributes(file)["mesh_type"] = get_name(mesh) attributes(file)["ndims"] = ndims(mesh) attributes(file)["n_cells"] = n_cells + attributes(file)["capacity"] = mesh.tree.capacity attributes(file)["n_leaf_cells"] = count_leaf_cells(mesh.tree) attributes(file)["minimum_level"] = minimum_level(mesh.tree) attributes(file)["maximum_level"] = maximum_level(mesh.tree) From f235619a49dbc8bd7d84a77269558a64b21a155f Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Fri, 8 Mar 2024 12:20:45 +0100 Subject: [PATCH 40/58] Mention hyphen/dash caveat for boundary symbols (#1866) * Mention hyphen/dash caveat for boundary symbols * typo * elaborate --- docs/src/meshes/p4est_mesh.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/src/meshes/p4est_mesh.md b/docs/src/meshes/p4est_mesh.md index 3b35ffcad6..a14551b3f4 100644 --- a/docs/src/meshes/p4est_mesh.md +++ b/docs/src/meshes/p4est_mesh.md @@ -256,6 +256,8 @@ By doing so, only nodesets with a label present in `boundary_symbols` are treate Other nodesets that could be used for diagnostics are not treated as external boundaries. Note that there is a leading colon `:` compared to the label in the `.inp` mesh file. This is required to turn the label into a [`Symbol`](https://docs.julialang.org/en/v1/manual/metaprogramming/#Symbols). +**Important**: In Julia, a symbol _cannot_ contain a hyphen/dash `-`, i.e., `:BC-1` is _not_ a valid symbol. +Keep this in mind when importing boundaries, you might have to convert hyphens/dashes `-` to underscores `_` in the `.inp` mesh file, i.e., `BC_1` instead of `BC-1`. A 2D example for this mesh, which is read-in for an unstructured mesh file created with `gmsh`, is presented in `examples/p4est_2d_dgsem/elixir_euler_NACA6412airfoil_mach2.jl`. 
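To make the boundary-symbol convention documented in the change above concrete, a mesh read-in might look like the following sketch. The file name, the nodeset labels, and the polynomial degree are assumptions for illustration and are not part of the patch.

```julia
using Trixi

# Hypothetical Abaqus file whose boundary nodesets are labeled
# PhysicalLine1, ..., PhysicalLine4 (no hyphens/dashes in the labels)
mesh_file = joinpath(@__DIR__, "my_mesh.inp")

# Only the nodesets listed here are treated as external boundaries;
# note the leading colon that turns each label into a Julia Symbol.
mesh = P4estMesh{2}(mesh_file, polydeg = 3,
                    boundary_symbols = [:PhysicalLine1, :PhysicalLine2,
                                        :PhysicalLine3, :PhysicalLine4])
```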
From bd060b6d140b2c4f8f717a89899e867593e141c1 Mon Sep 17 00:00:00 2001 From: Andrew Winters Date: Thu, 14 Mar 2024 15:49:16 +0100 Subject: [PATCH 41/58] Add functionality for `TimeSeries` callback on `UnstructuredMesh2D` (#1855) * add functionality for TimeSeries callback on UnstructuredMesh2D * Update src/callbacks_step/time_series_dg.jl Co-authored-by: Hendrik Ranocha * Apply suggestions from code review Co-authored-by: Daniel Doehring * add strategy to correctly locate a gauge point within a curvilinear element * add sanity check that the Newton solution is correct * run formatter * implement a more general approach that also works on curved element without issue * run formatter * forgot to format the examples * Apply suggestions from code review Co-authored-by: Hendrik Ranocha Co-authored-by: Daniel Doehring * working version of the element finding routine * run formatter * add new elixir for the time series callback * add additional test for the time series callback on an unstructured mesh * add appropriate test * update docstring * add comment about the barycenter computation * add simplifications and comments from code review * adjust variable name to avoid ugly formatting * Apply suggestions from code review Co-authored-by: Hendrik Ranocha * fix variable name * remove Experimental status from the TimeSeriesCallback * move new TimeSeries test into the unit testing * add output_directory creation if not already done. Necessary if this callback is used without the SaveSolution callback * formatting * update test mesh to have one straight-sided element to trigger inverse bi-linear interpolation * update test values * add news item * forgot to update all new test values on the new mesh * update tests and use coverage override to avoid redundancy --------- Co-authored-by: Hendrik Ranocha Co-authored-by: Daniel Doehring --- NEWS.md | 12 +- .../elixir_euler_time_series.jl | 115 ++++++++ src/callbacks_step/time_series.jl | 9 +- src/callbacks_step/time_series_dg.jl | 6 +- src/callbacks_step/time_series_dg2d.jl | 279 +++++++++++++++++- test/test_unit.jl | 1 + test/test_unstructured_2d.jl | 33 +++ 7 files changed, 443 insertions(+), 12 deletions(-) create mode 100644 examples/unstructured_2d_dgsem/elixir_euler_time_series.jl diff --git a/NEWS.md b/NEWS.md index d70504d8c8..5b08d51ab8 100644 --- a/NEWS.md +++ b/NEWS.md @@ -4,13 +4,19 @@ Trixi.jl follows the interpretation of [semantic versioning (semver)](https://ju used in the Julia ecosystem. Notable changes will be documented in this file for human readability. +## Changes in the v0.7 lifecycle + +#### Added +- Implementation of `TimeSeriesCallback` for curvilinear meshes on `UnstructuredMesh2D`. + + ## Changes when updating to v0.7 from v0.6.x #### Added #### Changed -- The default wave speed estimate used within `flux_hll` is now `min_max_speed_davis` +- The default wave speed estimate used within `flux_hll` is now `min_max_speed_davis` instead of `min_max_speed_naive`. #### Deprecated @@ -26,7 +32,7 @@ for human readability. #### Added - AMR for hyperbolic-parabolic equations on 3D `P4estMesh` - `flux_hllc` on non-cartesian meshes for `CompressibleEulerEquations{2,3}D` -- Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, +- Different boundary conditions for quad/hex meshes in Abaqus format, even if not generated by HOHQMesh, can now be digested by Trixi in 2D and 3D. 
- Subcell (positivity) limiting support for nonlinear variables in 2D for `TreeMesh` - Added Lighthill-Whitham-Richards (LWR) traffic model @@ -40,7 +46,7 @@ for human readability. #### Changed - The wave speed estimates for `flux_hll`, `FluxHLL()` are now consistent across equations. - In particular, the functions `min_max_speed_naive`, `min_max_speed_einfeldt` are now + In particular, the functions `min_max_speed_naive`, `min_max_speed_einfeldt` are now conceptually identical across equations. Users, who have been using `flux_hll` for MHD have now to use `flux_hlle` in order to use the Einfeldt wave speed estimate. diff --git a/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl b/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl new file mode 100644 index 0000000000..13233cdadb --- /dev/null +++ b/examples/unstructured_2d_dgsem/elixir_euler_time_series.jl @@ -0,0 +1,115 @@ +# An elixir that has an alternative convergence test that uses +# the `TimeSeriesCallback` on several gauge points. Many of the +# gauge points are selected as "stress tests" for the element +# identification, e.g., a gauge point that lies on an +# element corner of a curvilinear mesh + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Euler equations + +equations = CompressibleEulerEquations2D(1.4) + +# Modify the manufactured solution test to use `L = sqrt(2)` +# in the initial condition and source terms +function initial_condition_convergence_shifted(x, t, + equations::CompressibleEulerEquations2D) + c = 2 + A = 0.1 + L = sqrt(2) + f = 1 / L + ω = 2 * pi * f + ini = c + A * sin(ω * (x[1] + x[2] - t)) + + rho = ini + rho_v1 = ini + rho_v2 = ini + rho_e = ini^2 + + return SVector(rho, rho_v1, rho_v2, rho_e) +end + +@inline function source_terms_convergence_shifted(u, x, t, + equations::CompressibleEulerEquations2D) + # Same settings as in `initial_condition` + c = 2 + A = 0.1 + L = sqrt(2) + f = 1 / L + ω = 2 * pi * f + γ = equations.gamma + + x1, x2 = x + si, co = sincos(ω * (x1 + x2 - t)) + rho = c + A * si + rho_x = ω * A * co + # Note that d/dt rho = -d/dx rho = -d/dy rho. + + tmp = (2 * rho - 1) * (γ - 1) + + du1 = rho_x + du2 = rho_x * (1 + tmp) + du3 = du2 + du4 = 2 * rho_x * (rho + tmp) + + return SVector(du1, du2, du3, du4) +end + +initial_condition = initial_condition_convergence_shifted + +source_term = source_terms_convergence_shifted + +############################################################################### +# Get the DG approximation space + +solver = DGSEM(polydeg = 6, surface_flux = flux_lax_friedrichs) + +############################################################################### +# Get the curved quad mesh from a file (downloads the file if not available locally) + +mesh_file = Trixi.download("https://gist.githubusercontent.com/andrewwinters5000/b434e724e3972a9c4ee48d58c80cdcdb/raw/55c916cd8c0294a2d4a836e960dac7247b7c8ccf/mesh_multiple_flips.mesh", + joinpath(@__DIR__, "mesh_multiple_flips.mesh")) + +mesh = UnstructuredMesh2D(mesh_file, periodicity = true) + +############################################################################### +# create the semi discretization object + +semi = SemidiscretizationHyperbolic(mesh, equations, initial_condition, solver, + source_terms = source_term) + +############################################################################### +# ODE solvers, callbacks etc. 
+ +tspan = (0.0, 1.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 1000 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +time_series = TimeSeriesCallback(semi, + [(0.75, 0.7), (1.23, 0.302), (0.8, 1.0), + (0.353553390593274, 0.353553390593274), + (0.505, 1.125), (1.37, 0.89), (0.349, 0.7153), + (0.883883476483184, 0.406586401289607), + (sqrt(2), sqrt(2))]; + interval = 10) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + time_series, + alive_callback) + +############################################################################### +# run the simulation + +sol = solve(ode, RDPK3SpFSAL49(); abstol = 1.0e-6, reltol = 1.0e-6, + ode_default_options()..., callback = callbacks); + +summary_callback() # print the timer summary diff --git a/src/callbacks_step/time_series.jl b/src/callbacks_step/time_series.jl index 7baa6b9c5a..f6d76f0fb1 100644 --- a/src/callbacks_step/time_series.jl +++ b/src/callbacks_step/time_series.jl @@ -23,8 +23,8 @@ After the last time step, the results are stored in an HDF5 file `filename` in d The real data type `RealT` and data type for solution variables `uEltype` default to the respective types used in the solver and the cache. -!!! warning "Experimental implementation" - This is an experimental feature and may change in future releases. +Currently this callback is only implemented for [`TreeMesh`](@ref) in 2D +and [`UnstructuredMesh2D`](@ref). """ mutable struct TimeSeriesCallback{RealT <: Real, uEltype <: Real, SolutionVariables, VariableNames, Cache} @@ -96,6 +96,11 @@ function TimeSeriesCallback(mesh, equations, solver, cache, point_coordinates; throw(ArgumentError("`point_coordinates` must be a matrix of size n_points × ndims")) end + # create the output folder if it does not exist already + if mpi_isroot() && !isdir(output_directory) + mkpath(output_directory) + end + # Transpose point_coordinates to our usual format [ndims, n_points] # Note: They are accepted in a different format to allow direct input from `readdlm` point_coordinates_ = permutedims(point_coordinates) diff --git a/src/callbacks_step/time_series_dg.jl b/src/callbacks_step/time_series_dg.jl index 1b63979d57..ae394afbbf 100644 --- a/src/callbacks_step/time_series_dg.jl +++ b/src/callbacks_step/time_series_dg.jl @@ -5,8 +5,10 @@ @muladd begin #! format: noindent -# Store time series file for a TreeMesh with a DG solver -function save_time_series_file(time_series_callback, mesh::TreeMesh, equations, dg::DG) +# Store time series file for a DG solver +function save_time_series_file(time_series_callback, + mesh::Union{TreeMesh, UnstructuredMesh2D}, + equations, dg::DG) @unpack (interval, solution_variables, variable_names, output_directory, filename, point_coordinates, point_data, time, step, time_series_cache) = time_series_callback diff --git a/src/callbacks_step/time_series_dg2d.jl b/src/callbacks_step/time_series_dg2d.jl index c15945d6e1..ad7c6851c8 100644 --- a/src/callbacks_step/time_series_dg2d.jl +++ b/src/callbacks_step/time_series_dg2d.jl @@ -6,7 +6,9 @@ #! 
format: noindent # Creates cache for time series callback -function create_cache_time_series(point_coordinates, mesh::TreeMesh{2}, dg, cache) +function create_cache_time_series(point_coordinates, + mesh::Union{TreeMesh{2}, UnstructuredMesh2D}, + dg, cache) # Determine element ids for point coordinates element_ids = get_elements_by_coordinates(point_coordinates, mesh, dg, cache) @@ -68,6 +70,144 @@ function get_elements_by_coordinates!(element_ids, coordinates, mesh::TreeMesh, return element_ids end +# Elements on an `UnstructuredMesh2D` are possibly curved. Assume that each +# element is convex, i.e., all interior angles are less than 180 degrees. +# This routine computes the shortest distance from a given point to each element +# surface in the mesh. These distances then indicate possible candidate elements. +# From these candidates we (essentially) apply a ray casting strategy and identify +# the element in which the point lies by comparing the ray formed by the point to +# the nearest boundary to the rays cast by the candidate element barycenters to the +# boundary. If these rays point in the same direction, then we have identified the +# desired element location. +function get_elements_by_coordinates!(element_ids, coordinates, + mesh::UnstructuredMesh2D, + dg, cache) + if length(element_ids) != size(coordinates, 2) + throw(DimensionMismatch("storage length for element ids does not match the number of coordinates")) + end + + # Reset element ids - 0 indicates "not (yet) found" + element_ids .= 0 + + # Compute and save the barycentric coordinate on each element + bary_centers = zeros(eltype(mesh.corners), 2, mesh.n_elements) + calc_bary_centers!(bary_centers, dg, cache) + + # Iterate over coordinates + distances = zeros(eltype(mesh.corners), mesh.n_elements) + indices = zeros(Int, mesh.n_elements, 2) + for index in 1:length(element_ids) + # Grab the current point for which the element needs found + point = SVector(coordinates[1, index], + coordinates[2, index]) + + # Compute the minimum distance between the `point` and all the element surfaces + # saved into `distances`. The point in `node_coordinates` that gives said minimum + # distance on each element is saved in `indices` + distances, indices = calc_minimum_surface_distance(point, + cache.elements.node_coordinates, + dg, mesh) + + # Get the candidate elements where the `point` might live + candidates = findall(abs.(minimum(distances) .- distances) .< + 500 * eps(eltype(point))) + + # The minimal surface point is on a boundary so it plays no role which candidate + # we use to grab it. So just use the first one + surface_point = SVector(cache.elements.node_coordinates[1, + indices[candidates[1], + 1], + indices[candidates[1], + 2], + candidates[1]], + cache.elements.node_coordinates[2, + indices[candidates[1], + 1], + indices[candidates[1], + 2], + candidates[1]]) + + # Compute the vector pointing from the current `point` toward the surface + P = surface_point - point + + # If the vector `P` is the zero vector then this `point` is at an element corner or + # on a surface. In this case the choice of a candidate element is ambiguous and + # we just use the first candidate. However, solutions might differ at discontinuous + # interfaces such that this choice may influence the result. 
+ if sum(P .* P) < 500 * eps(eltype(point)) + element_ids[index] = candidates[1] + continue + end + + # Loop through all the element candidates until we find a vector from the barycenter + # to the surface that points in the same direction as the current `point` vector. + # This then gives us the correct element. + for element in 1:length(candidates) + bary_center = SVector(bary_centers[1, candidates[element]], + bary_centers[2, candidates[element]]) + # Vector pointing from the barycenter toward the minimal `surface_point` + B = surface_point - bary_center + if sum(P .* B) > zero(eltype(bary_center)) + element_ids[index] = candidates[element] + break + end + end + end + + return element_ids +end + +# Use the available `node_coordinates` on each element to compute and save the barycenter. +# In essence, the barycenter is like an average where all the x and y node coordinates are +# summed and then we divide by the total number of degrees of freedom on the element, i.e., +# the value of `n^2` in two spatial dimensions. +@inline function calc_bary_centers!(bary_centers, dg, cache) + n = nnodes(dg) + @views for element in eachelement(dg, cache) + bary_centers[1, element] = sum(cache.elements.node_coordinates[1, :, :, + element]) / n^2 + bary_centers[2, element] = sum(cache.elements.node_coordinates[2, :, :, + element]) / n^2 + end + return nothing +end + +# Compute the shortest distance from a `point` to the surface of each element +# using the available `node_coordinates`. Also return the index pair of this +# minimum surface point location. We compute and store in `min_distance` +# the squared norm to avoid computing computationally more expensive square roots. +# Note! Could be made more accurate if the `node_coordinates` were super-sampled +# and reinterpolated onto a higher polynomial degree before this computation. 
+function calc_minimum_surface_distance(point, node_coordinates, + dg, mesh::UnstructuredMesh2D) + n = nnodes(dg) + min_distance2 = Inf * ones(eltype(mesh.corners), length(mesh)) + indices = zeros(Int, length(mesh), 2) + for k in 1:length(mesh) + # used to ensure that only boundary points are used + on_surface = MVector(false, false) + for j in 1:n + on_surface[2] = (j == 1) || (j == n) + for i in 1:n + on_surface[1] = (i == 1) || (i == n) + if !any(on_surface) + continue + end + node = SVector(node_coordinates[1, i, j, k], + node_coordinates[2, i, j, k]) + distance2 = sum(abs2, node - point) + if distance2 < min_distance2[k] + min_distance2[k] = distance2 + indices[k, 1] = i + indices[k, 2] = j + end + end + end + end + + return min_distance2, indices +end + function get_elements_by_coordinates(coordinates, mesh, dg, cache) element_ids = Vector{Int}(undef, size(coordinates, 2)) get_elements_by_coordinates!(element_ids, coordinates, mesh, dg, cache) @@ -106,8 +246,137 @@ function calc_interpolating_polynomials!(interpolating_polynomials, coordinates, return interpolating_polynomials end -function calc_interpolating_polynomials(coordinates, element_ids, mesh::TreeMesh, dg, - cache) +function calc_interpolating_polynomials!(interpolating_polynomials, coordinates, + element_ids, + mesh::UnstructuredMesh2D, dg::DGSEM, cache) + @unpack nodes = dg.basis + + wbary = barycentric_weights(nodes) + + # Helper array for a straight-sided quadrilateral element + corners = zeros(eltype(mesh.corners), 4, 2) + + for index in 1:length(element_ids) + # Construct point + x = SVector(ntuple(i -> coordinates[i, index], ndims(mesh))) + + # Convert to unit coordinates; procedure differs for straight-sided + # versus curvilinear elements + element = element_ids[index] + if !mesh.element_is_curved[element] + for j in 1:2, i in 1:4 + # Pull the (x,y) values of the element corners from the global corners array + corners[i, j] = mesh.corners[j, mesh.element_node_ids[i, element]] + end + # Compute coordinates in reference system + unit_coordinates = invert_bilinear_interpolation(mesh, x, corners) + + # Sanity check that the computed `unit_coordinates` indeed recover the desired point `x` + x_check = straight_side_quad_map(unit_coordinates[1], unit_coordinates[2], + corners) + if !isapprox(x[1], x_check[1]) || !isapprox(x[2], x_check[2]) + error("failed to compute computational coordinates for the time series point $(x), closet candidate was $(x_check)") + end + else # mesh.element_is_curved[element] + unit_coordinates = invert_transfinite_interpolation(mesh, x, + view(mesh.surface_curves, + :, element)) + + # Sanity check that the computed `unit_coordinates` indeed recover the desired point `x` + x_check = transfinite_quad_map(unit_coordinates[1], unit_coordinates[2], + view(mesh.surface_curves, :, element)) + if !isapprox(x[1], x_check[1]) || !isapprox(x[2], x_check[2]) + error("failed to compute computational coordinates for the time series point $(x), closet candidate was $(x_check)") + end + end + + # Calculate interpolating polynomial for each dimension, making use of tensor product structure + for d in 1:ndims(mesh) + interpolating_polynomials[:, d, index] .= lagrange_interpolating_polynomials(unit_coordinates[d], + nodes, + wbary) + end + end + + return interpolating_polynomials +end + +# Use a Newton iteration to determine the computational coordinates +# (xi, eta) of an (x,y) `point` that is given in physical coordinates +# by inverting the transformation. 
For straight-sided elements this +# amounts to inverting a bi-linear interpolation. For curved +# elements we invert the transfinite interpolation with linear blending. +# The residual function for the Newton iteration is +# r(xi, eta) = X(xi, eta) - point +# and the Jacobian entries are computed accordingly from either +# `straight_side_quad_map_metrics` or `transfinite_quad_map_metrics`. +# We exploit the 2x2 nature of the problem and directly compute the matrix +# inverse to make things faster. The implementations below are inspired by +# an answer on Stack Overflow (https://stackoverflow.com/a/18332009) where +# the author explicitly states that their code is released to the public domain. +@inline function invert_bilinear_interpolation(mesh::UnstructuredMesh2D, point, + element_corners) + # Initial guess for the point (center of the reference element) + xi = zero(eltype(point)) + eta = zero(eltype(point)) + for k in 1:5 # Newton's method should converge quickly + # Compute current x and y coordinate and the Jacobian matrix + # J = (X_xi, X_eta; Y_xi, Y_eta) + x, y = straight_side_quad_map(xi, eta, element_corners) + J11, J12, J21, J22 = straight_side_quad_map_metrics(xi, eta, element_corners) + + # Compute residuals for the Newton teration for the current (x, y) coordinate + r1 = x - point[1] + r2 = y - point[2] + + # Newton update that directly applies the inverse of the 2x2 Jacobian matrix + inv_detJ = inv(J11 * J22 - J12 * J21) + + # Update with explicitly inverted Jacobian + xi = xi - inv_detJ * (J22 * r1 - J12 * r2) + eta = eta - inv_detJ * (-J21 * r1 + J11 * r2) + + # Ensure updated point is in the reference element + xi = min(max(xi, -1), 1) + eta = min(max(eta, -1), 1) + end + + return SVector(xi, eta) +end + +@inline function invert_transfinite_interpolation(mesh::UnstructuredMesh2D, point, + surface_curves::AbstractVector{<:CurvedSurface}) + # Initial guess for the point (center of the reference element) + xi = zero(eltype(point)) + eta = zero(eltype(point)) + for k in 1:5 # Newton's method should converge quickly + # Compute current x and y coordinate and the Jacobian matrix + # J = (X_xi, X_eta; Y_xi, Y_eta) + x, y = transfinite_quad_map(xi, eta, surface_curves) + J11, J12, J21, J22 = transfinite_quad_map_metrics(xi, eta, surface_curves) + + # Compute residuals for the Newton teration for the current (x,y) coordinate + r1 = x - point[1] + r2 = y - point[2] + + # Newton update that directly applies the inverse of the 2x2 Jacobian matrix + inv_detJ = inv(J11 * J22 - J12 * J21) + + # Update with explicitly inverted Jacobian + xi = xi - inv_detJ * (J22 * r1 - J12 * r2) + eta = eta - inv_detJ * (-J21 * r1 + J11 * r2) + + # Ensure updated point is in the reference element + xi = min(max(xi, -1), 1) + eta = min(max(eta, -1), 1) + end + + return SVector(xi, eta) +end + +function calc_interpolating_polynomials(coordinates, element_ids, + mesh::Union{TreeMesh, UnstructuredMesh2D}, + dg, cache) interpolating_polynomials = Array{real(dg), 3}(undef, nnodes(dg), ndims(mesh), length(element_ids)) @@ -121,8 +390,8 @@ end # Record the solution variables at each given point function record_state_at_points!(point_data, u, solution_variables, n_solution_variables, - mesh::TreeMesh{2}, equations, dg::DG, - time_series_cache) + mesh::Union{TreeMesh{2}, UnstructuredMesh2D}, + equations, dg::DG, time_series_cache) @unpack element_ids, interpolating_polynomials = time_series_cache old_length = length(first(point_data)) new_length = old_length + n_solution_variables diff --git a/test/test_unit.jl 
b/test/test_unit.jl index 1907a28171..03a78f6918 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -600,6 +600,7 @@ end end @timed_testset "TimeSeriesCallback" begin + # Test the 2D TreeMesh version of the callback and some warnings @test_nowarn_mod trixi_include(@__MODULE__, joinpath(examples_dir(), "tree_2d_dgsem", "elixir_acoustics_gaussian_source.jl"), diff --git a/test/test_unstructured_2d.jl b/test/test_unstructured_2d.jl index 8a62dcaec3..6814250dd4 100644 --- a/test/test_unstructured_2d.jl +++ b/test/test_unstructured_2d.jl @@ -198,6 +198,39 @@ end end end +@trixi_testset "elixir_euler_time_series.jl" begin + @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_euler_time_series.jl"), + l2=[ + 6.984024099236519e-5, + 6.289022520363763e-5, + 6.550951878107466e-5, + 0.00016222767700879948, + ], + linf=[ + 0.0005367823248620951, + 0.000671293180158461, + 0.0005656680962440319, + 0.0013910024779804075, + ], + tspan=(0.0, 0.2), + # With the default `maxiters = 1` in coverage tests, + # there would be no time series to check against. + coverage_override=(maxiters = 20,)) + # Extra test that the `TimeSeries` callback creates reasonable data + point_data_1 = time_series.affect!.point_data[1] + @test all(isapprox.(point_data_1[1:4], + [1.9546882708551676, 1.9547149531788077, + 1.9547142161310154, 3.821066781119142])) + # Ensure that we do not have excessive memory allocations + # (e.g., from type instabilities) + let + t = sol.t[end] + u_ode = sol.u[end] + du_ode = similar(u_ode) + @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 + end +end + @trixi_testset "elixir_acoustics_gauss_wall.jl" begin @test_trixi_include(joinpath(EXAMPLES_DIR, "elixir_acoustics_gauss_wall.jl"), l2=[0.029330394861252995, 0.029345079728907965, From 9323c2ae47300ce83976fcdaba8f5801e82e41a5 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 14 Mar 2024 15:54:43 +0100 Subject: [PATCH 42/58] set version to v0.7.3 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 6b44af4a3f..f2f8a10626 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.7.3-pre" +version = "0.7.3" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From a528083a1c41d9fd131fc972816881d3276f718e Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Thu, 14 Mar 2024 15:54:55 +0100 Subject: [PATCH 43/58] set development version to v0.7.4-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index f2f8a10626..97da4aec51 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.7.3" +version = "0.7.4-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From aa9ea20342e3d02445ec2dc53e380c405da3b683 Mon Sep 17 00:00:00 2001 From: Benedict <135045760+bgeihe@users.noreply.github.com> Date: Fri, 15 Mar 2024 10:20:43 +0100 Subject: [PATCH 44/58] reset n_boundaries_per_direction (#1870) --- src/solvers/dgsem_tree/containers_2d.jl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/solvers/dgsem_tree/containers_2d.jl b/src/solvers/dgsem_tree/containers_2d.jl index 4bfbddead9..7048739a22 100644 --- a/src/solvers/dgsem_tree/containers_2d.jl +++ b/src/solvers/dgsem_tree/containers_2d.jl @@ -421,6 +421,8 @@ end function init_boundaries!(boundaries, elements, mesh::TreeMesh2D) # Exit early if there are no boundaries to initialize if nboundaries(boundaries) == 0 + # In this case n_boundaries_per_direction still needs to be reset! + boundaries.n_boundaries_per_direction = SVector(0, 0, 0, 0) return nothing end From 38a9a5234cb2e5588fced11c7bac5d9441142014 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 15 Mar 2024 10:21:54 +0100 Subject: [PATCH 45/58] remove some minor allocations for threaded FDSBP (#1868) * do not inline calc_volume_integral! for FDSBP * avoid allocations in unstructured prolong2interfaces! * avoid allocations in unstructured prolong2boundaries! --- src/solvers/dgsem_unstructured/dg_2d.jl | 40 ++++++++++++---------- src/solvers/fdsbp_unstructured/fdsbp_2d.jl | 10 +++--- 2 files changed, 27 insertions(+), 23 deletions(-) diff --git a/src/solvers/dgsem_unstructured/dg_2d.jl b/src/solvers/dgsem_unstructured/dg_2d.jl index 988e995d6b..ce602e178d 100644 --- a/src/solvers/dgsem_unstructured/dg_2d.jl +++ b/src/solvers/dgsem_unstructured/dg_2d.jl @@ -95,49 +95,51 @@ function prolong2interfaces!(cache, u, mesh::UnstructuredMesh2D, equations, surface_integral, dg::DG) @unpack interfaces = cache + @unpack element_ids, element_side_ids = interfaces + interfaces_u = interfaces.u @threaded for interface in eachinterface(dg, cache) - primary_element = interfaces.element_ids[1, interface] - secondary_element = interfaces.element_ids[2, interface] + primary_element = element_ids[1, interface] + secondary_element = element_ids[2, interface] - primary_side = interfaces.element_side_ids[1, interface] - secondary_side = interfaces.element_side_ids[2, interface] + primary_side = element_side_ids[1, interface] + secondary_side = element_side_ids[2, interface] if primary_side == 1 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, i, 1, primary_element] + interfaces_u[1, v, i, interface] = u[v, i, 1, primary_element] end elseif primary_side == 2 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, nnodes(dg), i, primary_element] + interfaces_u[1, v, i, interface] = u[v, nnodes(dg), i, primary_element] end elseif primary_side == 3 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, i, nnodes(dg), primary_element] + interfaces_u[1, v, i, interface] = u[v, i, nnodes(dg), primary_element] end else # primary_side == 4 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[1, v, i, interface] = u[v, 1, i, primary_element] + interfaces_u[1, v, i, interface] = u[v, 1, i, primary_element] end end if secondary_side == 1 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[2, v, i, interface] = u[v, i, 1, secondary_element] + interfaces_u[2, v, i, interface] = u[v, i, 1, 
secondary_element] end elseif secondary_side == 2 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[2, v, i, interface] = u[v, nnodes(dg), i, + interfaces_u[2, v, i, interface] = u[v, nnodes(dg), i, secondary_element] end elseif secondary_side == 3 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[2, v, i, interface] = u[v, i, nnodes(dg), + interfaces_u[2, v, i, interface] = u[v, i, nnodes(dg), secondary_element] end else # secondary_side == 4 for i in eachnode(dg), v in eachvariable(equations) - interfaces.u[2, v, i, interface] = u[v, 1, i, secondary_element] + interfaces_u[2, v, i, interface] = u[v, 1, i, secondary_element] end end end @@ -278,26 +280,28 @@ function prolong2boundaries!(cache, u, mesh::UnstructuredMesh2D, equations, surface_integral, dg::DG) @unpack boundaries = cache + @unpack element_id, element_side_id = boundaries + boundaries_u = boundaries.u @threaded for boundary in eachboundary(dg, cache) - element = boundaries.element_id[boundary] - side = boundaries.element_side_id[boundary] + element = element_id[boundary] + side = element_side_id[boundary] if side == 1 for l in eachnode(dg), v in eachvariable(equations) - boundaries.u[v, l, boundary] = u[v, l, 1, element] + boundaries_u[v, l, boundary] = u[v, l, 1, element] end elseif side == 2 for l in eachnode(dg), v in eachvariable(equations) - boundaries.u[v, l, boundary] = u[v, nnodes(dg), l, element] + boundaries_u[v, l, boundary] = u[v, nnodes(dg), l, element] end elseif side == 3 for l in eachnode(dg), v in eachvariable(equations) - boundaries.u[v, l, boundary] = u[v, l, nnodes(dg), element] + boundaries_u[v, l, boundary] = u[v, l, nnodes(dg), element] end else # side == 4 for l in eachnode(dg), v in eachvariable(equations) - boundaries.u[v, l, boundary] = u[v, 1, l, element] + boundaries_u[v, l, boundary] = u[v, 1, l, element] end end end diff --git a/src/solvers/fdsbp_unstructured/fdsbp_2d.jl b/src/solvers/fdsbp_unstructured/fdsbp_2d.jl index c35772cdf1..cbe11ac6ac 100644 --- a/src/solvers/fdsbp_unstructured/fdsbp_2d.jl +++ b/src/solvers/fdsbp_unstructured/fdsbp_2d.jl @@ -28,11 +28,11 @@ end # 2D volume integral contributions for `VolumeIntegralStrongForm` # OBS! This is the standard (not de-aliased) form of the volume integral. # So it is not provably stable for variable coefficients due to the the metric terms. 
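The `prolong2interfaces!` and `prolong2boundaries!` hunks above avoid the reported allocations by reading the container arrays (`interfaces.u`, `boundaries.u`, and the id arrays) into local variables once, before the threaded loop. A minimal sketch of that pattern follows, using a hypothetical container type and loop body instead of the actual Trixi.jl data structures, and Polyester.jl's `@batch` directly in place of Trixi.jl's `@threaded` wrapper:

```julia
using Polyester: @batch

# Hypothetical stand-in for the interface container; not the Trixi.jl type.
struct ToyInterfaceContainer
    u::Array{Float64, 3}     # [side, node, interface]
    element_ids::Matrix{Int} # [side, interface]
end

function toy_prolong!(interfaces::ToyInterfaceContainer, u::Array{Float64, 3})
    # Hoist the field accesses out of the hot loop, as done in the patch above;
    # the threaded loop body then only works on plain local arrays.
    interfaces_u = interfaces.u
    element_ids = interfaces.element_ids
    @batch for interface in axes(interfaces_u, 3)
        primary = element_ids[1, interface]
        for i in axes(interfaces_u, 2)
            interfaces_u[1, i, interface] = u[i, 1, primary]
        end
    end
    return nothing
end
```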
-@inline function calc_volume_integral!(du, u, - mesh::UnstructuredMesh2D, - nonconservative_terms::False, equations, - volume_integral::VolumeIntegralStrongForm, - dg::FDSBP, cache) +function calc_volume_integral!(du, u, + mesh::UnstructuredMesh2D, + nonconservative_terms::False, equations, + volume_integral::VolumeIntegralStrongForm, + dg::FDSBP, cache) D = dg.basis # SBP derivative operator @unpack f_threaded = cache @unpack contravariant_vectors = cache.elements From 2dfde7faf3cc74f066d86148ae6c99ed9e58fa79 Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Fri, 15 Mar 2024 13:55:10 +0100 Subject: [PATCH 46/58] Docstrings for some methods in basis Lobatto-Legendre (#1874) * Docstrings for some methods in basis LL * double back slash --- .../src/files/scalar_linear_advection_1d.jl | 2 +- src/solvers/dgsem/basis_lobatto_legendre.jl | 77 +++++++++++++++++-- 2 files changed, 72 insertions(+), 7 deletions(-) diff --git a/docs/literate/src/files/scalar_linear_advection_1d.jl b/docs/literate/src/files/scalar_linear_advection_1d.jl index 77ba7b087c..9b48f29d34 100644 --- a/docs/literate/src/files/scalar_linear_advection_1d.jl +++ b/docs/literate/src/files/scalar_linear_advection_1d.jl @@ -115,7 +115,7 @@ integral = sum(nodes.^3 .* weights) # To approximate the solution, we need to get the polynomial coefficients $\{u_j^{Q_l}\}_{j=0}^N$ # for every element $Q_l$. -# After defining all nodes, we can implement the spatial coordinate $x$ and its initial value $u0$ +# After defining all nodes, we can implement the spatial coordinate $x$ and its initial value $u0 = u(t_0)$ # for every node. x = Matrix{Float64}(undef, length(nodes), n_elements) for element in 1:n_elements diff --git a/src/solvers/dgsem/basis_lobatto_legendre.jl b/src/solvers/dgsem/basis_lobatto_legendre.jl index 9e21b88dfa..cac1dba9c7 100644 --- a/src/solvers/dgsem/basis_lobatto_legendre.jl +++ b/src/solvers/dgsem/basis_lobatto_legendre.jl @@ -404,7 +404,8 @@ function calc_dsplit(nodes, weights) return dsplit end -# Calculate the polynomial derivative matrix D +# Calculate the polynomial derivative matrix D. +# This implements algorithm 37 "PolynomialDerivativeMatrix" from Kopriva's book. function polynomial_derivative_matrix(nodes) n_nodes = length(nodes) d = zeros(n_nodes, n_nodes) @@ -421,6 +422,7 @@ function polynomial_derivative_matrix(nodes) end # Calculate and interpolation matrix (Vandermonde matrix) between two given sets of nodes +# See algorithm 32 "PolynomialInterpolationMatrix" from Kopriva's book. function polynomial_interpolation_matrix(nodes_in, nodes_out, baryweights_in = barycentric_weights(nodes_in)) n_nodes_in = length(nodes_in) @@ -433,6 +435,7 @@ function polynomial_interpolation_matrix(nodes_in, nodes_out, return vandermonde end +# This implements algorithm 32 "PolynomialInterpolationMatrix" from Kopriva's book. function polynomial_interpolation_matrix!(vandermonde, nodes_in, nodes_out, baryweights_in) @@ -463,7 +466,19 @@ function polynomial_interpolation_matrix!(vandermonde, return vandermonde end -# Calculate the barycentric weights for a given node distribution. +""" + barycentric_weights(nodes) + +Calculate the barycentric weights for a given node distribution, i.e., +```math +w_j = \\frac{1}{ \\prod_{k \\neq j} \\left( x_j - x_k \\right ) } +``` + +For details, see (especially Section 3) +- Jean-Paul Berrut and Lloyd N. Trefethen (2004). + Barycentric Lagrange Interpolation. 
+ [DOI:10.1137/S0036144502417715](https://doi.org/10.1137/S0036144502417715) +""" function barycentric_weights(nodes) n_nodes = length(nodes) weights = ones(n_nodes) @@ -494,12 +509,31 @@ function calc_lhat(x, nodes, weights) return lhat end -# Calculate Lagrange polynomials for a given node distribution. +""" + lagrange_interpolating_polynomials(x, nodes, wbary) + +Calculate Lagrange polynomials for a given node distribution with +associated barycentric weights `wbary` at a given point `x` on the +reference interval ``[-1, 1]``. + +This returns all ``l_j(x)``, i.e., the Lagrange polynomials for each node ``x_j``. +Thus, to obtain the interpolating polynomial ``p(x)`` at ``x``, one has to +multiply the Lagrange polynomials with the nodal values ``u_j`` and sum them up: +``p(x) = \\sum_{j=1}^{n} u_j l_j(x)``. + +For details, see e.g. Section 2 of +- Jean-Paul Berrut and Lloyd N. Trefethen (2004). + Barycentric Lagrange Interpolation. + [DOI:10.1137/S0036144502417715](https://doi.org/10.1137/S0036144502417715) +""" function lagrange_interpolating_polynomials(x, nodes, wbary) n_nodes = length(nodes) polynomials = zeros(n_nodes) for i in 1:n_nodes + # Avoid division by zero when `x` is close to node by using + # the Kronecker-delta property at nodes + # of the Lagrange interpolation polynomials. if isapprox(x, nodes[i], rtol = eps(x)) polynomials[i] = 1 return polynomials @@ -518,6 +552,17 @@ function lagrange_interpolating_polynomials(x, nodes, wbary) return polynomials end +""" + gauss_lobatto_nodes_weights(n_nodes::Integer) + +Computes nodes ``x_j`` and weights ``w_j`` for the (Legendre-)Gauss-Lobatto quadrature. +This implements algorithm 25 "GaussLobattoNodesAndWeights" from the book + +- David A. Kopriva, (2009). + Implementing spectral methods for partial differential equations: + Algorithms for scientists and engineers. + [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) +""" # From FLUXO (but really from blue book by Kopriva) function gauss_lobatto_nodes_weights(n_nodes::Integer) # From Kopriva's book @@ -585,7 +630,7 @@ function gauss_lobatto_nodes_weights(n_nodes::Integer) return nodes, weights end -# From FLUXO (but really from blue book by Kopriva) +# From FLUXO (but really from blue book by Kopriva, algorithm 24) function calc_q_and_l(N::Integer, x::Float64) L_Nm2 = 1.0 L_Nm1 = x @@ -609,7 +654,17 @@ function calc_q_and_l(N::Integer, x::Float64) end calc_q_and_l(N::Integer, x::Real) = calc_q_and_l(N, convert(Float64, x)) -# From FLUXO (but really from blue book by Kopriva) +""" + gauss_nodes_weights(n_nodes::Integer) + +Computes nodes ``x_j`` and weights ``w_j`` for the Gauss-Legendre quadrature. +This implements algorithm 23 "LegendreGaussNodesAndWeights" from the book + +- David A. Kopriva, (2009). + Implementing spectral methods for partial differential equations: + Algorithms for scientists and engineers. + [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) +""" function gauss_nodes_weights(n_nodes::Integer) # From Kopriva's book n_iterations = 10 @@ -666,7 +721,17 @@ function gauss_nodes_weights(n_nodes::Integer) end end -# From FLUXO (but really from blue book by Kopriva) +""" + legendre_polynomial_and_derivative(N::Int, x::Real) + +Computes the Legendre polynomial of degree `N` and its derivative at `x`. +This implements algorithm 22 "LegendrePolynomialAndDerivative" from the book + +- David A. Kopriva, (2009). + Implementing spectral methods for partial differential equations: + Algorithms for scientists and engineers. 
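As a usage illustration for the helpers documented above: `barycentric_weights`, `lagrange_interpolating_polynomials`, and `gauss_lobatto_nodes_weights` are internal functions, so the sketch below accesses them with the `Trixi.` prefix; the node count and evaluation point are arbitrary choices for the example.

```julia
using Trixi

# Six Legendre-Gauss-Lobatto nodes on the reference interval [-1, 1]
nodes, weights = Trixi.gauss_lobatto_nodes_weights(6)
wbary = Trixi.barycentric_weights(nodes)

# Sample a smooth function at the nodes and interpolate to x = 0.3 by
# multiplying the Lagrange polynomials with the nodal values and summing up,
# as described in the docstring of `lagrange_interpolating_polynomials`.
u_nodes = sin.(pi .* nodes)
l = Trixi.lagrange_interpolating_polynomials(0.3, nodes, wbary)
p_at_x = sum(u_nodes .* l) # close to sin(pi * 0.3) for this smooth function
```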
+ [DOI:10.1007/978-90-481-2261-5](https://doi.org/10.1007/978-90-481-2261-5) +""" function legendre_polynomial_and_derivative(N::Int, x::Real) if N == 0 poly = 1.0 From 17ab101d4dc62382bfd83f449d3ca602e20c23ca Mon Sep 17 00:00:00 2001 From: Patrick Ersing <114223904+patrickersing@users.noreply.github.com> Date: Thu, 21 Mar 2024 16:46:03 +0100 Subject: [PATCH 47/58] Extend TimeSeriesCallback for TreeMesh1D/3D (#1873) * add TimeSeriesCallback support for TreeMesh1D * add TimeSeriesCallback support for TreeMesh3D * add testing * update tests * reorganize files structure for TimeSeriesCallback * update test values for julia 1.9 * update news.md * rename time_series_dg2d.jl, add comments from code review * apply formatter --- NEWS.md | 3 +- .../elixir_euler_source_terms.jl | 3 + .../elixir_euler_source_terms.jl | 5 + src/callbacks_step/time_series.jl | 6 +- src/callbacks_step/time_series_dg.jl | 37 ++++ src/callbacks_step/time_series_dg_tree.jl | 185 ++++++++++++++++++ ...dg2d.jl => time_series_dg_unstructured.jl} | 119 +---------- test/test_tree_1d_euler.jl | 17 +- test/test_tree_3d_euler.jl | 37 +++- 9 files changed, 288 insertions(+), 124 deletions(-) create mode 100644 src/callbacks_step/time_series_dg_tree.jl rename src/callbacks_step/{time_series_dg2d.jl => time_series_dg_unstructured.jl} (76%) diff --git a/NEWS.md b/NEWS.md index 5b08d51ab8..022252e61a 100644 --- a/NEWS.md +++ b/NEWS.md @@ -7,7 +7,8 @@ for human readability. ## Changes in the v0.7 lifecycle #### Added -- Implementation of `TimeSeriesCallback` for curvilinear meshes on `UnstructuredMesh2D`. +- Implementation of `TimeSeriesCallback` for curvilinear meshes on `UnstructuredMesh2D` and extension + to 1D and 3D on `TreeMesh`. ## Changes when updating to v0.7 from v0.6.x diff --git a/examples/tree_1d_dgsem/elixir_euler_source_terms.jl b/examples/tree_1d_dgsem/elixir_euler_source_terms.jl index 555910f69f..cb8a09057d 100644 --- a/examples/tree_1d_dgsem/elixir_euler_source_terms.jl +++ b/examples/tree_1d_dgsem/elixir_euler_source_terms.jl @@ -44,9 +44,12 @@ save_solution = SaveSolutionCallback(interval = 100, stepsize_callback = StepsizeCallback(cfl = 0.8) +time_series = TimeSeriesCallback(semi, [0.0, 0.33, 1.0], interval = 10) + callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, + time_series, stepsize_callback) ############################################################################### diff --git a/examples/tree_3d_dgsem/elixir_euler_source_terms.jl b/examples/tree_3d_dgsem/elixir_euler_source_terms.jl index f0246c3049..021fd09f31 100644 --- a/examples/tree_3d_dgsem/elixir_euler_source_terms.jl +++ b/examples/tree_3d_dgsem/elixir_euler_source_terms.jl @@ -41,9 +41,14 @@ save_solution = SaveSolutionCallback(interval = 100, stepsize_callback = StepsizeCallback(cfl = 0.6) +time_series = TimeSeriesCallback(semi, + [(0.0, 0.0, 0.0), (0.33, 0.33, 0.33), (1.0, 1.0, 1.0)], + interval = 10) + callbacks = CallbackSet(summary_callback, analysis_callback, alive_callback, save_solution, + time_series, stepsize_callback) ############################################################################### diff --git a/src/callbacks_step/time_series.jl b/src/callbacks_step/time_series.jl index f6d76f0fb1..ae18c85700 100644 --- a/src/callbacks_step/time_series.jl +++ b/src/callbacks_step/time_series.jl @@ -23,8 +23,7 @@ After the last time step, the results are stored in an HDF5 file `filename` in d The real data type `RealT` and data type for solution variables `uEltype` default to the respective types 
used in the solver and the cache. -Currently this callback is only implemented for [`TreeMesh`](@ref) in 2D -and [`UnstructuredMesh2D`](@ref). +Currently this callback is only implemented for [`TreeMesh`](@ref) and [`UnstructuredMesh2D`](@ref). """ mutable struct TimeSeriesCallback{RealT <: Real, uEltype <: Real, SolutionVariables, VariableNames, Cache} @@ -218,5 +217,6 @@ function (time_series_callback::TimeSeriesCallback)(integrator) end include("time_series_dg.jl") -include("time_series_dg2d.jl") +include("time_series_dg_tree.jl") +include("time_series_dg_unstructured.jl") end # @muladd diff --git a/src/callbacks_step/time_series_dg.jl b/src/callbacks_step/time_series_dg.jl index ae394afbbf..3781a10662 100644 --- a/src/callbacks_step/time_series_dg.jl +++ b/src/callbacks_step/time_series_dg.jl @@ -34,4 +34,41 @@ function save_time_series_file(time_series_callback, end end end + +# Creates cache for time series callback +function create_cache_time_series(point_coordinates, + mesh::Union{TreeMesh, UnstructuredMesh2D}, + dg, cache) + # Determine element ids for point coordinates + element_ids = get_elements_by_coordinates(point_coordinates, mesh, dg, cache) + + # Calculate & store Lagrange interpolation polynomials + interpolating_polynomials = calc_interpolating_polynomials(point_coordinates, + element_ids, mesh, + dg, cache) + + time_series_cache = (; element_ids, interpolating_polynomials) + + return time_series_cache +end + +function get_elements_by_coordinates(coordinates, mesh, dg, cache) + element_ids = Vector{Int}(undef, size(coordinates, 2)) + get_elements_by_coordinates!(element_ids, coordinates, mesh, dg, cache) + + return element_ids +end + +function calc_interpolating_polynomials(coordinates, element_ids, + mesh::Union{TreeMesh, UnstructuredMesh2D}, + dg, cache) + interpolating_polynomials = Array{real(dg), 3}(undef, + nnodes(dg), ndims(mesh), + length(element_ids)) + calc_interpolating_polynomials!(interpolating_polynomials, coordinates, element_ids, + mesh, dg, + cache) + + return interpolating_polynomials +end end # @muladd diff --git a/src/callbacks_step/time_series_dg_tree.jl b/src/callbacks_step/time_series_dg_tree.jl new file mode 100644 index 0000000000..37d4e6ea70 --- /dev/null +++ b/src/callbacks_step/time_series_dg_tree.jl @@ -0,0 +1,185 @@ +# By default, Julia/LLVM does not use fused multiply-add operations (FMAs). +# Since these FMAs can increase the performance of many numerical algorithms, +# we need to opt-in explicitly. +# See https://ranocha.de/blog/Optimizing_EC_Trixi for further details. +@muladd begin +#! 
format: noindent + +# Find element ids containing coordinates given as a matrix [ndims, npoints] +function get_elements_by_coordinates!(element_ids, coordinates, mesh::TreeMesh, dg, + cache) + if length(element_ids) != size(coordinates, 2) + throw(DimensionMismatch("storage length for element ids does not match the number of coordinates")) + end + + @unpack cell_ids = cache.elements + @unpack tree = mesh + + # Reset element ids - 0 indicates "not (yet) found" + element_ids .= 0 + found_elements = 0 + + # Iterate over all elements + for element in eachelement(dg, cache) + # Get cell id + cell_id = cell_ids[element] + + # Iterate over coordinates + for index in 1:length(element_ids) + # Skip coordinates for which an element has already been found + if element_ids[index] > 0 + continue + end + + # Construct point + x = SVector(ntuple(i -> coordinates[i, index], ndims(mesh))) + + # Skip if point is not in cell + if !is_point_in_cell(tree, x, cell_id) + continue + end + + # Otherwise point is in cell and thus in element + element_ids[index] = element + found_elements += 1 + end + + # Exit loop if all elements have already been found + if found_elements == length(element_ids) + break + end + end + + return element_ids +end + +# Calculate the interpolating polynomials to extract data at the given coordinates +# The coordinates are known to be located in the respective element in `element_ids` +function calc_interpolating_polynomials!(interpolating_polynomials, coordinates, + element_ids, + mesh::TreeMesh, dg::DGSEM, cache) + @unpack tree = mesh + @unpack nodes = dg.basis + + wbary = barycentric_weights(nodes) + + for index in 1:length(element_ids) + # Construct point + x = SVector(ntuple(i -> coordinates[i, index], ndims(mesh))) + + # Convert to unit coordinates + cell_id = cache.elements.cell_ids[element_ids[index]] + cell_coordinates_ = cell_coordinates(tree, cell_id) + cell_length = length_at_cell(tree, cell_id) + unit_coordinates = (x .- cell_coordinates_) * 2 / cell_length + + # Calculate interpolating polynomial for each dimension, making use of tensor product structure + for d in 1:ndims(mesh) + interpolating_polynomials[:, d, index] .= lagrange_interpolating_polynomials(unit_coordinates[d], + nodes, + wbary) + end + end + + return interpolating_polynomials +end + +# Record the solution variables at each given point for the 1D case +function record_state_at_points!(point_data, u, solution_variables, + n_solution_variables, + mesh::TreeMesh{1}, equations, dg::DG, + time_series_cache) + @unpack element_ids, interpolating_polynomials = time_series_cache + old_length = length(first(point_data)) + new_length = old_length + n_solution_variables + + # Loop over all points/elements that should be recorded + for index in 1:length(element_ids) + # Extract data array and element id + data = point_data[index] + element_id = element_ids[index] + + # Make room for new data to be recorded + resize!(data, new_length) + data[(old_length + 1):new_length] .= zero(eltype(data)) + + # Loop over all nodes to compute their contribution to the interpolated values + for i in eachnode(dg) + u_node = solution_variables(get_node_vars(u, equations, dg, i, + element_id), equations) + + for v in 1:length(u_node) + data[old_length + v] += (u_node[v] * + interpolating_polynomials[i, 1, index]) + end + end + end +end + +# Record the solution variables at each given point for the 2D case +function record_state_at_points!(point_data, u, solution_variables, + n_solution_variables, + mesh::TreeMesh{2}, + equations, dg::DG, 
time_series_cache) + @unpack element_ids, interpolating_polynomials = time_series_cache + old_length = length(first(point_data)) + new_length = old_length + n_solution_variables + + # Loop over all points/elements that should be recorded + for index in 1:length(element_ids) + # Extract data array and element id + data = point_data[index] + element_id = element_ids[index] + + # Make room for new data to be recorded + resize!(data, new_length) + data[(old_length + 1):new_length] .= zero(eltype(data)) + + # Loop over all nodes to compute their contribution to the interpolated values + for j in eachnode(dg), i in eachnode(dg) + u_node = solution_variables(get_node_vars(u, equations, dg, i, j, + element_id), equations) + + for v in 1:length(u_node) + data[old_length + v] += (u_node[v] + * interpolating_polynomials[i, 1, index] + * interpolating_polynomials[j, 2, index]) + end + end + end +end + +# Record the solution variables at each given point for the 3D case +function record_state_at_points!(point_data, u, solution_variables, + n_solution_variables, + mesh::TreeMesh{3}, equations, dg::DG, + time_series_cache) + @unpack element_ids, interpolating_polynomials = time_series_cache + old_length = length(first(point_data)) + new_length = old_length + n_solution_variables + + # Loop over all points/elements that should be recorded + for index in 1:length(element_ids) + # Extract data array and element id + data = point_data[index] + element_id = element_ids[index] + + # Make room for new data to be recorded + resize!(data, new_length) + data[(old_length + 1):new_length] .= zero(eltype(data)) + + # Loop over all nodes to compute their contribution to the interpolated values + for k in eachnode(dg), j in eachnode(dg), i in eachnode(dg) + u_node = solution_variables(get_node_vars(u, equations, dg, i, j, k, + element_id), equations) + + for v in 1:length(u_node) + data[old_length + v] += (u_node[v] + * interpolating_polynomials[i, 1, index] + * interpolating_polynomials[j, 2, index] + * interpolating_polynomials[k, 3, index]) + end + end + end +end +end # @muladd diff --git a/src/callbacks_step/time_series_dg2d.jl b/src/callbacks_step/time_series_dg_unstructured.jl similarity index 76% rename from src/callbacks_step/time_series_dg2d.jl rename to src/callbacks_step/time_series_dg_unstructured.jl index ad7c6851c8..f6d1bb48f2 100644 --- a/src/callbacks_step/time_series_dg2d.jl +++ b/src/callbacks_step/time_series_dg_unstructured.jl @@ -5,71 +5,6 @@ @muladd begin #! 
format: noindent -# Creates cache for time series callback -function create_cache_time_series(point_coordinates, - mesh::Union{TreeMesh{2}, UnstructuredMesh2D}, - dg, cache) - # Determine element ids for point coordinates - element_ids = get_elements_by_coordinates(point_coordinates, mesh, dg, cache) - - # Calculate & store Lagrange interpolation polynomials - interpolating_polynomials = calc_interpolating_polynomials(point_coordinates, - element_ids, mesh, - dg, cache) - - time_series_cache = (; element_ids, interpolating_polynomials) - - return time_series_cache -end - -# Find element ids containing coordinates given as a matrix [ndims, npoints] -function get_elements_by_coordinates!(element_ids, coordinates, mesh::TreeMesh, dg, - cache) - if length(element_ids) != size(coordinates, 2) - throw(DimensionMismatch("storage length for element ids does not match the number of coordinates")) - end - - @unpack cell_ids = cache.elements - @unpack tree = mesh - - # Reset element ids - 0 indicates "not (yet) found" - element_ids .= 0 - found_elements = 0 - - # Iterate over all elements - for element in eachelement(dg, cache) - # Get cell id - cell_id = cell_ids[element] - - # Iterate over coordinates - for index in 1:length(element_ids) - # Skip coordinates for which an element has already been found - if element_ids[index] > 0 - continue - end - - # Construct point - x = SVector(ntuple(i -> coordinates[i, index], ndims(mesh))) - - # Skip if point is not in cell - if !is_point_in_cell(tree, x, cell_id) - continue - end - - # Otherwise point is in cell and thus in element - element_ids[index] = element - found_elements += 1 - end - - # Exit loop if all elements have already been found - if found_elements == length(element_ids) - break - end - end - - return element_ids -end - # Elements on an `UnstructuredMesh2D` are possibly curved. Assume that each # element is convex, i.e., all interior angles are less than 180 degrees. 
# This routine computes the shortest distance from a given point to each element @@ -208,44 +143,6 @@ function calc_minimum_surface_distance(point, node_coordinates, return min_distance2, indices end -function get_elements_by_coordinates(coordinates, mesh, dg, cache) - element_ids = Vector{Int}(undef, size(coordinates, 2)) - get_elements_by_coordinates!(element_ids, coordinates, mesh, dg, cache) - - return element_ids -end - -# Calculate the interpolating polynomials to extract data at the given coordinates -# The coordinates are known to be located in the respective element in `element_ids` -function calc_interpolating_polynomials!(interpolating_polynomials, coordinates, - element_ids, - mesh::TreeMesh, dg::DGSEM, cache) - @unpack tree = mesh - @unpack nodes = dg.basis - - wbary = barycentric_weights(nodes) - - for index in 1:length(element_ids) - # Construct point - x = SVector(ntuple(i -> coordinates[i, index], ndims(mesh))) - - # Convert to unit coordinates - cell_id = cache.elements.cell_ids[element_ids[index]] - cell_coordinates_ = cell_coordinates(tree, cell_id) - cell_length = length_at_cell(tree, cell_id) - unit_coordinates = (x .- cell_coordinates_) * 2 / cell_length - - # Calculate interpolating polynomial for each dimension, making use of tensor product structure - for d in 1:ndims(mesh) - interpolating_polynomials[:, d, index] .= lagrange_interpolating_polynomials(unit_coordinates[d], - nodes, - wbary) - end - end - - return interpolating_polynomials -end - function calc_interpolating_polynomials!(interpolating_polynomials, coordinates, element_ids, mesh::UnstructuredMesh2D, dg::DGSEM, cache) @@ -374,23 +271,9 @@ end return SVector(xi, eta) end -function calc_interpolating_polynomials(coordinates, element_ids, - mesh::Union{TreeMesh, UnstructuredMesh2D}, - dg, cache) - interpolating_polynomials = Array{real(dg), 3}(undef, - nnodes(dg), ndims(mesh), - length(element_ids)) - calc_interpolating_polynomials!(interpolating_polynomials, coordinates, element_ids, - mesh, dg, - cache) - - return interpolating_polynomials -end - -# Record the solution variables at each given point function record_state_at_points!(point_data, u, solution_variables, n_solution_variables, - mesh::Union{TreeMesh{2}, UnstructuredMesh2D}, + mesh::UnstructuredMesh2D, equations, dg::DG, time_series_cache) @unpack element_ids, interpolating_polynomials = time_series_cache old_length = length(first(point_data)) diff --git a/test/test_tree_1d_euler.jl b/test/test_tree_1d_euler.jl index f26500b411..784d123128 100644 --- a/test/test_tree_1d_euler.jl +++ b/test/test_tree_1d_euler.jl @@ -21,7 +21,10 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") 1.6205433861493646e-7, 1.465427772462391e-7, 5.372255111879554e-7, - ]) + ], + # With the default `maxiters = 1` in coverage tests, + # there would be no time series to check against. + coverage_override=(maxiters = 20,)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -30,6 +33,18 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_1d_dgsem") du_ode = similar(u_ode) @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end + # Extra test to make sure the "TimeSeriesCallback" made correct data. 
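Putting the pieces of this patch together, the following is a condensed, self-contained sketch of how the newly supported 1D `TimeSeriesCallback` might be set up and its recorded data accessed; the equation, mesh resolution, recording points, and time step are illustrative choices and are not taken from the patch itself:

```julia
using OrdinaryDiffEq
using Trixi

equations = LinearScalarAdvectionEquation1D(1.0)
solver = DGSEM(polydeg = 3, surface_flux = flux_lax_friedrichs)
mesh = TreeMesh(-1.0, 1.0, initial_refinement_level = 4, n_cells_max = 10_000)

semi = SemidiscretizationHyperbolic(mesh, equations,
                                    initial_condition_convergence_test, solver)
ode = semidiscretize(semi, (0.0, 1.0))

# Record the solution at two points, every 10 time steps (as in the 1D elixir above)
time_series = TimeSeriesCallback(semi, [0.0, 0.5], interval = 10)

sol = solve(ode, CarpenterKennedy2N54(williamson_condition = false),
            dt = 5.0e-3, save_everystep = false, callback = time_series)

# The recorded values live inside the callback, as accessed in the new tests
point_data_1 = time_series.affect!.point_data[1]
```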
+ # Extracts data at all points from the first step of the time series and compares it to the + # exact solution and an interpolated reference solution + point_data = [getindex(time_series.affect!.point_data[i], 1:3) for i in 1:3] + exact_data = [initial_condition_convergence_test(time_series.affect!.point_coordinates[i], + time_series.affect!.time[1], + equations) for i in 1:3] + ref_data = [[1.968279088772251, 1.9682791565395945, 3.874122958278797], + [2.0654816955822017, 2.0654817326611883, 4.26621471136323], + [2.0317209235018936, 2.0317209516429506, 4.127889808862571]] + @test point_data≈exact_data atol=1e-6 + @test point_data ≈ ref_data end @trixi_testset "elixir_euler_convergence_pure_fv.jl" begin diff --git a/test/test_tree_3d_euler.jl b/test/test_tree_3d_euler.jl index e9e2b82fec..47669dce2f 100644 --- a/test/test_tree_3d_euler.jl +++ b/test/test_tree_3d_euler.jl @@ -25,7 +25,10 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_dgsem") 0.032179231640894645, 0.032179231640895534, 0.0655408023333299, - ]) + ], + # With the default `maxiters = 1` in coverage tests, + # there would be no time series to check against. + coverage_override=(maxiters = 20,)) # Ensure that we do not have excessive memory allocations # (e.g., from type instabilities) let @@ -34,6 +37,38 @@ EXAMPLES_DIR = pkgdir(Trixi, "examples", "tree_3d_dgsem") du_ode = similar(u_ode) @test (@allocated Trixi.rhs!(du_ode, u_ode, semi, t)) < 1000 end + # Extra test to make sure the "TimeSeriesCallback" made correct data. + # Extracts data at all points from the first step of the time series and compares it to the + # exact solution and an interpolated reference solution + point_data = [getindex(time_series.affect!.point_data[i], 1:5) for i in 1:3] + exact_data = [initial_condition_convergence_test(time_series.affect!.point_coordinates[:, + i], + time_series.affect!.time[1], + equations) for i in 1:3] + ref_data = [ + [ + 1.951156832316166, + 1.952073047561595, + 1.9520730475615966, + 1.9520730475615953, + 3.814390510967551, + ], + [ + 2.0506452262144363, + 2.050727319703708, + 2.0507273197037073, + 2.0507273197037077, + 4.203653999433724, + ], + [ + 2.046982357537558, + 2.0463728824399654, + 2.0463728824399654, + 2.0463728824399645, + 4.190033459318115, + ]] + @test point_data≈exact_data atol=1e-1 + @test point_data ≈ ref_data end @trixi_testset "elixir_euler_convergence_pure_fv.jl" begin From 18aaae96035a995e840e4e262964e7b49fdd9325 Mon Sep 17 00:00:00 2001 From: Joshua Lampert <51029046+JoshuaLampert@users.noreply.github.com> Date: Thu, 21 Mar 2024 19:56:02 +0100 Subject: [PATCH 48/58] Add ExplicitImports.jl test (#1875) * add ExplicitImports.jl tests * format * use Trixi.at-batch in at-threaded and skipt at-batch in stale explicit imports test --------- Co-authored-by: Michael Schlottke-Lakemper --- Project.toml | 2 -- src/Trixi.jl | 13 ++++++------- src/auxiliary/auxiliary.jl | 2 ++ test/Project.toml | 2 ++ test/test_aqua.jl | 9 +++++++++ 5 files changed, 19 insertions(+), 9 deletions(-) diff --git a/Project.toml b/Project.toml index 97da4aec51..0e960a06a3 100644 --- a/Project.toml +++ b/Project.toml @@ -31,7 +31,6 @@ RecipesBase = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" Reexport = "189a3867-3050-52da-a836-e630ba90ab69" Requires = "ae029012-a4dd-5104-9daa-d747884805df" SciMLBase = "0bca4576-84f4-4d90-8ffe-ffa030f20462" -Setfield = "efcf1570-3423-57d1-acb7-fd33fddbac46" SimpleUnPack = "ce78b400-467f-4804-87d8-8f486da07d0a" SparseArrays = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" StartUpDG = 
"472ebc20-7c99-4d4b-9470-8fde4e9faa0f" @@ -84,7 +83,6 @@ RecipesBase = "1.1" Reexport = "1.0" Requires = "1.1" SciMLBase = "1.90, 2" -Setfield = "1" SimpleUnPack = "1.1" SparseArrays = "1" StartUpDG = "0.17.7" diff --git a/src/Trixi.jl b/src/Trixi.jl index da7359999c..9375c80d77 100644 --- a/src/Trixi.jl +++ b/src/Trixi.jl @@ -22,7 +22,7 @@ using LinearAlgebra: LinearAlgebra, Diagonal, diag, dot, mul!, norm, cross, norm UniformScaling, det using Printf: @printf, @sprintf, println using SparseArrays: AbstractSparseMatrix, AbstractSparseMatrixCSC, sparse, droptol!, - rowvals, nzrange, nonzeros, spzeros + rowvals, nzrange, nonzeros # import @reexport now to make it available for further imports/exports using Reexport: @reexport @@ -32,10 +32,10 @@ using Reexport: @reexport using MPI: MPI using SciMLBase: CallbackSet, DiscreteCallback, - ODEProblem, ODESolution, ODEFunction, + ODEProblem, ODESolution, SplitODEProblem import SciMLBase: get_du, get_tmp_cache, u_modified!, - AbstractODEIntegrator, init, step!, check_error, + init, step!, check_error, get_proposed_dt, set_proposed_dt!, terminate!, remake, add_tstop!, has_tstop, first_tstop @@ -57,7 +57,6 @@ using Polyester: Polyester, @batch # You know, the cheapest threads you can find using OffsetArrays: OffsetArray, OffsetVector using P4est using T8code -using Setfield: @set using RecipesBase: RecipesBase using Requires: @require using Static: Static, One, True, False @@ -66,7 +65,7 @@ using StaticArrays: StaticArrays, MVector, MArray, SMatrix, @SMatrix using StrideArrays: PtrArray, StrideArray, StaticInt @reexport using StructArrays: StructArrays, StructArray using TimerOutputs: TimerOutputs, @notimeit, TimerOutput, print_timer, reset_timer! -using Triangulate: Triangulate, TriangulateIO, triangulate +using Triangulate: Triangulate, TriangulateIO export TriangulateIO # for type parameter in DGMultiMesh using TriplotBase: TriplotBase using TriplotRecipes: DGTriPseudocolor @@ -84,9 +83,9 @@ const _PREFERENCE_LOG = @load_preference("log", "log_Trixi_NaN") # finite difference SBP operators using SummationByPartsOperators: AbstractDerivativeOperator, - AbstractNonperiodicDerivativeOperator, DerivativeOperator, + AbstractNonperiodicDerivativeOperator, AbstractPeriodicDerivativeOperator, - PeriodicDerivativeOperator, grid + grid import SummationByPartsOperators: integrate, semidiscretize, compute_coefficients, compute_coefficients!, left_boundary_weight, right_boundary_weight diff --git a/src/auxiliary/auxiliary.jl b/src/auxiliary/auxiliary.jl index 92da9a5ba8..972a748c56 100644 --- a/src/auxiliary/auxiliary.jl +++ b/src/auxiliary/auxiliary.jl @@ -242,6 +242,8 @@ macro threaded(expr) # !!! danger "Heisenbug" # Look at the comments for `wrap_array` when considering to change this macro. + # By using `Trixi.@batch` we allow users of Trixi.jl to use `@threaded` without having + # Polyester.jl in their namespace. 
return esc(quote Trixi.@batch $(expr) end) diff --git a/test/Project.toml b/test/Project.toml index 1a042dab44..1491d7a5c5 100644 --- a/test/Project.toml +++ b/test/Project.toml @@ -2,6 +2,7 @@ Aqua = "4c88cf16-eb10-579e-8560-4a9242c79595" CairoMakie = "13f3f980-e62b-5c42-98c6-ff1f3baf88f0" Downloads = "f43a241f-c20a-4ad4-852c-f6b1247861c6" +ExplicitImports = "7d51a73a-1435-4ff3-83d9-f097790105c7" FFMPEG = "c87230d0-a227-11e9-1b43-d7ebe4e7570a" ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" @@ -16,6 +17,7 @@ Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" Aqua = "0.8" CairoMakie = "0.10" Downloads = "1" +ExplicitImports = "1.0.1" FFMPEG = "0.4" ForwardDiff = "0.10.24" LinearAlgebra = "1" diff --git a/test/test_aqua.jl b/test/test_aqua.jl index 93457caba2..04c4a533d2 100644 --- a/test/test_aqua.jl +++ b/test/test_aqua.jl @@ -1,6 +1,7 @@ module TestAqua using Aqua +using ExplicitImports: check_no_implicit_imports, check_no_stale_explicit_imports using Test using Trixi @@ -13,6 +14,14 @@ include("test_trixi.jl") # in src/solvers/dgmulti/sbp.jl piracies = (treat_as_own = [Trixi.StartUpDG.RefElemData, Trixi.StartUpDG.MeshData],)) + @test isnothing(check_no_implicit_imports(Trixi, + skip = (Core, Base, Trixi.P4est, Trixi.T8code, + Trixi.EllipsisNotation))) + @test isnothing(check_no_stale_explicit_imports(Trixi, + ignore = (:derivative_operator, + :periodic_derivative_operator, + :upwind_operators, + Symbol("@batch")))) end end #module From f10969548615547e520623a9fb351a41bd952065 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 22 Mar 2024 12:40:51 +0100 Subject: [PATCH 49/58] Create SECURITY.md (#1884) * Create SECURITY.md * link to Julia SemVer --- SECURITY.md | 30 ++++++++++++++++++++++++++++++ 1 file changed, 30 insertions(+) create mode 100644 SECURITY.md diff --git a/SECURITY.md b/SECURITY.md new file mode 100644 index 0000000000..faa84a770b --- /dev/null +++ b/SECURITY.md @@ -0,0 +1,30 @@ +# Security Policy + +The Trixi.jl development team takes security issues seriously. We appreciate +all efforts to responsibly disclose any security issues and will make every +effort to acknowledge contributions. + + +## Supported Versions + +The current stable release following the interpretation of +[semantic versioning (SemVer)](https://julialang.github.io/Pkg.jl/dev/compatibility/#Version-specifier-format-1) +used in the Julia ecosystem is supported with security updates. + + +## Reporting a Vulnerability + +To report a security issue, please use the GitHub Security Advisory +["Report a Vulnerability"](https://github.com/trixi-framework/Trixi.jl/security/advisories/new) +tab. + +We will send a response indicating the next steps in handling your report. +After the initial reply to your report, we will keep you informed of the +progress towards a fix and full announcement, and may ask for additional +information or guidance. + +Please report security bugs in third-party modules directly to the person +or team maintaining the module. + +Public notifications of vulnerabilities will be shared in community channels +such as Slack. 
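For reference, the checks added to `test/test_aqua.jl` in this patch can also be run interactively against a development copy of Trixi.jl; both calls return `nothing` when the package passes, which is exactly what the new `@test isnothing(...)` lines assert:

```julia
using ExplicitImports: check_no_implicit_imports, check_no_stale_explicit_imports
using Trixi

check_no_implicit_imports(Trixi,
                          skip = (Core, Base, Trixi.P4est, Trixi.T8code,
                                  Trixi.EllipsisNotation))
check_no_stale_explicit_imports(Trixi,
                                ignore = (:derivative_operator,
                                          :periodic_derivative_operator,
                                          :upwind_operators,
                                          Symbol("@batch")))
```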
From f4cb1e0e88fe6fac456d55a619d79e4a4d8d265e Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 22 Mar 2024 12:56:20 +0100 Subject: [PATCH 50/58] add OpenSSF best practices badge (#1885) * add OpenSSF best practices badge * remove broken Genie badge --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 71370d3478..86a8514a5b 100644 --- a/README.md +++ b/README.md @@ -10,7 +10,8 @@ [![Aqua QA](https://raw.githubusercontent.com/JuliaTesting/Aqua.jl/master/badge.svg)](https://github.com/JuliaTesting/Aqua.jl) [![License: MIT](https://img.shields.io/badge/License-MIT-success.svg)](https://opensource.org/licenses/MIT) [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.3996439.svg)](https://doi.org/10.5281/zenodo.3996439) -[![Downloads](https://shields.io/endpoint?url=https://pkgs.genieframework.com/api/v1/badge/Trixi)](https://pkgs.genieframework.com?packages=Trixi) +[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/8695/badge)](https://www.bestpractices.dev/projects/8695) + From e31e25928d6e51f1e84d1437840346edf4e944a6 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 22 Mar 2024 17:56:06 +0100 Subject: [PATCH 51/58] Update contributing.md (#1883) * Update contributing.md * copy contributing from root directory to docs --- CONTRIBUTING.md | 9 ++++--- docs/.gitignore | 2 ++ docs/make.jl | 43 ++++++++++++++++++++++---------- docs/src/contributing.md | 54 ---------------------------------------- 4 files changed, 38 insertions(+), 70 deletions(-) delete mode 100644 docs/src/contributing.md diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index c3ad581062..c40c40bb18 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,9 +1,12 @@ # Contributing Trixi.jl is an open-source project and we are very happy to accept contributions -from the community. Please feel free to open issues or submit patches (preferably -as pull requests) any time. For planned larger contributions, it is often -beneficial to get in contact with one of the principal developers first (see +from the community. Please feel free to +[open issues](https://github.com/trixi-framework/Trixi.jl/issues/new/choose) +or submit patches (preferably as +[pull requests](https://github.com/trixi-framework/Trixi.jl/pulls)) +any time. For planned larger contributions, it is often beneficial to get +in contact with one of the principal developers first (see [AUTHORS.md](AUTHORS.md)). 
Trixi.jl and its contributions are licensed under the MIT license (see diff --git a/docs/.gitignore b/docs/.gitignore index 01f3ac8d79..c8a9e84224 100644 --- a/docs/.gitignore +++ b/docs/.gitignore @@ -1 +1,3 @@ src/code_of_conduct.md +src/contributing.md + diff --git a/docs/make.jl b/docs/make.jl index 8427c4049b..f752a7b0ee 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -28,19 +28,36 @@ DocMeta.setdocmeta!(Trixi2Vtk, :DocTestSetup, :(using Trixi2Vtk); recursive=true # as necessary # Based on: https://github.com/ranocha/SummationByPartsOperators.jl/blob/0206a74140d5c6eb9921ca5021cb7bf2da1a306d/docs/make.jl#L27-L41 open(joinpath(@__DIR__, "src", "code_of_conduct.md"), "w") do io - # Point to source license file - println(io, """ - ```@meta - EditURL = "https://github.com/trixi-framework/Trixi.jl/blob/main/CODE_OF_CONDUCT.md" - ``` - """) - # Write the modified contents - println(io, "# [Code of Conduct](@id code-of-conduct)") - println(io, "") - for line in eachline(joinpath(dirname(@__DIR__), "CODE_OF_CONDUCT.md")) - line = replace(line, "[AUTHORS.md](AUTHORS.md)" => "[Authors](@ref)") - println(io, "> ", line) - end + # Point to source license file + println(io, + """ + ```@meta + EditURL = "https://github.com/trixi-framework/Trixi.jl/blob/main/CODE_OF_CONDUCT.md" + ``` + """) + # Write the modified contents + println(io, "# [Code of Conduct](@id code-of-conduct)") + println(io, "") + for line in eachline(joinpath(dirname(@__DIR__), "CODE_OF_CONDUCT.md")) + line = replace(line, "[AUTHORS.md](AUTHORS.md)" => "[Authors](@ref)") + println(io, "> ", line) + end +end + +open(joinpath(@__DIR__, "src", "contributing.md"), "w") do io + # Point to source license file + println(io, + """ + ```@meta + EditURL = "https://github.com/trixi-framework/Trixi.jl/blob/main/CONTRIBUTING.md" + ``` + """) + # Write the modified contents + for line in eachline(joinpath(dirname(@__DIR__), "CONTRIBUTING.md")) + line = replace(line, "[LICENSE.md](LICENSE.md)" => "[License](@ref)") + line = replace(line, "[AUTHORS.md](AUTHORS.md)" => "[Authors](@ref)") + println(io, line) + end end # Create tutorials for the following files: diff --git a/docs/src/contributing.md b/docs/src/contributing.md deleted file mode 100644 index 5f99647721..0000000000 --- a/docs/src/contributing.md +++ /dev/null @@ -1,54 +0,0 @@ -# Contributing - -Trixi.jl is an open-source project and we are very happy to accept contributions -from the community. Please feel free to open issues or submit patches (preferably -as merge requests) any time. For planned larger contributions, it is often -beneficial to get in contact with one of the principal developers first (see -[Authors](@ref)). - -Trixi.jl and its contributions are licensed under the MIT license (see -[License](@ref)). As a contributor, you certify that all your -contributions are in conformance with the *Developer Certificate of Origin -(Version 1.1)*, which is reproduced below. - -## Developer Certificate of Origin (Version 1.1) -The following text was taken from -[https://developercertificate.org](https://developercertificate.org): - - Developer Certificate of Origin - Version 1.1 - - Copyright (C) 2004, 2006 The Linux Foundation and its contributors. - 1 Letterman Drive - Suite D4700 - San Francisco, CA, 94129 - - Everyone is permitted to copy and distribute verbatim copies of this - license document, but changing it is not allowed. 
- - - Developer's Certificate of Origin 1.1 - - By making a contribution to this project, I certify that: - - (a) The contribution was created in whole or in part by me and I - have the right to submit it under the open source license - indicated in the file; or - - (b) The contribution is based upon previous work that, to the best - of my knowledge, is covered under an appropriate open source - license and I have the right under that license to submit that - work with modifications, whether created in whole or in part - by me, under the same open source license (unless I am - permitted to submit under a different license), as indicated - in the file; or - - (c) The contribution was provided directly to me by some other - person who certified (a), (b) or (c) and I have not modified - it. - - (d) I understand and agree that this project and the contribution - are public and that a record of the contribution (including all - personal information I submit with it, including my sign-off) is - maintained indefinitely and may be redistributed consistent with - this project or the open source license(s) involved. From 9f330400a9b62473a5c695f1faedabb487c6c8b1 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 22 Mar 2024 17:57:31 +0100 Subject: [PATCH 52/58] set version to v0.7.4 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 0e960a06a3..ada2b40bf0 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.7.4-pre" +version = "0.7.4" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 01032aaa78f6140d7200f50d1011b3c7cd0c9c9f Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Fri, 22 Mar 2024 17:57:46 +0100 Subject: [PATCH 53/58] set development version to v0.7.5-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index ada2b40bf0..27df49ed4f 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.7.4" +version = "0.7.5-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From d86a0f47bebe0945b3a70143015adb0fe95e6174 Mon Sep 17 00:00:00 2001 From: Benjamin Bolm <74359358+bennibolm@users.noreply.github.com> Date: Tue, 26 Mar 2024 08:22:48 +0100 Subject: [PATCH 54/58] Use @batch reduction functionality for subcell bounds check (#1888) * Use @batch reduction functionality for bounds check * fmt * Adapt compat bound for Polyester.jl * Add comments --- Project.toml | 2 +- docs/src/performance.md | 11 ---- .../subcell_bounds_check_2d.jl | 66 ++++++++----------- src/solvers/dgsem_tree/subcell_limiters_2d.jl | 11 +--- 4 files changed, 32 insertions(+), 58 deletions(-) diff --git a/Project.toml b/Project.toml index 27df49ed4f..3db90c9928 100644 --- a/Project.toml +++ b/Project.toml @@ -75,7 +75,7 @@ MuladdMacro = "0.2.2" Octavian = "0.3.21" OffsetArrays = "1.12" P4est = "0.4.9" -Polyester = "0.7.5" +Polyester = "0.7.10" PrecompileTools = "1.1" Preferences = "1.3" Printf = "1" diff --git a/docs/src/performance.md b/docs/src/performance.md index 40970e58c5..9f81d3c3d8 100644 --- a/docs/src/performance.md +++ b/docs/src/performance.md @@ -282,14 +282,3 @@ requires. It can thus be seen as a proxy for "energy used" and, as an extension, timing result, you need to set the analysis interval such that the `AnalysisCallback` is invoked at least once during the course of the simulation and discard the first PID value. - -## Performance issues with multi-threaded reductions -[False sharing](https://en.wikipedia.org/wiki/False_sharing) is a known performance issue -for systems with distributed caches. It also occurred for the implementation of a thread -parallel bounds checking routine for the subcell IDP limiting -in [PR #1736](https://github.com/trixi-framework/Trixi.jl/pull/1736). -After some [testing and discussion](https://github.com/trixi-framework/Trixi.jl/pull/1736#discussion_r1423881895), -it turned out that initializing a vector of length `n * Threads.nthreads()` and only using every -n-th entry instead of a vector of length `Threads.nthreads()` fixes the problem. -Since there are no processors with caches over 128B, we use `n = 128B / size(uEltype)`. -Now, the bounds checking routine of the IDP limiting scales as hoped. diff --git a/src/callbacks_stage/subcell_bounds_check_2d.jl b/src/callbacks_stage/subcell_bounds_check_2d.jl index 19d73968c9..3a56ea71f6 100644 --- a/src/callbacks_stage/subcell_bounds_check_2d.jl +++ b/src/callbacks_stage/subcell_bounds_check_2d.jl @@ -12,35 +12,37 @@ (; variable_bounds) = limiter.cache.subcell_limiter_coefficients (; idp_bounds_delta_local, idp_bounds_delta_global) = limiter.cache - # Note: Accessing the threaded memory vector `idp_bounds_delta_local` with - # `deviation = idp_bounds_delta_local[key][Threads.threadid()]` causes critical performance - # issues due to False Sharing. - # Initializing a vector with n times the length and using every n-th entry fixes this - # problem and allows proper scaling: - # `deviation = idp_bounds_delta_local[key][n * Threads.threadid()]` - # Since there are no processors with caches over 128B, we use `n = 128B / size(uEltype)` - stride_size = div(128, sizeof(eltype(u))) # = n + # Note: In order to get the maximum deviation from the target bounds, this bounds check + # requires a reduction in every RK stage and for every enabled limiting option. To make + # this Thread-parallel we are using Polyester.jl's (at least v0.7.10) `@batch reduction` + # functionality. 
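A minimal standalone sketch of the `@batch reduction` functionality referenced in the comment above, using a hypothetical function and data and Polyester.jl of at least v0.7.10; the reduction variable is initialized before the loop and combined across threads with `max`, mirroring the pattern used in the bounds check below:

```julia
using Polyester: @batch

# Maximum deviation below a lower bound over all entries, computed thread-parallel
# without any per-thread scratch storage.
function max_lower_bound_deviation(values::Vector{Float64}, lower_bound::Float64)
    deviation = 0.0
    @batch reduction=(max, deviation) for i in eachindex(values)
        deviation = max(deviation, lower_bound - values[i])
    end
    return deviation
end

max_lower_bound_deviation(randn(1000), -1.0)
```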
+ # Although `@threaded` and `@batch` are currently used equivalently in Trixi.jl, we use + # `@batch` here to allow a possible redefinition of `@threaded` without creating errors here. + # See also https://github.com/trixi-framework/Trixi.jl/pull/1888#discussion_r1537785293. if local_minmax for v in limiter.local_minmax_variables_cons v_string = string(v) key_min = Symbol(v_string, "_min") key_max = Symbol(v_string, "_max") - deviation_min_threaded = idp_bounds_delta_local[key_min] - deviation_max_threaded = idp_bounds_delta_local[key_max] - @threaded for element in eachelement(solver, cache) - deviation_min = deviation_min_threaded[stride_size * Threads.threadid()] - deviation_max = deviation_max_threaded[stride_size * Threads.threadid()] + deviation_min = idp_bounds_delta_local[key_min] + deviation_max = idp_bounds_delta_local[key_max] + @batch reduction=((max, deviation_min), (max, deviation_max)) for element in eachelement(solver, + cache) for j in eachnode(solver), i in eachnode(solver) var = u[v, i, j, element] + # Note: We always save the absolute deviations >= 0 and therefore use the + # `max` operator for the lower and upper bound. The different directions of + # upper and lower bound are considered in their calculations with a + # different sign. deviation_min = max(deviation_min, variable_bounds[key_min][i, j, element] - var) deviation_max = max(deviation_max, var - variable_bounds[key_max][i, j, element]) end - deviation_min_threaded[stride_size * Threads.threadid()] = deviation_min - deviation_max_threaded[stride_size * Threads.threadid()] = deviation_max end + idp_bounds_delta_local[key_min] = deviation_min + idp_bounds_delta_local[key_max] = deviation_max end end if positivity @@ -49,40 +51,35 @@ continue end key = Symbol(string(v), "_min") - deviation_threaded = idp_bounds_delta_local[key] - @threaded for element in eachelement(solver, cache) - deviation = deviation_threaded[stride_size * Threads.threadid()] + deviation = idp_bounds_delta_local[key] + @batch reduction=(max, deviation) for element in eachelement(solver, cache) for j in eachnode(solver), i in eachnode(solver) var = u[v, i, j, element] deviation = max(deviation, variable_bounds[key][i, j, element] - var) end - deviation_threaded[stride_size * Threads.threadid()] = deviation end + idp_bounds_delta_local[key] = deviation end for variable in limiter.positivity_variables_nonlinear key = Symbol(string(variable), "_min") - deviation_threaded = idp_bounds_delta_local[key] - @threaded for element in eachelement(solver, cache) - deviation = deviation_threaded[stride_size * Threads.threadid()] + deviation = idp_bounds_delta_local[key] + @batch reduction=(max, deviation) for element in eachelement(solver, cache) for j in eachnode(solver), i in eachnode(solver) var = variable(get_node_vars(u, equations, solver, i, j, element), equations) deviation = max(deviation, variable_bounds[key][i, j, element] - var) end - deviation_threaded[stride_size * Threads.threadid()] = deviation end + idp_bounds_delta_local[key] = deviation end end for (key, _) in idp_bounds_delta_local - # Calculate maximum deviations of all threads - idp_bounds_delta_local[key][stride_size] = maximum(idp_bounds_delta_local[key][stride_size * i] - for i in 1:Threads.nthreads()) # Update global maximum deviations idp_bounds_delta_global[key] = max(idp_bounds_delta_global[key], - idp_bounds_delta_local[key][stride_size]) + idp_bounds_delta_local[key]) end if save_errors @@ -92,10 +89,8 @@ if local_minmax for v in limiter.local_minmax_variables_cons v_string 
= string(v) - print(f, ", ", - idp_bounds_delta_local[Symbol(v_string, "_min")][stride_size], - ", ", - idp_bounds_delta_local[Symbol(v_string, "_max")][stride_size]) + print(f, ", ", idp_bounds_delta_local[Symbol(v_string, "_min")], + ", ", idp_bounds_delta_local[Symbol(v_string, "_max")]) end end if positivity @@ -103,21 +98,18 @@ if v in limiter.local_minmax_variables_cons continue end - print(f, ", ", - idp_bounds_delta_local[Symbol(string(v), "_min")][stride_size]) + print(f, ", ", idp_bounds_delta_local[Symbol(string(v), "_min")]) end for variable in limiter.positivity_variables_nonlinear print(f, ", ", - idp_bounds_delta_local[Symbol(string(variable), "_min")][stride_size]) + idp_bounds_delta_local[Symbol(string(variable), "_min")]) end end println(f) end # Reset local maximum deviations for (key, _) in idp_bounds_delta_local - for i in 1:Threads.nthreads() - idp_bounds_delta_local[key][stride_size * i] = zero(eltype(idp_bounds_delta_local[key][stride_size])) - end + idp_bounds_delta_local[key] = zero(eltype(idp_bounds_delta_local[key])) end end diff --git a/src/solvers/dgsem_tree/subcell_limiters_2d.jl b/src/solvers/dgsem_tree/subcell_limiters_2d.jl index 3f7954c895..9343cee439 100644 --- a/src/solvers/dgsem_tree/subcell_limiters_2d.jl +++ b/src/solvers/dgsem_tree/subcell_limiters_2d.jl @@ -18,18 +18,11 @@ function create_cache(limiter::Type{SubcellLimiterIDP}, equations::AbstractEquat # Memory for bounds checking routine with `BoundsCheckCallback`. # Local variable contains the maximum deviation since the last export. - # Using a threaded vector to parallelize bounds check. - idp_bounds_delta_local = Dict{Symbol, Vector{real(basis)}}() + idp_bounds_delta_local = Dict{Symbol, real(basis)}() # Global variable contains the total maximum deviation. idp_bounds_delta_global = Dict{Symbol, real(basis)}() - # Note: False sharing causes critical performance issues on multiple threads when using a vector - # of length `Threads.nthreads()`. Initializing a vector of length `n * Threads.nthreads()` - # and then only using every n-th entry, fixes the problem and allows proper scaling. 
- # Since there are no processors with caches over 128B, we use `n = 128B / size(uEltype)` - stride_size = div(128, sizeof(eltype(basis.nodes))) # = n for key in bound_keys - idp_bounds_delta_local[key] = [zero(real(basis)) - for _ in 1:(stride_size * Threads.nthreads())] + idp_bounds_delta_local[key] = zero(real(basis)) idp_bounds_delta_global[key] = zero(real(basis)) end From cd6fdaaa1443376ad97bc408cafc27d82a647175 Mon Sep 17 00:00:00 2001 From: Benedict <135045760+bgeihe@users.noreply.github.com> Date: Tue, 26 Mar 2024 08:23:15 +0100 Subject: [PATCH 55/58] abort initial AMR loop after 10 iterations (#1890) Co-authored-by: Hendrik Ranocha --- src/callbacks_step/amr.jl | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/src/callbacks_step/amr.jl b/src/callbacks_step/amr.jl index 6f57d6647f..1ab65a3553 100644 --- a/src/callbacks_step/amr.jl +++ b/src/callbacks_step/amr.jl @@ -138,11 +138,18 @@ function initialize!(cb::DiscreteCallback{Condition, Affect!}, u, t, # iterate until mesh does not change anymore has_changed = amr_callback(integrator, only_refine = amr_callback.adapt_initial_condition_only_refine) + iterations = 1 while has_changed compute_coefficients!(integrator.u, t, semi) u_modified!(integrator, true) has_changed = amr_callback(integrator, only_refine = amr_callback.adapt_initial_condition_only_refine) + iterations = iterations + 1 + if iterations > 10 + @warn "AMR for initial condition did not settle within 10 iterations!\n" * + "Consider adjusting thresholds or setting `adapt_initial_condition_only_refine`." + break + end end end From 27026de6e2be4f0f6655efb68c082515f1383e84 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Tue, 26 Mar 2024 10:07:11 +0100 Subject: [PATCH 56/58] set version to v0.7.5 --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 3db90c9928..9ca3086306 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. Winters ", "Jesse Chan "] -version = "0.7.5-pre" +version = "0.7.5" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 909abb4473c95042a87e93e46d443dc381c2b523 Mon Sep 17 00:00:00 2001 From: Hendrik Ranocha Date: Tue, 26 Mar 2024 10:07:23 +0100 Subject: [PATCH 57/58] set development version to v0.7.6-pre --- Project.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Project.toml b/Project.toml index 9ca3086306..6ff7f29686 100644 --- a/Project.toml +++ b/Project.toml @@ -1,7 +1,7 @@ name = "Trixi" uuid = "a7f1ee26-1774-49b1-8366-f1abc58fbfcb" authors = ["Michael Schlottke-Lakemper ", "Gregor Gassner ", "Hendrik Ranocha ", "Andrew R. 
Winters ", "Jesse Chan "] -version = "0.7.5" +version = "0.7.6-pre" [deps] CodeTracking = "da1fd8a2-8d9e-5ec2-8556-3022fb5608a2" From 73da92c446c6f60fc09ad1cf5e579bbf5eccb34d Mon Sep 17 00:00:00 2001 From: Daniel Doehring Date: Wed, 27 Mar 2024 15:55:37 +0100 Subject: [PATCH 58/58] Sutherlands Law for temperature dependent viscosity (#1808) * sample 2D * bug fixes * typo * sutherlands 1d * sutherlands 3d * fmt * main merge * test var mu * variable mu * fmt * example using sutherland * comment * example * reset toml * Shorten * Unit test * fmt * Update examples/tree_2d_dgsem/elixir_navierstokes_taylor_green_vortex_sutherland.jl * remove todo * Apply suggestions from code review Co-authored-by: Andrew Winters * test vals * non-functionalized mu * comments & dosctrings * docstrings * fix tests * avoid if * docstring and comments * Update src/equations/compressible_navier_stokes.jl Co-authored-by: Michael Schlottke-Lakemper * fmt --------- Co-authored-by: Andrew Winters Co-authored-by: Michael Schlottke-Lakemper Co-authored-by: Jesse Chan <1156048+jlchan@users.noreply.github.com> --- .../elixir_navierstokes_lid_driven_cavity.jl | 5 +- ...elixir_navierstokes_taylor_green_vortex.jl | 5 +- .../elixir_navierstokes_lid_driven_cavity.jl | 5 +- ...ixir_navierstokes_lid_driven_cavity_amr.jl | 5 +- .../elixir_navierstokes_blast_wave_amr.jl | 5 +- ...elixir_navierstokes_taylor_green_vortex.jl | 5 +- ...ir_navierstokes_taylor_green_vortex_amr.jl | 5 +- ...lixir_navierstokes_convergence_periodic.jl | 1 - .../elixir_navierstokes_lid_driven_cavity.jl | 5 +- .../elixir_navierstokes_shearlayer_amr.jl | 5 +- ...elixir_navierstokes_taylor_green_vortex.jl | 5 +- ...erstokes_taylor_green_vortex_sutherland.jl | 94 +++++++++++++++++++ ...elixir_navierstokes_taylor_green_vortex.jl | 5 +- src/equations/compressible_navier_stokes.jl | 14 +++ .../compressible_navier_stokes_1d.jl | 26 +++-- .../compressible_navier_stokes_2d.jl | 26 +++-- .../compressible_navier_stokes_3d.jl | 26 +++-- test/test_parabolic_2d.jl | 34 +++++-- test/test_unit.jl | 39 ++++++++ 19 files changed, 243 insertions(+), 72 deletions(-) create mode 100644 examples/tree_2d_dgsem/elixir_navierstokes_taylor_green_vortex_sutherland.jl diff --git a/examples/dgmulti_2d/elixir_navierstokes_lid_driven_cavity.jl b/examples/dgmulti_2d/elixir_navierstokes_lid_driven_cavity.jl index 7c55cbf0cc..a612dd0e0d 100644 --- a/examples/dgmulti_2d/elixir_navierstokes_lid_driven_cavity.jl +++ b/examples/dgmulti_2d/elixir_navierstokes_lid_driven_cavity.jl @@ -4,12 +4,11 @@ using Trixi ############################################################################### # semidiscretization of the ideal compressible Navier-Stokes equations -# TODO: parabolic; unify names of these accessor functions prandtl_number() = 0.72 -mu() = 0.001 +mu = 0.001 equations = CompressibleEulerEquations2D(1.4) -equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu(), +equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu, Prandtl = prandtl_number()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux diff --git a/examples/dgmulti_3d/elixir_navierstokes_taylor_green_vortex.jl b/examples/dgmulti_3d/elixir_navierstokes_taylor_green_vortex.jl index dedd8267a3..9ae90ac47b 100644 --- a/examples/dgmulti_3d/elixir_navierstokes_taylor_green_vortex.jl +++ b/examples/dgmulti_3d/elixir_navierstokes_taylor_green_vortex.jl @@ -5,12 +5,11 @@ using Trixi 
############################################################################### # semidiscretization of the compressible Navier-Stokes equations -# TODO: parabolic; unify names of these accessor functions prandtl_number() = 0.72 -mu() = 6.25e-4 # equivalent to Re = 1600 +mu = 6.25e-4 # equivalent to Re = 1600 equations = CompressibleEulerEquations3D(1.4) -equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu(), +equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu, Prandtl = prandtl_number()) """ diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl index bc28ae6ffb..728736fe49 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl @@ -4,12 +4,11 @@ using Trixi ############################################################################### # semidiscretization of the ideal compressible Navier-Stokes equations -# TODO: parabolic; unify names of these accessor functions prandtl_number() = 0.72 -mu() = 0.001 +mu = 0.001 equations = CompressibleEulerEquations2D(1.4) -equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu(), +equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu, Prandtl = prandtl_number()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux diff --git a/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity_amr.jl b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity_amr.jl index 898366969a..d9eaf4fdb7 100644 --- a/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity_amr.jl +++ b/examples/p4est_2d_dgsem/elixir_navierstokes_lid_driven_cavity_amr.jl @@ -4,12 +4,11 @@ using Trixi ############################################################################### # semidiscretization of the ideal compressible Navier-Stokes equations -# TODO: parabolic; unify names of these accessor functions prandtl_number() = 0.72 -mu() = 0.001 +mu = 0.001 equations = CompressibleEulerEquations2D(1.4) -equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu(), +equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu, Prandtl = prandtl_number()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl index 5df89fbcdf..d556d0ab70 100644 --- a/examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl +++ b/examples/p4est_3d_dgsem/elixir_navierstokes_blast_wave_amr.jl @@ -5,12 +5,11 @@ using Trixi ############################################################################### # semidiscretization of the compressible Navier-Stokes equations -# TODO: parabolic; unify names of these accessor functions prandtl_number() = 0.72 -mu() = 6.25e-4 # equivalent to Re = 1600 +mu = 6.25e-4 # equivalent to Re = 1600 equations = CompressibleEulerEquations3D(1.4) -equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu(), +equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu, Prandtl = prandtl_number()) function initial_condition_3d_blast_wave(x, t, equations::CompressibleEulerEquations3D) diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex.jl 
b/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex.jl index d785013f5a..9c90e4d321 100644 --- a/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex.jl +++ b/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex.jl @@ -5,12 +5,11 @@ using Trixi ############################################################################### # semidiscretization of the compressible Navier-Stokes equations -# TODO: parabolic; unify names of these accessor functions prandtl_number() = 0.72 -mu() = 6.25e-4 # equivalent to Re = 1600 +mu = 6.25e-4 # equivalent to Re = 1600 equations = CompressibleEulerEquations3D(1.4) -equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu(), +equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu, Prandtl = prandtl_number()) """ diff --git a/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl b/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl index c15227a1c2..2741f0df17 100644 --- a/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl +++ b/examples/p4est_3d_dgsem/elixir_navierstokes_taylor_green_vortex_amr.jl @@ -5,12 +5,11 @@ using Trixi ############################################################################### # semidiscretization of the compressible Navier-Stokes equations -# TODO: parabolic; unify names of these accessor functions prandtl_number() = 0.72 -mu() = 6.25e-4 # equivalent to Re = 1600 +mu = 6.25e-4 # equivalent to Re = 1600 equations = CompressibleEulerEquations3D(1.4) -equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu(), +equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu, Prandtl = prandtl_number()) """ diff --git a/examples/tree_1d_dgsem/elixir_navierstokes_convergence_periodic.jl b/examples/tree_1d_dgsem/elixir_navierstokes_convergence_periodic.jl index 33ad0d5271..eab0840f38 100644 --- a/examples/tree_1d_dgsem/elixir_navierstokes_convergence_periodic.jl +++ b/examples/tree_1d_dgsem/elixir_navierstokes_convergence_periodic.jl @@ -5,7 +5,6 @@ using Trixi ############################################################################### # semidiscretization of the compressible Navier-Stokes equations -# TODO: parabolic; unify names of these accessor functions prandtl_number() = 0.72 mu() = 6.25e-4 # equivalent to Re = 1600 diff --git a/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl b/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl index 70d76fc907..b8e20e27f6 100644 --- a/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl +++ b/examples/tree_2d_dgsem/elixir_navierstokes_lid_driven_cavity.jl @@ -4,12 +4,11 @@ using Trixi ############################################################################### # semidiscretization of the ideal compressible Navier-Stokes equations -# TODO: parabolic; unify names of these accessor functions prandtl_number() = 0.72 -mu() = 0.001 +mu = 0.001 equations = CompressibleEulerEquations2D(1.4) -equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu(), +equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu, Prandtl = prandtl_number()) # Create DG solver with polynomial degree = 3 and (local) Lax-Friedrichs/Rusanov flux as surface flux diff --git a/examples/tree_2d_dgsem/elixir_navierstokes_shearlayer_amr.jl b/examples/tree_2d_dgsem/elixir_navierstokes_shearlayer_amr.jl index 4d92ea261e..a7492bafb4 100644 --- 
a/examples/tree_2d_dgsem/elixir_navierstokes_shearlayer_amr.jl +++ b/examples/tree_2d_dgsem/elixir_navierstokes_shearlayer_amr.jl @@ -5,12 +5,11 @@ using Trixi ############################################################################### # semidiscretization of the compressible Navier-Stokes equations -# TODO: parabolic; unify names of these accessor functions prandtl_number() = 0.72 -mu() = 1.0 / 3.0 * 10^(-5) # equivalent to Re = 30,000 +mu = 1.0 / 3.0 * 10^(-4) # equivalent to Re = 30,000 equations = CompressibleEulerEquations2D(1.4) -equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu(), +equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu, Prandtl = prandtl_number()) """ diff --git a/examples/tree_2d_dgsem/elixir_navierstokes_taylor_green_vortex.jl b/examples/tree_2d_dgsem/elixir_navierstokes_taylor_green_vortex.jl index a3e38bf6d9..c6e5f0bc40 100644 --- a/examples/tree_2d_dgsem/elixir_navierstokes_taylor_green_vortex.jl +++ b/examples/tree_2d_dgsem/elixir_navierstokes_taylor_green_vortex.jl @@ -5,12 +5,11 @@ using Trixi ############################################################################### # semidiscretization of the compressible Navier-Stokes equations -# TODO: parabolic; unify names of these accessor functions prandtl_number() = 0.72 -mu() = 6.25e-4 # equivalent to Re = 1600 +mu = 6.25e-4 # equivalent to Re = 1600 equations = CompressibleEulerEquations2D(1.4) -equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu(), +equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu, Prandtl = prandtl_number()) """ diff --git a/examples/tree_2d_dgsem/elixir_navierstokes_taylor_green_vortex_sutherland.jl b/examples/tree_2d_dgsem/elixir_navierstokes_taylor_green_vortex_sutherland.jl new file mode 100644 index 0000000000..9598ae184c --- /dev/null +++ b/examples/tree_2d_dgsem/elixir_navierstokes_taylor_green_vortex_sutherland.jl @@ -0,0 +1,94 @@ + +using OrdinaryDiffEq +using Trixi + +############################################################################### +# semidiscretization of the compressible Navier-Stokes equations + +prandtl_number() = 0.72 + +# Use Sutherland's law for a temperature-dependent viscosity. +# For details, see e.g. +# Frank M. White: Viscous Fluid Flow, 2nd Edition. +# 1991, McGraw-Hill, ISBN, 0-07-069712-4 +# Pages 28 and 29. +@inline function mu(u, equations) + T_ref = 291.15 + + R_specific_air = 287.052874 + T = R_specific_air * Trixi.temperature(u, equations) + + C_air = 120.0 + mu_ref_air = 1.827e-5 + + return mu_ref_air * (T_ref + C_air) / (T + C_air) * (T / T_ref)^1.5 +end + +equations = CompressibleEulerEquations2D(1.4) +equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu, + Prandtl = prandtl_number()) + +""" + initial_condition_taylor_green_vortex(x, t, equations::CompressibleEulerEquations2D) + +The classical viscous Taylor-Green vortex in 2D. +This forms the basis behind the 3D case found for instance in + - Jonathan R. 
Bull and Antony Jameson + Simulation of the Compressible Taylor Green Vortex using High-Order Flux Reconstruction Schemes + [DOI: 10.2514/6.2014-3210](https://doi.org/10.2514/6.2014-3210) +""" +function initial_condition_taylor_green_vortex(x, t, + equations::CompressibleEulerEquations2D) + A = 1.0 # magnitude of speed + Ms = 0.1 # maximum Mach number + + rho = 1.0 + v1 = A * sin(x[1]) * cos(x[2]) + v2 = -A * cos(x[1]) * sin(x[2]) + p = (A / Ms)^2 * rho / equations.gamma # scaling to get Ms + p = p + 1.0 / 4.0 * A^2 * rho * (cos(2 * x[1]) + cos(2 * x[2])) + + return prim2cons(SVector(rho, v1, v2, p), equations) +end +initial_condition = initial_condition_taylor_green_vortex + +volume_flux = flux_ranocha +solver = DGSEM(polydeg = 3, surface_flux = flux_hllc, + volume_integral = VolumeIntegralFluxDifferencing(volume_flux)) + +coordinates_min = (-1.0, -1.0) .* pi +coordinates_max = (1.0, 1.0) .* pi +mesh = TreeMesh(coordinates_min, coordinates_max, + initial_refinement_level = 4, + n_cells_max = 100_000) + +semi = SemidiscretizationHyperbolicParabolic(mesh, (equations, equations_parabolic), + initial_condition, solver) + +############################################################################### +# ODE solvers, callbacks etc. + +tspan = (0.0, 20.0) +ode = semidiscretize(semi, tspan) + +summary_callback = SummaryCallback() + +analysis_interval = 100 +analysis_callback = AnalysisCallback(semi, interval = analysis_interval, + save_analysis = true, + extra_analysis_integrals = (energy_kinetic, + energy_internal)) + +alive_callback = AliveCallback(analysis_interval = analysis_interval) + +callbacks = CallbackSet(summary_callback, + analysis_callback, + alive_callback) + +############################################################################### +# run the simulation + +time_int_tol = 1e-9 +sol = solve(ode, RDPK3SpFSAL49(); abstol = time_int_tol, reltol = time_int_tol, + ode_default_options()..., callback = callbacks) +summary_callback() # print the timer summary diff --git a/examples/tree_3d_dgsem/elixir_navierstokes_taylor_green_vortex.jl b/examples/tree_3d_dgsem/elixir_navierstokes_taylor_green_vortex.jl index 65bd9aa133..3e54c791ec 100644 --- a/examples/tree_3d_dgsem/elixir_navierstokes_taylor_green_vortex.jl +++ b/examples/tree_3d_dgsem/elixir_navierstokes_taylor_green_vortex.jl @@ -5,12 +5,11 @@ using Trixi ############################################################################### # semidiscretization of the compressible Navier-Stokes equations -# TODO: parabolic; unify names of these accessor functions prandtl_number() = 0.72 -mu() = 6.25e-4 # equivalent to Re = 1600 +mu = 6.25e-4 # equivalent to Re = 1600 equations = CompressibleEulerEquations3D(1.4) -equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu(), +equations_parabolic = CompressibleNavierStokesDiffusion3D(equations, mu = mu, Prandtl = prandtl_number()) """ diff --git a/src/equations/compressible_navier_stokes.jl b/src/equations/compressible_navier_stokes.jl index 3059771197..c530625f22 100644 --- a/src/equations/compressible_navier_stokes.jl +++ b/src/equations/compressible_navier_stokes.jl @@ -62,3 +62,17 @@ Under `GradientVariablesEntropy`, the Navier-Stokes discretization is provably e """ struct GradientVariablesPrimitive end struct GradientVariablesEntropy end + +""" + dynamic_viscosity(u, equations) + +Wrapper for the dynamic viscosity that calls +`dynamic_viscosity(u, equations.mu, equations)`, which dispatches on the type of +`equations.mu`. 
+For constant `equations.mu`, i.e., `equations.mu` is of `Real`-type it is returned directly. +In all other cases, `equations.mu` is assumed to be a function with arguments +`u` and `equations` and is called with these arguments. +""" +dynamic_viscosity(u, equations) = dynamic_viscosity(u, equations.mu, equations) +dynamic_viscosity(u, mu::Real, equations) = mu +dynamic_viscosity(u, mu::T, equations) where {T} = mu(u, equations) diff --git a/src/equations/compressible_navier_stokes_1d.jl b/src/equations/compressible_navier_stokes_1d.jl index d2c46ecc7d..3dbdf11c56 100644 --- a/src/equations/compressible_navier_stokes_1d.jl +++ b/src/equations/compressible_navier_stokes_1d.jl @@ -21,6 +21,9 @@ the [`CompressibleEulerEquations1D`](@ref). Fluid properties such as the dynamic viscosity ``\mu`` can be provided in any consistent unit system, e.g., [``\mu``] = kg m⁻¹ s⁻¹. +The viscosity ``\mu`` may be a constant or a function of the current state, e.g., +depending on temperature (Sutherland's law): ``\mu = \mu(T)``. +In the latter case, the function `mu` needs to have the signature `mu(u, equations)`. The particular form of the compressible Navier-Stokes implemented is ```math @@ -80,7 +83,7 @@ where w_2 = \frac{\rho v1}{p},\, w_3 = -\frac{\rho}{p} ``` """ -struct CompressibleNavierStokesDiffusion1D{GradientVariables, RealT <: Real, +struct CompressibleNavierStokesDiffusion1D{GradientVariables, RealT <: Real, Mu, E <: AbstractCompressibleEulerEquations{1}} <: AbstractCompressibleNavierStokesDiffusion{1, 3, GradientVariables} # TODO: parabolic @@ -89,7 +92,7 @@ struct CompressibleNavierStokesDiffusion1D{GradientVariables, RealT <: Real, gamma::RealT # ratio of specific heats inv_gamma_minus_one::RealT # = inv(gamma - 1); can be used to write slow divisions as fast multiplications - mu::RealT # viscosity + mu::Mu # viscosity Pr::RealT # Prandtl number kappa::RealT # thermal diffusivity for Fick's law @@ -103,16 +106,17 @@ function CompressibleNavierStokesDiffusion1D(equations::CompressibleEulerEquatio gradient_variables = GradientVariablesPrimitive()) gamma = equations.gamma inv_gamma_minus_one = equations.inv_gamma_minus_one - μ, Pr = promote(mu, Prandtl) # Under the assumption of constant Prandtl number the thermal conductivity - # constant is kappa = gamma μ / ((gamma-1) Pr). + # constant is kappa = gamma μ / ((gamma-1) Prandtl). # Important note! Factor of μ is accounted for later in `flux`. - kappa = gamma * inv_gamma_minus_one / Pr + # This avoids recomputation of kappa for non-constant μ. + kappa = gamma * inv_gamma_minus_one / Prandtl CompressibleNavierStokesDiffusion1D{typeof(gradient_variables), typeof(gamma), + typeof(mu), typeof(equations)}(gamma, inv_gamma_minus_one, - μ, Pr, kappa, + mu, Prandtl, kappa, equations, gradient_variables) end @@ -159,10 +163,12 @@ function flux(u, gradients, orientation::Integer, # in the implementation q1 = equations.kappa * dTdx - # Constant dynamic viscosity is copied to a variable for readability. - # Offers flexibility for dynamic viscosity via Sutherland's law where it depends - # on temperature and reference values, Ts and Tref such that mu(T) - mu = equations.mu + # In the simplest cases, the user passed in `mu` or `mu()` + # (which returns just a constant) but + # more complex functions like Sutherland's law are possible. + # `dynamic_viscosity` is a helper function that handles both cases + # by dispatching on the type of `equations.mu`. 
+ mu = dynamic_viscosity(u, equations) # viscous flux components in the x-direction f1 = zero(rho) diff --git a/src/equations/compressible_navier_stokes_2d.jl b/src/equations/compressible_navier_stokes_2d.jl index 5df7c01ca5..3256343703 100644 --- a/src/equations/compressible_navier_stokes_2d.jl +++ b/src/equations/compressible_navier_stokes_2d.jl @@ -21,6 +21,9 @@ the [`CompressibleEulerEquations2D`](@ref). Fluid properties such as the dynamic viscosity ``\mu`` can be provided in any consistent unit system, e.g., [``\mu``] = kg m⁻¹ s⁻¹. +The viscosity ``\mu`` may be a constant or a function of the current state, e.g., +depending on temperature (Sutherland's law): ``\mu = \mu(T)``. +In the latter case, the function `mu` needs to have the signature `mu(u, equations)`. The particular form of the compressible Navier-Stokes implemented is ```math @@ -80,7 +83,7 @@ where w_2 = \frac{\rho v_1}{p},\, w_3 = \frac{\rho v_2}{p},\, w_4 = -\frac{\rho}{p} ``` """ -struct CompressibleNavierStokesDiffusion2D{GradientVariables, RealT <: Real, +struct CompressibleNavierStokesDiffusion2D{GradientVariables, RealT <: Real, Mu, E <: AbstractCompressibleEulerEquations{2}} <: AbstractCompressibleNavierStokesDiffusion{2, 4, GradientVariables} # TODO: parabolic @@ -89,7 +92,7 @@ struct CompressibleNavierStokesDiffusion2D{GradientVariables, RealT <: Real, gamma::RealT # ratio of specific heats inv_gamma_minus_one::RealT # = inv(gamma - 1); can be used to write slow divisions as fast multiplications - mu::RealT # viscosity + mu::Mu # viscosity Pr::RealT # Prandtl number kappa::RealT # thermal diffusivity for Fick's law @@ -103,16 +106,17 @@ function CompressibleNavierStokesDiffusion2D(equations::CompressibleEulerEquatio gradient_variables = GradientVariablesPrimitive()) gamma = equations.gamma inv_gamma_minus_one = equations.inv_gamma_minus_one - μ, Pr = promote(mu, Prandtl) # Under the assumption of constant Prandtl number the thermal conductivity - # constant is kappa = gamma μ / ((gamma-1) Pr). + # constant is kappa = gamma μ / ((gamma-1) Prandtl). # Important note! Factor of μ is accounted for later in `flux`. - kappa = gamma * inv_gamma_minus_one / Pr + # This avoids recomputation of kappa for non-constant μ. + kappa = gamma * inv_gamma_minus_one / Prandtl CompressibleNavierStokesDiffusion2D{typeof(gradient_variables), typeof(gamma), + typeof(mu), typeof(equations)}(gamma, inv_gamma_minus_one, - μ, Pr, kappa, + mu, Prandtl, kappa, equations, gradient_variables) end @@ -168,10 +172,12 @@ function flux(u, gradients, orientation::Integer, q1 = equations.kappa * dTdx q2 = equations.kappa * dTdy - # Constant dynamic viscosity is copied to a variable for readability. - # Offers flexibility for dynamic viscosity via Sutherland's law where it depends - # on temperature and reference values, Ts and Tref such that mu(T) - mu = equations.mu + # In the simplest cases, the user passed in `mu` or `mu()` + # (which returns just a constant) but + # more complex functions like Sutherland's law are possible. + # `dynamic_viscosity` is a helper function that handles both cases + # by dispatching on the type of `equations.mu`. 
+ mu = dynamic_viscosity(u, equations) if orientation == 1 # viscous flux components in the x-direction diff --git a/src/equations/compressible_navier_stokes_3d.jl b/src/equations/compressible_navier_stokes_3d.jl index e5567ae578..9833122eb3 100644 --- a/src/equations/compressible_navier_stokes_3d.jl +++ b/src/equations/compressible_navier_stokes_3d.jl @@ -21,6 +21,9 @@ the [`CompressibleEulerEquations3D`](@ref). Fluid properties such as the dynamic viscosity ``\mu`` can be provided in any consistent unit system, e.g., [``\mu``] = kg m⁻¹ s⁻¹. +The viscosity ``\mu`` may be a constant or a function of the current state, e.g., +depending on temperature (Sutherland's law): ``\mu = \mu(T)``. +In the latter case, the function `mu` needs to have the signature `mu(u, equations)`. The particular form of the compressible Navier-Stokes implemented is ```math @@ -80,7 +83,7 @@ where w_2 = \frac{\rho v_1}{p},\, w_3 = \frac{\rho v_2}{p},\, w_4 = \frac{\rho v_3}{p},\, w_5 = -\frac{\rho}{p} ``` """ -struct CompressibleNavierStokesDiffusion3D{GradientVariables, RealT <: Real, +struct CompressibleNavierStokesDiffusion3D{GradientVariables, RealT <: Real, Mu, E <: AbstractCompressibleEulerEquations{3}} <: AbstractCompressibleNavierStokesDiffusion{3, 5, GradientVariables} # TODO: parabolic @@ -89,7 +92,7 @@ struct CompressibleNavierStokesDiffusion3D{GradientVariables, RealT <: Real, gamma::RealT # ratio of specific heats inv_gamma_minus_one::RealT # = inv(gamma - 1); can be used to write slow divisions as fast multiplications - mu::RealT # viscosity + mu::Mu # viscosity Pr::RealT # Prandtl number kappa::RealT # thermal diffusivity for Fick's law @@ -103,16 +106,17 @@ function CompressibleNavierStokesDiffusion3D(equations::CompressibleEulerEquatio gradient_variables = GradientVariablesPrimitive()) gamma = equations.gamma inv_gamma_minus_one = equations.inv_gamma_minus_one - μ, Pr = promote(mu, Prandtl) # Under the assumption of constant Prandtl number the thermal conductivity - # constant is kappa = gamma μ / ((gamma-1) Pr). + # constant is kappa = gamma μ / ((gamma-1) Prandtl). # Important note! Factor of μ is accounted for later in `flux`. - kappa = gamma * inv_gamma_minus_one / Pr + # This avoids recomputation of kappa for non-constant μ. + kappa = gamma * inv_gamma_minus_one / Prandtl CompressibleNavierStokesDiffusion3D{typeof(gradient_variables), typeof(gamma), + typeof(mu), typeof(equations)}(gamma, inv_gamma_minus_one, - μ, Pr, kappa, + mu, Prandtl, kappa, equations, gradient_variables) end @@ -181,10 +185,12 @@ function flux(u, gradients, orientation::Integer, q2 = equations.kappa * dTdy q3 = equations.kappa * dTdz - # Constant dynamic viscosity is copied to a variable for readability. - # Offers flexibility for dynamic viscosity via Sutherland's law where it depends - # on temperature and reference values, Ts and Tref such that mu(T) - mu = equations.mu + # In the simplest cases, the user passed in `mu` or `mu()` + # (which returns just a constant) but + # more complex functions like Sutherland's law are possible. + # `dynamic_viscosity` is a helper function that handles both cases + # by dispatching on the type of `equations.mu`. 
+ mu = dynamic_viscosity(u, equations) if orientation == 1 # viscous flux components in the x-direction diff --git a/test/test_parabolic_2d.jl b/test/test_parabolic_2d.jl index 9f1382caa6..d47c34f9e7 100644 --- a/test/test_parabolic_2d.jl +++ b/test/test_parabolic_2d.jl @@ -476,20 +476,38 @@ end @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", "elixir_navierstokes_shearlayer_amr.jl"), l2=[ - 0.00526017743452336, - 0.4130430692895672, - 0.4310996183791349, - 1.1544344171604635, + 0.005155557460409018, + 0.4048446934219344, + 0.43040068852937047, + 1.1255130552079322, ], linf=[ - 0.03492185879198495, - 1.392635891671335, - 1.357551616406459, - 8.713760873018146, + 0.03287305649809613, + 1.1656793717431393, + 1.3917196016246969, + 8.146587380114653, ], tspan=(0.0, 0.7)) end +@trixi_testset "TreeMesh2D: elixir_navierstokes_taylor_green_vortex_sutherland.jl" begin + @test_trixi_include(joinpath(examples_dir(), "tree_2d_dgsem", + "elixir_navierstokes_taylor_green_vortex_sutherland.jl"), + l2=[ + 0.001452856280034929, + 0.0007538775539989481, + 0.0007538775539988681, + 0.011035506549989587, + ], + linf=[ + 0.003291912841311362, + 0.002986462478096974, + 0.0029864624780958637, + 0.0231954665514138, + ], + tspan=(0.0, 1.0)) +end + @trixi_testset "P4estMesh2D: elixir_advection_diffusion_periodic.jl" begin @test_trixi_include(joinpath(examples_dir(), "p4est_2d_dgsem", "elixir_advection_diffusion_periodic.jl"), diff --git a/test/test_unit.jl b/test/test_unit.jl index 03a78f6918..79950f58d5 100644 --- a/test/test_unit.jl +++ b/test/test_unit.jl @@ -1576,6 +1576,45 @@ end @test mesh.boundary_faces[:entire_boundary] == [1, 2] end + +@testset "Sutherlands Law" begin + function mu(u, equations) + T_ref = 291.15 + + R_specific_air = 287.052874 + T = R_specific_air * Trixi.temperature(u, equations) + + C_air = 120.0 + mu_ref_air = 1.827e-5 + + return mu_ref_air * (T_ref + C_air) / (T + C_air) * (T / T_ref)^1.5 + end + + function mu_control(u, equations, T_ref, R_specific, C, mu_ref) + T = R_specific * Trixi.temperature(u, equations) + + return mu_ref * (T_ref + C) / (T + C) * (T / T_ref)^1.5 + end + + # Dry air (values from Wikipedia: https://de.wikipedia.org/wiki/Sutherland-Modell) + T_ref = 291.15 + C = 120.0 # Sutherland's constant + R_specific = 287.052874 + mu_ref = 1.827e-5 + prandtl_number() = 0.72 + gamma = 1.4 + + equations = CompressibleEulerEquations2D(gamma) + equations_parabolic = CompressibleNavierStokesDiffusion2D(equations, mu = mu, + Prandtl = prandtl_number()) + + # Flow at rest + u = prim2cons(SVector(1.0, 0.0, 0.0, 1.0), equations_parabolic) + + # Comparison value from https://www.engineeringtoolbox.com/air-absolute-kinematic-viscosity-d_601.html at 18°C + @test isapprox(mu_control(u, equations_parabolic, T_ref, R_specific, C, mu_ref), + 1.803e-5, atol = 5e-8) +end end end #module
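
The core mechanism of [PATCH 58/58] above is that `CompressibleNavierStokesDiffusion1D/2D/3D` now store `mu` behind a type parameter `Mu` and let `dynamic_viscosity` dispatch on its type, so `mu` may be either a constant or a function `mu(u, equations)` such as Sutherland's law. The following is a minimal, self-contained sketch of that dispatch pattern in plain Julia, not Trixi code: `ToyDiffusion`, `viscosity`, `mu_sutherland`, and the placeholder temperature extraction are invented here purely for illustration (in Trixi the temperature would come from `Trixi.temperature(u, equations)`, as in the unit test above).

```julia
# Toy container mirroring the idea of storing `mu` behind a type parameter.
struct ToyDiffusion{Mu}
    mu::Mu   # either a Real (constant viscosity) or a callable mu(u, equations)
end

# Hypothetical helper mirroring `dynamic_viscosity`: dispatch on the type of the stored `mu`.
viscosity(u, eq::ToyDiffusion) = viscosity(u, eq.mu, eq)
viscosity(u, mu::Real, eq) = mu            # constant viscosity: return it directly
viscosity(u, mu, eq) = mu(u, eq)           # callable viscosity: evaluate at the current state

# Sutherland-type law as an example of a state-dependent viscosity.
# The temperature extraction is a placeholder for this sketch only.
function mu_sutherland(u, eq; T_ref = 291.15, C = 120.0, mu_ref = 1.827e-5)
    T = u[1]  # placeholder: pretend the first entry of `u` is the temperature in K
    return mu_ref * (T_ref + C) / (T + C) * (T / T_ref)^1.5
end

eq_const = ToyDiffusion(1.0e-3)        # constant viscosity, as in the updated elixirs
eq_varmu = ToyDiffusion(mu_sutherland) # temperature-dependent viscosity

u = (291.15, 0.0, 0.0, 1.0)
viscosity(u, eq_const)  # returns 1.0e-3 via the ::Real method
viscosity(u, eq_varmu)  # evaluates Sutherland's law at T = 291.15 K, giving mu_ref = 1.827e-5
```

Keeping `mu` in the type parameter rather than forcing it to a `Real` is presumably what keeps the constant-viscosity case free of runtime branching: the `::Real` method is selected by dispatch at compile time, so existing elixirs that pass a plain number pay no overhead, while a callable like Sutherland's law is simply evaluated per state inside `flux`.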