Add first set of demos (#84)
* Add more demos

* Install ipywidgets to make the progress bar more compact

* Add writing/reading of time-dependent meshes
jorgensd authored Mar 5, 2024
1 parent 476549a commit 9126a44
Showing 8 changed files with 390 additions and 11 deletions.
4 changes: 4 additions & 0 deletions _config.yml
@@ -35,5 +35,9 @@ sphinx:

config:
html_last_updated_fmt: "%b %d, %Y"
nb_custom_formats:
.py:
- jupytext.reads
- fmt: py

exclude_patterns: [".pytest_cache/*"]
15 changes: 15 additions & 0 deletions _toc.yml
@@ -2,6 +2,21 @@ format: jb-book
root: README

parts:
- caption: Introduction to IPyParallel
chapters:
- file: "docs/ipyparallel_intro"
- caption: Writing and reading mesh data
chapters:
- file: "docs/writing_mesh_checkpoint"
- file: "docs/partitioned_mesh"
- file: "docs/time_dependent_mesh"
# - file: "docs/meshtags"

# - caption: Writing and reading functions
# chapters:
# - file: "docs/writing_functions_checkpoint"
# - file: "docs/write_on_original_mesh"

- caption: Python API
chapters:
- file: "docs/api"
25 changes: 25 additions & 0 deletions docs/ipyparallel_intro.py
@@ -0,0 +1,25 @@
# # Introduction to IPython Parallel
# The following demos rely heavily on IPython Parallel to illustrate how checkpointing works
# across multiple MPI processes.
# We show what happens in parallel by launching three MPI engines
# using [ipyparallel](https://ipyparallel.readthedocs.io/en/latest/).

import ipyparallel as ipp


def hello_mpi():
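    """Print a greeting with the MPI rank and communicator size of the calling engine."""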
    # All imports are done inside the function, as it is executed on the remote engines
from mpi4py import MPI

print(f"Hello from rank {MPI.COMM_WORLD.rank}/{MPI.COMM_WORLD.size - 1}")


with ipp.Cluster(engines="mpi", n=3) as cluster:
# We send the query to run the function `hello_mpi` on all engines
query = cluster[:].apply_async(hello_mpi)
# We wait for all engines to finish
query.wait()
# We check that all engines exited successfully
assert query.successful(), query.error
# We print the output from each engine
print("".join(query.stdout))
Binary file modified docs/logo.png
98 changes: 98 additions & 0 deletions docs/partitioned_mesh.py
@@ -0,0 +1,98 @@
# # Storing mesh partitions
# When a mesh is read into DOLFINx, it is partitioned and its data is re-ordered.
# This means that when the mesh is stored to disk from DOLFINx, the geometry and
# connectivity arrays are re-ordered.
# To avoid re-partitioning the mesh every time a simulation is run
# (on a fixed number of processes), one can store the partitioning of the mesh
# in the checkpoint.

from pathlib import Path

import ipyparallel as ipp


def write_partitioned_mesh(filename: Path):
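    """Write a unit-square mesh with partition information to filename and inspect the file with bpls."""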
import subprocess

from mpi4py import MPI

import dolfinx

import adios4dolfinx

# Create a simple unit square mesh
mesh = dolfinx.mesh.create_unit_square(
MPI.COMM_WORLD,
10,
10,
cell_type=dolfinx.mesh.CellType.quadrilateral,
ghost_mode=dolfinx.mesh.GhostMode.shared_facet,
)

# Write mesh checkpoint
adios4dolfinx.write_mesh(filename, mesh, engine="BP4", store_partition_info=True)
# Inspect checkpoint on rank 0 with `bpls`
if mesh.comm.rank == 0:
output = subprocess.run(["bpls", "-a", "-l", filename], capture_output=True)
print(output.stdout.decode("utf-8"))


# We write the partitioned mesh to file and inspect its contents

mesh_file = Path("partitioned_mesh.bp")
n = 3

# + tags=["hide-output"]
with ipp.Cluster(engines="mpi", n=n) as cluster:
query = cluster[:].apply_async(write_partitioned_mesh, mesh_file)
query.wait()
assert query.successful(), query.error
print("".join(query.stdout))

# -
# ## Reading a partitioned mesh

# If we try to read the mesh on a different number of processes, we will get an error:


def read_partitioned_mesh(filename: Path, read_from_partition: bool = True):
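    """Read the mesh from filename, optionally re-using the partition information stored in the checkpoint."""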
from mpi4py import MPI

import adios4dolfinx

prefix = f"{MPI.COMM_WORLD.rank + 1}/{MPI.COMM_WORLD.size}: "
try:
mesh = adios4dolfinx.read_mesh(
filename, comm=MPI.COMM_WORLD, engine="BP4", read_from_partition=read_from_partition
)
print(f"{prefix} Mesh: {mesh.name} read successfully with {read_from_partition=}")
except ValueError as e:
print(f"{prefix} Caught exception: ", e)


with ipp.Cluster(engines="mpi", n=n + 1) as cluster:
    # Read mesh from file with a different number of processes
query = cluster[:].apply_async(read_partitioned_mesh, mesh_file)
query.wait()
    assert query.successful(), query.error
print("".join(query.stdout))

# Read mesh from file with a different number of processes (not using the partitioning information).

# + tags=["hide-output"]
with ipp.Cluster(engines="mpi", n=n + 1) as cluster:
query = cluster[:].apply_async(read_partitioned_mesh, mesh_file, False)
query.wait()
    assert query.successful(), query.error
print("".join(query.stdout))

# -
# Read mesh from file with the same number of processes as it was written with,
# re-using the partitioning information.

# + tags=["hide-output"]
with ipp.Cluster(engines="mpi", n=n) as cluster:
query = cluster[:].apply_async(read_partitioned_mesh, mesh_file, True)
query.wait()
    assert query.successful(), query.error
print("".join(query.stdout))
89 changes: 89 additions & 0 deletions docs/time_dependent_mesh.py
@@ -0,0 +1,89 @@
# # Time-dependent mesh checkpoints
# As we have seen in the previous examples, we store information about the connectivity,
# the coordinates of the mesh nodes,
# as well as a reference element. Note that the only thing that can change for a mesh
# during a simulation is the coordinates of the mesh nodes.
# In the following example, we will demonstrate how to write a time-dependent mesh
# checkpoint to disk.

# First, we create a simple function to compute the volume of a mesh
from pathlib import Path

from mpi4py import MPI

import ipyparallel as ipp

import adios4dolfinx


def compute_volume(mesh, time_stamp):
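    """Assemble the total volume of mesh and print it on rank 0 together with time_stamp."""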
from mpi4py import MPI

import dolfinx
import ufl

# Compute the volume of the mesh
vol_form = dolfinx.fem.form(1 * ufl.dx(domain=mesh))
vol_local = dolfinx.fem.assemble_scalar(vol_form)
vol_glob = mesh.comm.allreduce(vol_local, op=MPI.SUM)
if mesh.comm.rank == 0:
print(f"{mesh.comm.rank+1}/{mesh.comm.size} Time: {time_stamp} Mesh Volume: {vol_glob}")


def write_meshes(filename: Path):
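    """Write a unit cube to filename at two time stamps, perturbing its geometry in between."""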
from mpi4py import MPI

import dolfinx
import numpy as np

import adios4dolfinx

# Create a unit cube
mesh = dolfinx.mesh.create_unit_cube(
MPI.COMM_WORLD,
3,
6,
5,
cell_type=dolfinx.mesh.CellType.hexahedron,
ghost_mode=dolfinx.mesh.GhostMode.shared_facet,
)

# Write mesh to file, associated with time stamp 1.5
adios4dolfinx.write_mesh(filename, mesh, engine="BP4", time=1.5)
compute_volume(mesh, 1.5)
mesh.geometry.x[:, 0] += 0.1 * mesh.geometry.x[:, 0]
mesh.geometry.x[:, 1] += 0.3 * mesh.geometry.x[:, 1] * np.sin(mesh.geometry.x[:, 2])
compute_volume(mesh, 3.3)
# Write mesh to file, associated with time stamp 3.3
# Note that we set the mode to append, as we have already created the file
# and we do not want to overwrite the existing data
adios4dolfinx.write_mesh(
filename, mesh, engine="BP4", time=3.3, mode=adios4dolfinx.adios2_helpers.adios2.Mode.Append
)


# We write the sequence of meshes to file
mesh_file = Path("timedep_mesh.bp")
n = 3

with ipp.Cluster(engines="mpi", n=n) as cluster:
# Write mesh to file
cluster[:].push({"compute_volume": compute_volume})
query = cluster[:].apply_async(write_meshes, mesh_file)
query.wait()
assert query.successful(), query.error
print("".join(query.stdout))

# ## Reading a time-dependent mesh
# The only thing we need to do to read a specific mesh is to pass in the associated time stamp.

second_mesh = adios4dolfinx.read_mesh(mesh_file, comm=MPI.COMM_WORLD, engine="BP4", time=3.3)
compute_volume(second_mesh, 3.3)

first_mesh = adios4dolfinx.read_mesh(mesh_file, comm=MPI.COMM_WORLD, engine="BP4", time=1.5)
compute_volume(first_mesh, 1.5)

# We observe that the volume of the mesh has changed, as we perturbed the mesh
# between the two time stamps.
# We also note that the meshes can be read on a different number of processes than
# they were written with, and in a different order (as long as the time stamps are correct).
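
# As a final check (a sketch; it assumes the `bpls` command-line tool from ADIOS2
# is available on the path), we can list the attributes and time-dependent variables
# stored in the checkpoint:

import subprocess

output = subprocess.run(["bpls", "-a", "-l", str(mesh_file)], capture_output=True)
print(output.stdout.decode("utf-8"))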
148 changes: 148 additions & 0 deletions docs/writing_mesh_checkpoint.py
@@ -0,0 +1,148 @@
# # Writing a mesh checkpoint
#
# In this example, we will demonstrate how to write a mesh checkpoint to disk.
#
# We start by creating a simple unit-square mesh.

from pathlib import Path

from mpi4py import MPI

import dolfinx
import ipyparallel as ipp

mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, 10, 10)

# Note that when a mesh is created in DOLFINx, we pass in an MPI communicator.
# The communicator is used to partition (distribute) the mesh across the available processes.
# This means that each process only has access to a subset of the cells and nodes of the mesh.
# We can inspect these with the following commands:


def print_mesh_info(mesh: dolfinx.mesh.Mesh):
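    """Print the number of owned and ghosted cells and nodes on each process."""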
cell_map = mesh.topology.index_map(mesh.topology.dim)
node_map = mesh.geometry.index_map()
print(
f"Rank {mesh.comm.rank}: number of owned cells {cell_map.size_local}",
f", number of ghosted cells {cell_map.num_ghosts}\n",
f"Number of owned nodes {node_map.size_local}",
f", number of ghosted nodes {node_map.num_ghosts}",
)


print_mesh_info(mesh)

# ## Create a distributed mesh
# Next, we can use IPython parallel to inspect a partitioned mesh.
# We create a convenience function for creating a mesh that shares cells on the boundaries
# between neighbouring processes if `ghosted=True`.


def create_distributed_mesh(ghosted: bool, N: int = 10):
"""
Create a distributed mesh with N x N cells. Share cells on process boundaries
if ghosted is set to True
"""
from mpi4py import MPI

import dolfinx

ghost_mode = dolfinx.mesh.GhostMode.shared_facet if ghosted else dolfinx.mesh.GhostMode.none
mesh = dolfinx.mesh.create_unit_square(MPI.COMM_WORLD, N, N, ghost_mode=ghost_mode)
print(f"{ghost_mode=}")
print_mesh_info(mesh)


# Next we start up a new cluster with three engines.
# As we defined `print_mesh_info` locally on this process, we need to push it to all engines.

# + tags=["hide-output"]
with ipp.Cluster(engines="mpi", n=3) as cluster:
# Push print_mesh_info to all engines
cluster[:].push({"print_mesh_info": print_mesh_info})

# Create mesh with ghosted cells
query_true = cluster[:].apply_async(create_distributed_mesh, True)
query_true.wait()
assert query_true.successful(), query_true.error
print("".join(query_true.stdout))
# Create mesh without ghosted cells
query_false = cluster[:].apply_async(create_distributed_mesh, False)
query_false.wait()
assert query_false.successful(), query_false.error
print("".join(query_false.stdout))

# -
# ## Writing a mesh checkpoint
# The input data to a mesh consists of:
# - A geometry: the set of points in R^D that make up each cell
# - A two-dimensional connectivity array: a list that indicates which nodes of the geometry
#   are part of each cell
# - A reference element: used to push data back and forth between the reference and physical
#   cells and to compute Jacobians
#
# Below, we take a quick look at these ingredients before using adios4dolfinx to write
# the mesh to file.
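
# A minimal sketch of this inspection (the exact attribute layout may differ slightly
# between DOLFINx versions):

print(f"Geometry (node coordinates), shape: {mesh.geometry.x.shape}")
print(f"Connectivity (cell-to-node map), shape: {mesh.geometry.dofmap.shape}")
print(f"Cell type of the reference element: {mesh.topology.cell_type}")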


def write_mesh(filename: Path):
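    """Write a unit-square mesh to filename and inspect the checkpoint with bpls."""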
import subprocess

from mpi4py import MPI

import dolfinx

import adios4dolfinx

# Create a simple unit square mesh
mesh = dolfinx.mesh.create_unit_square(
MPI.COMM_WORLD, 10, 10, cell_type=dolfinx.mesh.CellType.quadrilateral
)

# Write mesh checkpoint
adios4dolfinx.write_mesh(filename, mesh, engine="BP4")

# Inspect checkpoint on rank 0 with `bpls`
if mesh.comm.rank == 0:
output = subprocess.run(["bpls", "-a", "-l", str(filename.absolute())], capture_output=True)
print(output.stdout.decode("utf-8"))


mesh_file = Path("mesh.bp")

# + tags=["hide-output"]
with ipp.Cluster(engines="mpi", n=2) as cluster:
# Write mesh to file
query = cluster[:].apply_async(write_mesh, mesh_file)
query.wait()
assert query.successful(), query.error
print("".join(query.stdout))

# -
# We observe that we have stored all the data needed to re-create the mesh in the file `mesh.bp`.
# We can therefore read it (on any number of processes) with `adios4dolfinx.read_mesh`.


def read_mesh(filename: Path):
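    """Read the mesh from filename without ghost cells and print its distribution."""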
from mpi4py import MPI

import dolfinx

import adios4dolfinx

mesh = adios4dolfinx.read_mesh(
filename, comm=MPI.COMM_WORLD, engine="BP4", ghost_mode=dolfinx.mesh.GhostMode.none
)
print_mesh_info(mesh)


# ## Reading mesh checkpoints (N-to-M)
# We can now read the checkpoint on a different number of processes than we wrote it on.

# + tags=["hide-output"]
with ipp.Cluster(engines="mpi", n=4) as cluster:
    # Read mesh from file on four processes
cluster[:].push({"print_mesh_info": print_mesh_info})
query = cluster[:].apply_async(read_mesh, mesh_file)
query.wait()
assert query.successful(), query.error
print("".join(query.stdout))
# -