diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5391bb397..b6bac7361 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -63,7 +63,7 @@ jobs: - name: "Main Script" run: | export SUMPY_FORCE_SYMBOLIC_BACKEND=sympy - export PYTEST_ADDOPTS=${PYTEST_ADDOPTS:-"-m 'not slowtest'"} + export PYTEST_ADDOPTS=${PYTEST_ADDOPTS:-"-m 'not slowtest' -vv"} curl -L -O https://tiker.net/ci-support-v0 . ci-support-v0 @@ -84,7 +84,7 @@ jobs: export LC_ALL=en_US.UTF-8 export LANG=en_US.UTF-8 export CONDA_ENVIRONMENT=.test-conda-env.yml - export PYTEST_ADDOPTS=${PYTEST_ADDOPTS:-"-m 'not slowtest'"} + export PYTEST_ADDOPTS=${PYTEST_ADDOPTS:-"-m 'not slowtest' -vv"} curl -L -O https://tiker.net/ci-support-v0 . ci-support-v0 @@ -99,7 +99,7 @@ jobs: - name: "Main Script" run: | export SUMPY_FORCE_SYMBOLIC_BACKEND=symengine - export PYTEST_ADDOPTS=${PYTEST_ADDOPTS:-"-m 'not slowtest'"} + export PYTEST_ADDOPTS=${PYTEST_ADDOPTS:-"-m 'not slowtest' -vv"} curl -L -O https://tiker.net/ci-support-v0 . ci-support-v0 diff --git a/doc/symbolic.rst b/doc/symbolic.rst index d2a3bf8c7..19d7cc65a 100644 --- a/doc/symbolic.rst +++ b/doc/symbolic.rst @@ -33,6 +33,11 @@ Maxwell's equations .. automodule:: pytential.symbolic.pde.maxwell +Elasticity equations +^^^^^^^^^^^^^^^^^^^^ + +.. automodule:: pytential.symbolic.elasticity + Stokes' equations ^^^^^^^^^^^^^^^^^ @@ -43,6 +48,19 @@ Scalar Beltrami equations .. automodule:: pytential.symbolic.pde.beltrami +Internals +^^^^^^^^^ + +.. autoclass:: pytential.symbolic.elasticity.ElasticityWrapperYoshida +.. autoclass:: pytential.symbolic.elasticity.ElasticityDoubleLayerWrapperYoshida + +Rewriting expressions with ``IntG``\ s +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. automodule:: pytential.symbolic.pde.systems.merge +.. automodule:: pytential.symbolic.pde.systems.reduce +.. automodule:: pytential.symbolic.pde.systems.deriv + Internal affairs ---------------- @@ -57,3 +75,9 @@ How a symbolic operator gets executed .. automodule:: pytential.symbolic.execution .. automodule:: pytential.symbolic.compiler + +Rewriting expressions with ``IntG``\ s internals +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. automethod:: pytential.symbolic.pde.systems.deriv.convert_target_transformation_to_source +.. automethod:: pytential.symbolic.pde.systems.deriv.rewrite_int_g_using_base_kernel diff --git a/pytential/symbolic/elasticity.py b/pytential/symbolic/elasticity.py new file mode 100644 index 000000000..fa6405b60 --- /dev/null +++ b/pytential/symbolic/elasticity.py @@ -0,0 +1,1085 @@ +__copyright__ = """ +Copyright (C) 2017 Natalie Beams +Copyright (C) 2022 Isuru Fernando +""" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+"""
+
+import numpy as np
+
+from pytential import sym
+from pytential.symbolic.pde.systems import (rewrite_using_base_kernel,
+    merge_int_g_exprs)
+from pytential.symbolic.typing import ExpressionT
+from sumpy.kernel import (StressletKernel, LaplaceKernel, StokesletKernel,
+    ElasticityKernel, BiharmonicKernel, Kernel, LineOfCompressionKernel,
+    AxisTargetDerivative, AxisSourceDerivative, TargetPointMultiplier)
+from sumpy.symbolic import SpatialConstant
+import pymbolic
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from functools import cached_property
+from enum import Enum
+
+__doc__ = """
+.. autoclass:: ElasticityWrapperBase
+.. autoclass:: ElasticityDoubleLayerWrapperBase
+.. autoclass:: Method
+
+.. autofunction:: pytential.symbolic.elasticity.make_elasticity_wrapper
+.. autofunction:: pytential.symbolic.elasticity.make_elasticity_double_layer_wrapper
+"""
+
+
+# {{{ ElasticityWrapper ABCs
+
+# It is OK if these "escape" into pytential expressions because mappers will
+# use the MRO to dispatch them to `map_variable`.
+_MU_SYM_DEFAULT = SpatialConstant("mu")
+_NU_SYM_DEFAULT = SpatialConstant("nu")
+
+
+@dataclass
+class ElasticityWrapperBase(ABC):
+    """Wrapper class for the :class:`~sumpy.kernel.ElasticityKernel`.
+
+    This class is meant to shield the user from the messiness of writing
+    out every term in the expansion of the double-indexed Elasticity kernel
+    applied to the density vector. The object is created
+    to do some of the set-up and bookkeeping once, rather than every
+    time we want to create a symbolic expression based on the kernel -- say,
+    once when we solve for the density and once when we want a symbolic
+    representation of the solution.
+
+    The :meth:`apply` function returns the integral expressions needed for
+    the vector velocity resulting from convolution with the vector density,
+    and is meant to work similarly to calling
+    :func:`~pytential.symbolic.primitives.S` (which is
+    :class:`~pytential.symbolic.primitives.IntG`).
+
+    Similar functions are available for other useful quantities related to
+    the solution: :meth:`apply_derivative` (target derivative).
+
+    .. automethod:: apply
+    .. automethod:: apply_derivative
+    """
+    dim: int
+    mu: ExpressionT
+    nu: ExpressionT
+
+    @abstractmethod
+    def apply(self, density_vec_sym, qbx_forced_limit, extra_deriv_dirs=()):
+        """Symbolic expressions for integrating the Elasticity kernel.
+
+        Returns an object array of symbolic expressions for the vector
+        resulting from integrating the dyadic Elasticity kernel with
+        variable *density_vec_sym*.
+
+        :arg density_vec_sym: a symbolic vector variable for the density vector.
+        :arg qbx_forced_limit: the *qbx_forced_limit* argument to be passed on
+            to :class:`~pytential.symbolic.primitives.IntG`.
+        :arg extra_deriv_dirs: adds target derivatives to all the integral
+            objects with the given derivative axis.
+        """
+
+    def apply_derivative(self, deriv_dir, density_vec_sym, qbx_forced_limit):
+        """Symbolic derivative of velocity from the Elasticity kernel.
+
+        Returns an object array of symbolic expressions for the vector
+        resulting from integrating the *deriv_dir* target derivative of the
+        dyadic Elasticity kernel with variable *density_vec_sym*.
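+
+        For illustration, a hypothetical driver (the ``sigma`` density and the
+        geometry setup are assumed to exist elsewhere, not part of this
+        module) might look like::
+
+            from pytential import sym
+            from pytential.symbolic.elasticity import (
+                Method, make_elasticity_wrapper)
+
+            sigma = sym.make_sym_vector("sigma", 3)
+            wrapper = make_elasticity_wrapper(3, method=Method.biharmonic)
+            # x-derivative of the single-layer elasticity potential
+            grad_u = wrapper.apply_derivative(0, sigma, qbx_forced_limit=2)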
+ + :arg deriv_dir: integer denoting the axis direction for the derivative. + :arg density_vec_sym: a symbolic vector variable for the density vector. + :arg qbx_forced_limit: the *qbx_forced_limit* argument to be passed on + to :class:`~pytential.symbolic.primitives.IntG`. + """ + return self.apply(density_vec_sym, qbx_forced_limit, (deriv_dir,)) + + +@dataclass +class ElasticityDoubleLayerWrapperBase(ABC): + """Wrapper class for the double layer of + :class:`~sumpy.kernel.ElasticityKernel` kernel. + + This class is meant to shield the user from the messiness of writing + out every term in the expansion of the triple-indexed Stresslet + kernel applied to both a normal vector and the density vector. + The object is created to do some of the set-up and bookkeeping once, + rather than every time we want to create a symbolic expression based + on the kernel -- say, once when we solve for the density, and once when + we want a symbolic representation for the solution, for example. + + The :meth:`apply` function returns the integral expressions needed for + convolving the kernel with a vector density, and is meant to work + similarly to :func:`~pytential.symbolic.primitives.S` (which is + :class:`~pytential.symbolic.primitives.IntG`). + + Similar functions are available for other useful things related to + the flow: :meth:`apply_derivative` (target derivative). + + .. automethod:: apply + .. automethod:: apply_derivative + """ + dim: int + mu: ExpressionT + nu: ExpressionT + + @abstractmethod + def apply(self, density_vec_sym, dir_vec_sym, qbx_forced_limit, + extra_deriv_dirs=()): + """Symbolic expressions for integrating Stresslet kernel. + + Returns an object array of symbolic expressions for the vector + resulting from integrating the dyadic Stresslet kernel with + variable *density_vec_sym* and source direction vectors *dir_vec_sym*. + + :arg density_vec_sym: a symbolic vector variable for the density vector. + :arg dir_vec_sym: a symbolic vector variable for the direction vector. + :arg qbx_forced_limit: the *qbx_forced_limit* argument to be passed on + to :class:`~pytential.symbolic.primitives.IntG`. + :arg extra_deriv_dirs: adds target derivatives to all the integral + objects with the given derivative axis. + """ + raise NotImplementedError + + def apply_derivative(self, deriv_dir, density_vec_sym, dir_vec_sym, + qbx_forced_limit): + """Symbolic derivative of velocity from Elasticity kernel. + + Returns an object array of symbolic expressions for the vector + resulting from integrating the *deriv_dir* target derivative of the + dyadic Elasticity kernel with variable *density_vec_sym*. + + :arg deriv_dir: integer denoting the axis direction for the derivative. + :arg density_vec_sym: a symbolic vector variable for the density vector. + :arg dir_vec_sym: a symbolic vector variable for the normal direction. + :arg qbx_forced_limit: the *qbx_forced_limit* argument to be passed on + to :class:`~pytential.symbolic.primitives.IntG`. 
+ """ + return self.apply(density_vec_sym, dir_vec_sym, qbx_forced_limit, + (deriv_dir,)) + +# }}} + + +# {{{ Naive and Biharmonic impl + +def _create_int_g(knl, deriv_dirs, density, **kwargs): + for deriv_dir in deriv_dirs: + knl = AxisTargetDerivative(deriv_dir, knl) + + kernel_arg_names = {karg.loopy_arg.name + for karg in (knl.get_args() + knl.get_source_args())} + + # When the kernel is Laplace, mu and nu are not kernel arguments + # Also when nu==0.5, it's not a kernel argument to StokesletKernel + for var_name in ["mu", "nu"]: + if var_name not in kernel_arg_names: + kwargs.pop(var_name) + + res = sym.int_g_vec(knl, density, **kwargs) + return res + + +@dataclass +class _ElasticityWrapperNaiveOrBiharmonic: + dim: int + mu: ExpressionT + nu: ExpressionT + base_kernel: Kernel + + def __post_init__(self): + if not (self.dim == 3 or self.dim == 2): + raise ValueError( + f"unsupported dimension given to ElasticityWrapper: {self.dim}") + + @cached_property + def kernel_dict(self): + d = {} + # The dictionary allows us to exploit symmetry -- that + # :math:`T_{01}` is identical to :math:`T_{10}` -- and avoid creating + # multiple expansions for the same kernel in a different ordering. + for i in range(self.dim): + for j in range(i, self.dim): + if self.nu == 0.5: + d[(i, j)] = StokesletKernel(dim=self.dim, icomp=i, + jcomp=j) + else: + d[(i, j)] = ElasticityKernel(dim=self.dim, icomp=i, + jcomp=j) + d[(j, i)] = d[(i, j)] + + return d + + def _get_int_g(self, idx, density_sym, dir_vec_sym, qbx_forced_limit, + deriv_dirs): + """ + Returns the convolution of the elasticity kernel given by `idx` + and its derivatives. + """ + res = _create_int_g(self.kernel_dict[idx], deriv_dirs, + density=density_sym*dir_vec_sym[idx[-1]], + qbx_forced_limit=qbx_forced_limit, mu=self.mu, + nu=self.nu)/(2*(1-self.nu)) + return res + + def apply(self, density_vec_sym, qbx_forced_limit, extra_deriv_dirs=()): + + sym_expr = np.zeros((self.dim,), dtype=object) + + # For stokeslet, there's no direction vector involved + # passing a list of ones instead to remove its usage. 
+ for comp in range(self.dim): + for i in range(self.dim): + sym_expr[comp] += self._get_int_g((comp, i), + density_vec_sym[i], [1]*self.dim, + qbx_forced_limit, deriv_dirs=extra_deriv_dirs) + + return np.array(rewrite_using_base_kernel(sym_expr, + base_kernel=self.base_kernel)) + + +class ElasticityWrapperNaive(_ElasticityWrapperNaiveOrBiharmonic, + ElasticityWrapperBase): + def __init__(self, dim, mu, nu): + super().__init__(dim=dim, mu=mu, nu=nu, base_kernel=None) + ElasticityWrapperBase.__init__(self, dim=dim, mu=mu, nu=nu) + + +class ElasticityWrapperBiharmonic(_ElasticityWrapperNaiveOrBiharmonic, + ElasticityWrapperBase): + def __init__(self, dim, mu, nu): + super().__init__(dim=dim, mu=mu, nu=nu, + base_kernel=BiharmonicKernel(dim)) + ElasticityWrapperBase.__init__(self, dim=dim, mu=mu, nu=nu) + + +# }}} + + +# {{{ ElasticityDoubleLayerWrapper Naive and Biharmonic impl + +@dataclass +class _ElasticityDoubleLayerWrapperNaiveOrBiharmonic: + dim: int + mu: ExpressionT + nu: ExpressionT + base_kernel: Kernel + + def __post_init__(self): + if not (self.dim == 3 or self.dim == 2): + raise ValueError("unsupported dimension given to " + f"ElasticityDoubleLayerWrapper: {self.dim}") + + @cached_property + def kernel_dict(self): + d = {} + + for i in range(self.dim): + for j in range(i, self.dim): + for k in range(j, self.dim): + d[(i, j, k)] = StressletKernel(dim=self.dim, icomp=i, + jcomp=j, kcomp=k) + + # The dictionary allows us to exploit symmetry -- that + # :math:`T_{012}` is identical to :math:`T_{120}` -- and avoid creating + # multiple expansions for the same kernel in a different ordering. + for i in range(self.dim): + for j in range(self.dim): + for k in range(self.dim): + if (i, j, k) in d: + continue + s = tuple(sorted([i, j, k])) + d[(i, j, k)] = d[s] + + # For elasticity (nu != 0.5), we need the laplacian of the + # BiharmonicKernel which is the LaplaceKernel. + if self.nu != 0.5: + d["laplacian"] = LaplaceKernel(self.dim) + + return d + + def _get_int_g(self, idx, density_sym, dir_vec_sym, qbx_forced_limit, + deriv_dirs): + """ + Returns the convolution of the double layer of the elasticity kernel + given by `idx` and its derivatives. 
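+
+        The kernel is assembled from the nu-independent
+        :class:`~sumpy.kernel.StressletKernel` plus Laplace-kernel derivative
+        corrections weighted by ``(1 - 2*nu)``; for ``nu == 0.5`` these
+        corrections vanish and the pure Stokes stresslet is recovered.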
+ """ + + nu = self.nu + kernel_indices = [idx] + dir_vec_indices = [idx[-1]] + coeffs = [1] + extra_deriv_dirs_vec = [[]] + + kernel_indices = [idx, "laplacian", "laplacian", "laplacian"] + dir_vec_indices = [idx[-1], idx[1], idx[0], idx[2]] + coeffs = [1, (1 - 2*nu)/self.dim, -(1 - 2*nu)/self.dim, -(1 - 2*nu)] + extra_deriv_dirs_vec = [[], [idx[0]], [idx[1]], [idx[2]]] + if idx[0] != idx[1]: + coeffs[-1] = 0 + + result = 0 + for kernel_idx, dir_vec_idx, coeff, extra_deriv_dirs in \ + zip(kernel_indices, dir_vec_indices, coeffs, + extra_deriv_dirs_vec): + if coeff == 0: + continue + knl = self.kernel_dict[kernel_idx] + result += _create_int_g(knl, tuple(deriv_dirs) + tuple(extra_deriv_dirs), + density=density_sym*dir_vec_sym[dir_vec_idx], + qbx_forced_limit=qbx_forced_limit, mu=self.mu, nu=self.nu) * \ + coeff + return result/(2*(1 - nu)) + + def apply(self, density_vec_sym, dir_vec_sym, qbx_forced_limit, + extra_deriv_dirs=()): + + sym_expr = np.zeros((self.dim,), dtype=object) + + for comp in range(self.dim): + for i in range(self.dim): + for j in range(self.dim): + sym_expr[comp] += self._get_int_g((comp, i, j), + density_vec_sym[i], dir_vec_sym, + qbx_forced_limit, deriv_dirs=extra_deriv_dirs) + + return np.array(rewrite_using_base_kernel(sym_expr, + base_kernel=self.base_kernel)) + + def apply_single_and_double_layer(self, stokeslet_density_vec_sym, + stresslet_density_vec_sym, dir_vec_sym, + qbx_forced_limit, stokeslet_weight, stresslet_weight, + extra_deriv_dirs=()): + + stokeslet_obj = _ElasticityWrapperNaiveOrBiharmonic(dim=self.dim, + mu=self.mu, nu=self.nu, base_kernel=self.base_kernel) + + sym_expr = 0 + if stresslet_weight != 0: + sym_expr += self.apply(stresslet_density_vec_sym, dir_vec_sym, + qbx_forced_limit, extra_deriv_dirs) * stresslet_weight + if stokeslet_weight != 0: + sym_expr += stokeslet_obj.apply(stokeslet_density_vec_sym, + qbx_forced_limit, extra_deriv_dirs) * stokeslet_weight + + return sym_expr + + +class ElasticityDoubleLayerWrapperNaive( + _ElasticityDoubleLayerWrapperNaiveOrBiharmonic, + ElasticityDoubleLayerWrapperBase): + def __init__(self, dim, mu, nu): + super().__init__(dim=dim, mu=mu, nu=nu, + base_kernel=None) + ElasticityDoubleLayerWrapperBase.__init__(self, dim=dim, + mu=mu, nu=nu) + + +class ElasticityDoubleLayerWrapperBiharmonic( + _ElasticityDoubleLayerWrapperNaiveOrBiharmonic, + ElasticityDoubleLayerWrapperBase): + def __init__(self, dim, mu, nu): + super().__init__(dim=dim, mu=mu, nu=nu, + base_kernel=BiharmonicKernel(dim)) + ElasticityDoubleLayerWrapperBase.__init__(self, dim=dim, + mu=mu, nu=nu) + +# }}} + + +# {{{ dispatch function + +class Method(Enum): + """Method to use in Elasticity/Stokes problem. + """ + naive = 1 + laplace = 2 + biharmonic = 3 + laplace_slow = 4 + + +def make_elasticity_wrapper( + dim: int, + mu: ExpressionT = _MU_SYM_DEFAULT, + nu: ExpressionT = _NU_SYM_DEFAULT, + method: Method = Method.naive) -> ElasticityWrapperBase: + """Creates a :class:`ElasticityWrapperBase` object depending on the input + values. + + :param: dim: dimension + :param: mu: viscosity symbol, defaults to a variable named "mu" + :param: nu: poisson ratio symbol, defaults to a variable named "nu" + :param: method: method to use, defaults to the *Method* enum value naive. 
+
+    :return: an :class:`ElasticityWrapperBase` object
+    """
+
+    if nu == 0.5:
+        from pytential.symbolic.stokes import StokesletWrapper
+        return StokesletWrapper(dim=dim, mu=mu, method=method)
+    if method == Method.naive:
+        return ElasticityWrapperNaive(dim=dim, mu=mu, nu=nu)
+    elif method == Method.biharmonic:
+        return ElasticityWrapperBiharmonic(dim=dim, mu=mu, nu=nu)
+    elif method == Method.laplace:
+        if nu == 0.5:
+            from pytential.symbolic.stokes import StokesletWrapperTornberg
+            return StokesletWrapperTornberg(dim=dim,
+                mu=mu, nu=nu)
+        else:
+            return ElasticityWrapperYoshida(dim=dim,
+                mu=mu, nu=nu)
+    elif method == Method.laplace_slow:
+        if nu == 0.5:
+            raise ValueError("invalid value of nu=0.5 for method laplace_slow")
+        else:
+            return ElasticityWrapperFu(dim=dim,
+                mu=mu, nu=nu)
+    else:
+        raise ValueError(f"invalid method: {method}. "
+            "Needs to be one of naive, laplace, biharmonic, laplace_slow")
+
+
+def make_elasticity_double_layer_wrapper(
+        dim: int,
+        mu: ExpressionT = _MU_SYM_DEFAULT,
+        nu: ExpressionT = _NU_SYM_DEFAULT,
+        method: Method = Method.naive) -> ElasticityDoubleLayerWrapperBase:
+    """Creates an :class:`ElasticityDoubleLayerWrapperBase` object depending on
+    the input values.
+
+    :param dim: dimension
+    :param mu: shear modulus symbol, defaults to a variable named "mu"
+    :param nu: Poisson's ratio symbol, defaults to a variable named "nu"
+    :param method: method to use, defaults to the *Method* enum value naive.
+
+    :return: an :class:`ElasticityDoubleLayerWrapperBase` object
+    """
+    if nu == 0.5:
+        from pytential.symbolic.stokes import StressletWrapper
+        return StressletWrapper(dim=dim, mu=mu, method=method)
+    if method == Method.naive:
+        return ElasticityDoubleLayerWrapperNaive(dim=dim, mu=mu,
+            nu=nu)
+    elif method == Method.biharmonic:
+        return ElasticityDoubleLayerWrapperBiharmonic(dim=dim, mu=mu,
+            nu=nu)
+    elif method == Method.laplace:
+        if nu == 0.5:
+            from pytential.symbolic.stokes import StressletWrapperTornberg
+            return StressletWrapperTornberg(dim=dim,
+                mu=mu, nu=nu)
+        else:
+            return ElasticityDoubleLayerWrapperYoshida(dim=dim,
+                mu=mu, nu=nu)
+    elif method == Method.laplace_slow:
+        if nu == 0.5:
+            raise ValueError("invalid value of nu=0.5 for method laplace_slow")
+        else:
+            return ElasticityDoubleLayerWrapperFu(dim=dim,
+                mu=mu, nu=nu)
+    else:
+        raise ValueError(f"invalid method: {method}. "
+            "Needs to be one of naive, laplace, biharmonic, laplace_slow")
+
+
+# }}}
+
+
+# {{{ Yoshida
+
+@dataclass
+class ElasticityDoubleLayerWrapperYoshida(ElasticityDoubleLayerWrapperBase):
+    r"""ElasticityDoubleLayer wrapper using Yoshida et al.'s method [1], which
+    uses Laplace derivatives.
+
+    [1] Yoshida, K. I., Nishimura, N., & Kobayashi, S. (2001). Application of
+    fast multipole Galerkin boundary integral equation method to elastostatic
+    crack problems in 3D.
+    International Journal for Numerical Methods in Engineering, 50(3), 525-547.
+    `DOI <https://doi.org/10.1002/1097-0207(20010130)50:3<525::AID-NME34>3.0.CO;2-4>`__ # noqa
+    """
+    dim: int
+    mu: ExpressionT
+    nu: ExpressionT
+
+    def __post_init__(self):
+        if self.dim != 3:
+            raise ValueError("unsupported dimension given to "
+                f"ElasticityDoubleLayerWrapperYoshida: {self.dim}")
+
+    @cached_property
+    def laplace_kernel(self):
+        return LaplaceKernel(dim=3)
+
+    def apply(self, density_vec_sym, dir_vec_sym, qbx_forced_limit,
+            extra_deriv_dirs=()):
+        return self.apply_single_and_double_layer([0]*self.dim,
+            density_vec_sym, dir_vec_sym, qbx_forced_limit, 0, 1,
+            extra_deriv_dirs)
+
+    def apply_single_and_double_layer(self, stokeslet_density_vec_sym,
+            stresslet_density_vec_sym, dir_vec_sym,
+            qbx_forced_limit, stokeslet_weight, stresslet_weight,
+            extra_deriv_dirs=()):
+
+        mu = self.mu
+        nu = self.nu
+        lam = 2*nu*mu/(1-2*nu)
+        stokeslet_weight *= -1
+
+        def C(i, j, k, l):  # noqa: E741
+            res = 0
+            if i == j and k == l:
+                res += lam
+            if i == k and j == l:
+                res += mu
+            if i == l and j == k:
+                res += mu
+            return res * stresslet_weight
+
+        def add_extra_deriv_dirs(target_kernel):
+            for deriv_dir in extra_deriv_dirs:
+                target_kernel = AxisTargetDerivative(deriv_dir, target_kernel)
+            return target_kernel
+
+        def P(i, j, int_g):
+            res = -int_g.copy(target_kernel=add_extra_deriv_dirs(
+                TargetPointMultiplier(j,
+                    AxisTargetDerivative(i, int_g.target_kernel))))
+            if i == j:
+                res += (3 - 4*nu)*int_g.copy(
+                    target_kernel=add_extra_deriv_dirs(int_g.target_kernel))
+            return res / (4*mu*(1 - nu))
+
+        def Q(i, int_g):
+            res = int_g.copy(target_kernel=add_extra_deriv_dirs(
+                AxisTargetDerivative(i, int_g.target_kernel)))
+            return res / (4*mu*(1 - nu))
+
+        sym_expr = np.zeros((3,), dtype=object)
+
+        kernel = self.laplace_kernel
+        source = sym.nodes(3).as_vector()
+        normal = dir_vec_sym
+        sigma = stresslet_density_vec_sym
+
+        source_kernels = [None]*4
+        for i in range(3):
+            source_kernels[i] = AxisSourceDerivative(i, kernel)
+        source_kernels[3] = kernel
+
+        for i in range(3):
+            for k in range(3):
+                densities = [0]*4
+                for l in range(3):  # noqa: E741
+                    for j in range(3):
+                        for m in range(3):
+                            densities[l] += C(k, l, m, j)*normal[m]*sigma[j]
+                densities[3] += stokeslet_weight * stokeslet_density_vec_sym[k]
+                int_g = sym.IntG(target_kernel=kernel,
+                    source_kernels=tuple(source_kernels),
+                    densities=tuple(densities),
+                    qbx_forced_limit=qbx_forced_limit)
+                sym_expr[i] += P(i, k, int_g)
+
+            densities = [0]*4
+            for k in range(3):
+                for m in range(3):
+                    for j in range(3):
+                        for l in range(3):  # noqa: E741
+                            densities[l] += \
+                                C(k, l, m, j)*normal[m]*sigma[j]*source[k]
+                            if k == l:
+                                densities[3] += \
+                                    C(k, l, m, j)*normal[m]*sigma[j]
+                densities[3] += stokeslet_weight * source[k] \
+                    * stokeslet_density_vec_sym[k]
+            int_g = sym.IntG(target_kernel=kernel,
+                source_kernels=tuple(source_kernels),
+                densities=tuple(densities),
+                qbx_forced_limit=qbx_forced_limit)
+            sym_expr[i] += Q(i, int_g)
+
+        return sym_expr
+
+
+@dataclass
+class ElasticityWrapperYoshida(ElasticityWrapperBase):
+    r"""Elasticity single layer using Yoshida et al.'s method [1], which uses
+    Laplace derivatives.
+
+    [1] Yoshida, K. I., Nishimura, N., & Kobayashi, S. (2001). Application of
+    fast multipole Galerkin boundary integral equation method to elastostatic
+    crack problems in 3D.
+    International Journal for Numerical Methods in Engineering, 50(3), 525-547.
+    `DOI <https://doi.org/10.1002/1097-0207(20010130)50:3<525::AID-NME34>3.0.CO;2-4>`__ # noqa
+    """
+    dim: int
+    mu: ExpressionT
+    nu: ExpressionT
+
+    def __post_init__(self):
+        if self.dim != 3:
+            raise ValueError("unsupported dimension given to "
+                f"ElasticityWrapperYoshida: {self.dim}")
+
+    @cached_property
+    def stresslet(self):
+        return ElasticityDoubleLayerWrapperYoshida(3, self.mu, self.nu)
+
+    def apply(self, density_vec_sym, qbx_forced_limit, extra_deriv_dirs=()):
+        return self.stresslet.apply_single_and_double_layer(density_vec_sym,
+            [0]*self.dim, [0]*self.dim, qbx_forced_limit, 1, 0,
+            extra_deriv_dirs)
+
+
+# }}}
+
+# {{{ Fu
+
+@dataclass
+class ElasticityDoubleLayerWrapperFu(ElasticityDoubleLayerWrapperBase):
+    r"""ElasticityDoubleLayer wrapper using Fu et al.'s method [1], which uses
+    Laplace derivatives.
+
+    [1] Fu, Y., Klimkowski, K. J., Rodin, G. J., Berger, E., Browne, J. C.,
+    Singer, J. K., ... & Vemaganti, K. S. (1998). A fast solution method for
+    three‐dimensional many‐particle problems of linear elasticity.
+    International Journal for Numerical Methods in Engineering, 42(7), 1215-1229.
+    """
+    dim: int
+    mu: ExpressionT
+    nu: ExpressionT
+
+    def __post_init__(self):
+        if self.dim != 3:
+            raise ValueError("unsupported dimension given to "
+                f"ElasticityDoubleLayerWrapperFu: {self.dim}")
+
+    @cached_property
+    def laplace_kernel(self):
+        return LaplaceKernel(dim=3)
+
+    def apply(self, density_vec_sym, dir_vec_sym, qbx_forced_limit,
+            extra_deriv_dirs=()):
+        return self.apply_single_and_double_layer([0]*self.dim,
+            density_vec_sym, dir_vec_sym, qbx_forced_limit, 0, 1,
+            extra_deriv_dirs)
+
+    def apply_single_and_double_layer(self, stokeslet_density_vec_sym,
+            stresslet_density_vec_sym, dir_vec_sym,
+            qbx_forced_limit, stokeslet_weight, stresslet_weight,
+            extra_deriv_dirs=()):
+
+        mu = self.mu
+        nu = self.nu
+        stokeslet_weight *= -1
+
+        def add_extra_deriv_dirs(target_kernel):
+            for deriv_dir in extra_deriv_dirs:
+                target_kernel = AxisTargetDerivative(deriv_dir, target_kernel)
+            return target_kernel
+
+        def P(i, j, int_g):
+            res = -int_g.copy(target_kernel=add_extra_deriv_dirs(
+                TargetPointMultiplier(j,
+                    AxisTargetDerivative(i, int_g.target_kernel))))
+            if i == j:
+                res += (3 - 4*nu)*int_g.copy(
+                    target_kernel=add_extra_deriv_dirs(int_g.target_kernel))
+            return res / (4*mu*(1 - nu))
+
+        def Q(i, int_g):
+            res = int_g.copy(target_kernel=add_extra_deriv_dirs(
+                AxisTargetDerivative(i, int_g.target_kernel)))
+            return res / (4*mu*(1 - nu))
+
+        def R(i, j, p, int_g):
+            res = int_g.copy(target_kernel=add_extra_deriv_dirs(
+                TargetPointMultiplier(j, AxisTargetDerivative(i,
+                    AxisTargetDerivative(p, int_g.target_kernel)))))
+            if j == p:
+                res += (1 - 2*nu)*int_g.copy(target_kernel=add_extra_deriv_dirs(
+                    AxisTargetDerivative(i, int_g.target_kernel)))
+            if i == j:
+                res -= (1 - 2*nu)*int_g.copy(target_kernel=add_extra_deriv_dirs(
+                    AxisTargetDerivative(p, int_g.target_kernel)))
+            if i == p:
+                res -= 2*(1 - nu)*int_g.copy(target_kernel=add_extra_deriv_dirs(
+                    AxisTargetDerivative(j, int_g.target_kernel)))
+            return res / (2*mu*(1 - nu))
+
+        def S(i, p, int_g):
+            res = int_g.copy(target_kernel=add_extra_deriv_dirs(
+                AxisTargetDerivative(i,
+                    AxisTargetDerivative(p, int_g.target_kernel))))
+            return res / (-2*mu*(1 - nu))
+
+        sym_expr = np.zeros((3,), dtype=object)
+
+        kernel = self.laplace_kernel
+        source = sym.nodes(3).as_vector()
+        normal = dir_vec_sym
+        sigma = stresslet_density_vec_sym
+
+        for i in range(3):
+            for j in range(3):
+                density = stokeslet_weight * stokeslet_density_vec_sym[j]
+                int_g =
sym.IntG(target_kernel=kernel, + source_kernels=(kernel,), + densities=(density,), + qbx_forced_limit=qbx_forced_limit) + sym_expr[i] += P(i, j, int_g) + + density = sum(stokeslet_weight + * stokeslet_density_vec_sym[j] * source[j] for j in range(3)) + int_g = sym.IntG(target_kernel=kernel, + source_kernels=(kernel,), + densities=(density,), + qbx_forced_limit=qbx_forced_limit) + sym_expr[i] += Q(i, int_g) + + for j in range(3): + for p in range(3): + density = stresslet_weight * normal[p] * sigma[j] + int_g = sym.IntG(target_kernel=kernel, + source_kernels=(kernel,), + densities=(density,), + qbx_forced_limit=qbx_forced_limit) + sym_expr[i] += R(i, j, p, int_g) + + for p in range(3): + density = sum(stresslet_weight * normal[p] * sigma[j] * source[j] + for j in range(3)) + int_g = sym.IntG(target_kernel=kernel, + source_kernels=(kernel,), + densities=(density,), + qbx_forced_limit=qbx_forced_limit) + sym_expr[i] += S(i, p, int_g) + + return sym_expr + + +@dataclass +class ElasticityWrapperFu(ElasticityWrapperBase): + r"""Elasticity single layer using Fu et al's method [1] which uses + Laplace derivatives. + + [1] Fu, Y., Klimkowski, K. J., Rodin, G. J., Berger, E., Browne, J. C., + Singer, J. K., ... & Vemaganti, K. S. (1998). A fast solution method for + three‐dimensional many‐particle problems of linear elasticity. + International Journal for Numerical Methods in Engineering, 42(7), 1215-1229. + """ + dim: int + mu: ExpressionT + nu: ExpressionT + + def __post_init__(self): + if not self.dim == 3: + raise ValueError("unsupported dimension given to " + f"ElasticityDoubleLayerWrapperFu: {self.dim}") + + @cached_property + def stresslet(self): + return ElasticityDoubleLayerWrapperFu(3, self.mu, self.nu) + + def apply(self, density_vec_sym, qbx_forced_limit, extra_deriv_dirs=()): + return self.stresslet.apply_single_and_double_layer(density_vec_sym, + [0]*self.dim, [0]*self.dim, qbx_forced_limit, 1, 0, + extra_deriv_dirs) + +# }}} + + +# {{{ Kelvin operator + +class ElasticityOperator: + """ + .. automethod:: __init__ + .. automethod:: get_density_var + .. automethod:: operator + """ + + def __init__( + self, + dim: int, + mu: ExpressionT = _MU_SYM_DEFAULT, + nu: ExpressionT = _NU_SYM_DEFAULT, + method: Method = Method.naive): + + self.dim = dim + self.mu = mu + self.nu = nu + self.method = method + + def get_density_var(self, name="sigma"): + """ + :returns: a (potentially) modified right-hand side *b* that matches + requirements of the representation. + """ + return sym.make_sym_vector(name, self.dim) + + @abstractmethod + def operator(self, sigma): + """ + :returns: the integral operator that should be solved to obtain the + density *sigma*. + """ + raise NotImplementedError + + +class KelvinOperator(ElasticityOperator): + """Representation for free space Green's function for elasticity commonly + known as the Kelvin solution [1] given by Lord Kelvin. + [1] Gimbutas, Z., & Greengard, L. (2016). A fast multipole method for the + evaluation of elastostatic fields in a half-space with zero normal stress. + Advances in Computational Mathematics, 42(1), 175-198. + .. automethod:: __init__ + .. 
automethod:: operator + """ + + def __init__( + self, + mu: ExpressionT = _MU_SYM_DEFAULT, + nu: ExpressionT = _NU_SYM_DEFAULT, + method: Method = Method.naive) -> ElasticityWrapperBase: + + dim = 3 + super().__init__(dim=dim, method=method, mu=mu, nu=nu) + self.double_layer_op = make_elasticity_double_layer_wrapper( + dim=dim, mu=mu, nu=nu, method=method) + self.single_layer_op = make_elasticity_wrapper( + dim=dim, mu=mu, nu=nu, method=method) + self.laplace_kernel = LaplaceKernel(3) + + def operator(self, sigma, *, normal, qbx_forced_limit="avg"): + return self.double_layer_op.apply(sigma, normal, + qbx_forced_limit=qbx_forced_limit) + +# }}} + + +# {{{ Mindlin operator + +class MindlinOperator(ElasticityOperator): + """Representation for elasticity in a half-space with zero normal stress which + is based on Mindlin's explicit solution. See [1] and [2]. + [1] Mindlin, R. D. (1936). Force at a point in the interior of a semi‐infinite + solid. Physics, 7(5), 195-202. + [2] Gimbutas, Z., & Greengard, L. (2016). A fast multipole method for the + evaluation of elastostatic fields in a half-space with zero normal stress. + Advances in Computational Mathematics, 42(1), 175-198. + .. automethod:: __init__ + .. automethod:: operator + .. automethod:: free_space_operator + .. automethod:: get_density_var + """ + + def __init__(self, *, + method: Method = Method.biharmonic, + mu: ExpressionT = _MU_SYM_DEFAULT, + nu: ExpressionT = _NU_SYM_DEFAULT, + line_of_compression_tol: float = 0.0): + + super().__init__(dim=3, method=method, mu=mu, nu=nu) + self.free_space_op = KelvinOperator(method=method, mu=mu, + nu=nu) + self.modified_free_space_op = KelvinOperator( + method=method, mu=mu + 4*nu, nu=-nu) + self.compression_knl = LineOfCompressionKernel(3, 2, mu, nu) + + def K(self, sigma, normal, qbx_forced_limit): + return merge_int_g_exprs(self.free_space_op.double_layer_op.apply( + sigma, normal, qbx_forced_limit=qbx_forced_limit)) + + def A(self, sigma, normal, qbx_forced_limit): + result = -self.modified_free_space_op.double_layer_op.apply( + sigma, normal, qbx_forced_limit=qbx_forced_limit) + + new_density = sum(a*b for a, b in zip(sigma, normal)) + int_g = sym.S(self.free_space_op.laplace_kernel, new_density, + qbx_forced_limit=qbx_forced_limit) + + for i in range(3): + temp = 2*int_g.copy( + target_kernel=AxisTargetDerivative(i, int_g.target_kernel)) + if i == 2: + temp *= -1 + result[i] += temp + return result + + def B(self, sigma, normal, qbx_forced_limit): + sym_expr = np.zeros((3,), dtype=object) + mu = self.mu + nu = self.nu + lam = 2*nu*mu/(1-2*nu) + sigma_normal_product = sum(a*b for a, b in zip(sigma, normal)) + + source_kernel_dirs = [[0, 0], [1, 1], [2, 2], [0, 1]] + densities = [ + sigma[0]*normal[0]*2*mu, + sigma[1]*normal[1]*2*mu, + -sigma[2]*normal[2]*2*mu - 2*lam*sigma_normal_product, + (sigma[0]*normal[1] + sigma[1]*normal[0])*2*mu, + ] + source_kernels = [ + AxisSourceDerivative(a, AxisSourceDerivative(b, self.compression_knl)) + for a, b in source_kernel_dirs + ] + + kwargs = {"qbx_forced_limit": qbx_forced_limit} + args = [arg.loopy_arg.name for arg in self.compression_knl.get_args()] + for arg in args: + kwargs[arg] = pymbolic.var(arg) + + int_g = sym.IntG(source_kernels=tuple(source_kernels), + target_kernel=self.compression_knl, densities=tuple(densities), + **kwargs) + + for i in range(3): + if self.method == Method.naive: + source_kernels = [AxisSourceDerivative(i, knl) for knl in + int_g.source_kernels] + densities = [(-1)*density for density in int_g.densities] + 
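# Without a base kernel to rewrite into, the naive path moves the
+                # target derivative to the source side: since
+                # d/dx G(x - y) = -d/dy G(x - y), each kernel gets an
+                # AxisSourceDerivative and the densities flip sign.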
sym_expr[i] = int_g.copy(source_kernels=tuple(source_kernels), + densities=tuple(densities)) + else: + sym_expr[i] = int_g.copy(target_kernel=AxisTargetDerivative( + i, int_g.target_kernel)) + + return sym_expr + + def C(self, sigma, normal, qbx_forced_limit): + result = np.zeros((3,), dtype=object) + mu = self.mu + nu = self.nu + lam = 2*nu*mu/(1-2*nu) + alpha = (lam + mu)/(lam + 2*mu) + y = sym.nodes(3).as_vector() + sigma_normal_product = sum(a*b for a, b in zip(sigma, normal)) + + laplace_kernel = self.free_space_op.laplace_kernel + + # H in Gimubtas et, al. + densities = [ + (-2)*(2 - alpha)*mu*sigma[0]*normal[0], + (-2)*(2 - alpha)*mu*sigma[1]*normal[1], + (-2)*(2 - alpha)*mu*sigma[2]*normal[2], + (-2)*(2 - alpha)*mu*(sigma[0]*normal[1] + sigma[1]*normal[0]), + (+2)*(2 - alpha)*mu*(sigma[0]*normal[2] + sigma[2]*normal[0]), + (+2)*(2 - alpha)*mu*(sigma[1]*normal[2] + sigma[2]*normal[1]), + ] + source_kernel_dirs = [[0, 0], [1, 1], [2, 2], [0, 1], [0, 2], [1, 2]] + source_kernels = [ + AxisSourceDerivative(a, AxisSourceDerivative(b, laplace_kernel)) + for a, b in source_kernel_dirs + ] + H = sym.IntG(source_kernels=tuple(source_kernels), + target_kernel=laplace_kernel, densities=tuple(densities), + qbx_forced_limit=qbx_forced_limit) + + # phi_c in Gimbutas et, al. + densities = [ + -2*alpha*mu*y[2]*sigma[0]*normal[0], + -2*alpha*mu*y[2]*sigma[1]*normal[1], + -2*alpha*mu*y[2]*sigma[2]*normal[2], + -2*alpha*mu*y[2]*(sigma[0]*normal[1] + sigma[1]*normal[0]), + +2*alpha*mu*y[2]*(sigma[0]*normal[2] + sigma[2]*normal[0]), + +2*alpha*mu*y[2]*(sigma[1]*normal[2] + sigma[2]*normal[1]), + ] + source_kernel_dirs = [[0, 0], [1, 1], [2, 2], [0, 1], [0, 2], [1, 2]] + source_kernels = [ + AxisSourceDerivative(a, AxisSourceDerivative(b, laplace_kernel)) + for a, b in source_kernel_dirs + ] + + # G in Gimbutas et, al. 
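+        # The first-order source-derivative terms below are appended to the
+        # same source_kernels/densities lists, so that the phi_c and G
+        # contributions are evaluated together in a single IntG.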
+ densities.extend([ + (2*alpha - 2)*y[2]*mu*(sigma[0]*normal[2] + sigma[2]*normal[0]), + (2*alpha - 2)*y[2]*mu*(sigma[1]*normal[2] + sigma[2]*normal[1]), + (2*alpha - 2)*y[2]*(mu*-2*sigma[2]*normal[2] + - lam*sigma_normal_product), + ]) + source_kernels.extend( + [AxisSourceDerivative(i, laplace_kernel) for i in range(3)]) + + int_g = sym.IntG(source_kernels=tuple(source_kernels), + target_kernel=laplace_kernel, densities=tuple(densities), + qbx_forced_limit=qbx_forced_limit) + + for i in range(3): + if self.method == Method.naive: + source_kernels = [AxisSourceDerivative(i, knl) for knl in + int_g.source_kernels] + densities = [(-1)*density for density in int_g.densities] + + if i == 2: + # Target derivative w.r.t x[2] is flipped due to target image + densities[2] *= -1 + # Subtract H + source_kernels = source_kernels + list(H.source_kernels) + densities = densities + [(-1)*d for d in H.densities] + + result[i] = int_g.copy(source_kernels=tuple(source_kernels), + densities=tuple(densities)) + else: + result[i] = int_g.copy(target_kernel=AxisTargetDerivative( + i, int_g.target_kernel)) + if i == 2: + # Target derivative w.r.t x[2] is flipped due to target image + result[2] *= -1 + # Subtract H + result[2] -= H + + return result + + def free_space_operator(self, sigma, *, normal, qbx_forced_limit="avg"): + return self.free_space_op.operator(sigma=sigma, normal=normal, + qbx_forced_limit=qbx_forced_limit) + + def operator(self, sigma, *, normal, qbx_forced_limit="avg"): + resultA = self.A(sigma, normal=normal, qbx_forced_limit=qbx_forced_limit) + resultC = self.C(sigma, normal=normal, qbx_forced_limit=qbx_forced_limit) + resultB = self.B(sigma, normal=normal, qbx_forced_limit=qbx_forced_limit) + + if self.method == Method.biharmonic: + # A and C are both derivatives of Biharmonic Green's function + # TODO: make merge_int_g_exprs smart enough to merge two different + # kernels into two separate IntGs. + result = rewrite_using_base_kernel(resultA + resultC, + base_kernel=self.free_space_op.double_layer_op.base_kernel) + result += rewrite_using_base_kernel(resultB, + base_kernel=self.compression_knl) + return result + else: + return resultA + resultB + resultC + + def get_density_var(self, name="sigma"): + """ + :returns: a symbolic vector corresponding to the density. + """ + return sym.make_sym_vector(name, 3) + +# }}} diff --git a/pytential/symbolic/execution.py b/pytential/symbolic/execution.py index 8ece3dceb..8d0253687 100644 --- a/pytential/symbolic/execution.py +++ b/pytential/symbolic/execution.py @@ -870,10 +870,12 @@ def __call__(self, *args, **kwargs): raise TypeError("More than one positional argument supplied. " "None or an PyOpenCLArrayContext expected.") - return self.eval(kwargs, array_context=array_context) + timing_data = kwargs.pop("timing_data", None) + return self.eval(kwargs, array_context=array_context, + timing_data=timing_data) -def bind(places, expr, auto_where=None): +def bind(places, expr, auto_where=None, _merge_exprs=True): """ :arg places: a :class:`pytential.collection.GeometryCollection`. 
Alternatively, any list or mapping that is a valid argument for its @@ -895,6 +897,19 @@ def bind(places, expr, auto_where=None): auto_where = places.auto_where expr = _prepare_expr(places, expr, auto_where=auto_where) + + if _merge_exprs: + from pytential.symbolic.pde.systems import merge_int_g_exprs + from pymbolic.primitives import Expression + from pytential.qbx import QBXLayerPotentialSource + fmmlib = any(value.fmm_backend == "fmmlib" for value + in places.places.values() if isinstance(value, QBXLayerPotentialSource)) + if not fmmlib: + if isinstance(expr, (np.ndarray, list, tuple)): + expr = np.array(merge_int_g_exprs(list(expr)), dtype=object) + elif isinstance(expr, Expression): + expr = merge_int_g_exprs([expr])[0] + return BoundExpression(places, expr) # }}} diff --git a/pytential/symbolic/pde/systems/__init__.py b/pytential/symbolic/pde/systems/__init__.py new file mode 100644 index 000000000..a3239e697 --- /dev/null +++ b/pytential/symbolic/pde/systems/__init__.py @@ -0,0 +1,32 @@ +__copyright__ = "Copyright (C) 2023 Isuru Fernando" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +from .deriv import rewrite_using_base_kernel, get_deriv_relation +from .merge import merge_int_g_exprs +from .reduce import reduce_number_of_fmms + +__all__ = ( + "rewrite_using_base_kernel", + "get_deriv_relation", + "merge_int_g_exprs", + "reduce_number_of_fmms", + ) diff --git a/pytential/symbolic/pde/systems/deriv.py b/pytential/symbolic/pde/systems/deriv.py new file mode 100644 index 000000000..d28eca855 --- /dev/null +++ b/pytential/symbolic/pde/systems/deriv.py @@ -0,0 +1,605 @@ +__copyright__ = "Copyright (C) 2020 Isuru Fernando" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import numpy as np + +import sumpy.symbolic as sym +import pymbolic +from sumpy.kernel import (AxisTargetDerivative, AxisSourceDerivative, + ExpressionKernel, KernelWrapper, TargetPointMultiplier, Kernel, + DirectionalSourceDerivative) +from pytools import (memoize_on_first_arg, + generate_nonnegative_integer_tuples_summing_to_at_most + as gnitstam) + +from pytential.symbolic.primitives import (NodeCoordinateComponent, + hashable_kernel_args, IntG, DEFAULT_SOURCE) +from pytential.symbolic.mappers import IdentityMapper +from pytential.utils import chop, solve_from_lu +import pytential + +from typing import List, Mapping, Text, Any, Union, Tuple, Optional, Sequence +from pytential.symbolic.typing import ExpressionT + +import warnings +from dataclasses import dataclass + +import logging +logger = logging.getLogger(__name__) + +__all__ = ( + "rewrite_using_base_kernel", + "get_deriv_relation", + ) + +__doc__ = """ +.. autofunction:: rewrite_using_base_kernel +.. autofunction:: get_deriv_relation +.. autoclass:: DerivRelation +""" + + +# {{{ rewrite_using_base_kernel + +_NO_ARG_SENTINEL = object() + + +def rewrite_using_base_kernel(exprs: Sequence[ExpressionT], + base_kernel: Kernel = _NO_ARG_SENTINEL) -> List[ExpressionT]: + """ + Rewrites a list of expressions with :class:`~pytential.symbolic.primitives.IntG` + objects using *base_kernel*. Assumes that potentials are smooth, i.e. that + Schwarz's theorem holds. If applied to on-surface evaluation, then the layer + potentials to which this is applied must be one-sided limits, and the potential + must be non-singular (as might occur due to corners). + + For example, if *base_kernel* is the biharmonic kernel, and a Laplace kernel + is encountered, this will (forcibly) rewrite the Laplace kernel in terms of + derivatives of the Biharmonic kernel. + + If *base_kernel* is *None*, the expression list is returned as is. + If *base_kernel* is not given, the method will try to find a base kernel. + This is currently not implemented and will raise a ``NotImplementedError``. + + The routine will fail with a ``RuntimeError`` when *base_kernel* is given, but + method fails to find a way to rewrite. + """ + if base_kernel is None: + return list(exprs) + if base_kernel == _NO_ARG_SENTINEL: + raise NotImplementedError + mapper = RewriteUsingBaseKernelMapper(base_kernel) + return [mapper(expr) for expr in exprs] + + +class RewriteUsingBaseKernelMapper(IdentityMapper): + """Rewrites ``IntG``s using the base kernel. First this method replaces + ``IntG``s with :class:`sumpy.kernel.AxisTargetDerivative` to ``IntG``s + :class:`sumpy.kernel.AxisSourceDerivative` and + ``IntG``s with :class:`sumpy.kernel.TargetPointMultiplier` to ``IntG``s + without them using :class:`sumpy.kernel.ExpressionKernel` + and then converts them to the base kernel by finding + a relationship between the derivatives. 
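+
+    For instance, with :class:`~sumpy.kernel.BiharmonicKernel` as the base
+    kernel in 3D, a Laplace ``IntG`` becomes a linear combination of
+    second-order source derivatives of biharmonic ``IntG``\ s.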
+ """ + def __init__(self, base_kernel): + self.base_kernel = base_kernel + + def map_int_g(self, expr): + # Convert IntGs with TargetPointMultiplier/AxisTargetDerivative to a sum of + # IntGs without TargetPointMultipliers + new_int_gs = convert_target_transformation_to_source(expr) + # Convert IntGs with different kernels to expressions containing + # IntGs with base_kernel or its derivatives + return sum(rewrite_int_g_using_base_kernel(new_int_g, + self.base_kernel) for new_int_g in new_int_gs) + + +def _get_sympy_kernel_expression(expr: ExpressionT, + kernel_arguments: Mapping[Text, Any]) -> sym.Basic: + """Convert a :mod:`pymbolic` expression to :mod:`sympy` expression + after substituting kernel arguments. + + For eg: `exp(I*k*r)/r` with `{k: 1}` is converted to the sympy expression + `exp(I*r)/r` + """ + from pymbolic.mapper.substitutor import substitute + from sumpy.symbolic import PymbolicToSympyMapperWithSymbols + + pymbolic_expr = substitute(expr, kernel_arguments) + + res = PymbolicToSympyMapperWithSymbols()(pymbolic_expr) + return res + + +def _monom_to_expr(monom: Sequence[int], + variables: Sequence[Union[sym.Basic, ExpressionT]]) \ + -> Union[sym.Basic, ExpressionT]: + """Convert a monomial to an expression using given variables. + + For eg: [3, 2, 1] with variables [x, y, z] is converted to x^3 y^2 z. + """ + prod = 1 + for i, nrepeats in enumerate(monom): + for _ in range(nrepeats): + prod *= variables[i] + return prod + + +def convert_target_transformation_to_source(int_g: IntG) -> List[IntG]: + """Convert an ``IntG`` with :class:`sumpy.kernel.AxisTargetDerivative` + or :class:`sumpy.kernel.TargetPointMultiplier` to a list + of ``IntG`` s without them and only source dependent transformations. + The sum of the list returned is equivalent to the input *int_g*. + + For example:: + + IntG(d/dx r, sigma) -> [IntG(d/dy r, -sigma)] + IntG(x*r, sigma) -> [IntG(r, sigma*y), IntG(r*(x -y), sigma)] + """ + import sympy + from pymbolic.interop.sympy import SympyToPymbolicMapper + conv = SympyToPymbolicMapper() + + knl = int_g.target_kernel + if not knl.is_translation_invariant: + warnings.warn(f"Translation variant kernel ({knl}) found.") + return [int_g] + + # we use a symbol for d = (x - y) + ds = sympy.symbols(f"d0:{knl.dim}") + sources = sympy.symbols(f"y0:{knl.dim}") + # instead of just x, we use x = (d + y) + targets = [d + source for d, source in zip(ds, sources)] + orig_expr = sympy.Function("f")(*ds) # pylint: disable=not-callable + expr = orig_expr + found = False + while isinstance(knl, KernelWrapper): + if isinstance(knl, TargetPointMultiplier): + expr = targets[knl.axis] * expr + found = True + elif isinstance(knl, AxisTargetDerivative): + # sympy can't differentiate w.r.t target because + # it's not a symbol, but d/d(x) = d/d(d) + expr = expr.diff(ds[knl.axis]) + found = True + else: + warnings.warn(f"Unknown target kernel ({knl}) found. 
" + "Returning IntG expression unchanged.") + return [int_g] + knl = knl.inner_kernel + + if not found: + return [int_g] + + int_g = int_g.copy(target_kernel=knl) + + sources_pymbolic = [NodeCoordinateComponent(i) for i in range(knl.dim)] + expr = expr.expand() + # Now the expr is an Add and looks like + # u_{d[0], d[1]}(d, y)*d[0]*y[1] + u(d, y) * d[1] + # or a single term like u(d, y) * d[1] + if isinstance(expr, sympy.Add): + kernel_terms = expr.args + else: + kernel_terms = [expr] + + result = [] + for kernel_term in kernel_terms: + deriv_factors = kernel_term.atoms(sympy.Derivative) + if len(deriv_factors) == 1: + # for eg: if kernel_terms is u_{d[0], d[1]}(d, y) * d[0] * y[1] + # deriv_factor is u_{d[0], d[1]} + (deriv_factor,) = deriv_factors + # eg: remaining_factors is d[0] * y[1] + remaining_factors = sympy.Poly(kernel_term.xreplace( + {deriv_factor: 1}), *ds, *sources) + # eg: derivatives is (d[0], 1), (d[1], 1) + derivatives = deriv_factor.args[1:] + elif len(deriv_factors) == 0: + # for eg: we have a term like u(d, y) * d[1] + # remaining_factors = d[1] + remaining_factors = sympy.Poly(kernel_term.xreplace( + {orig_expr: 1}), *ds, *sources) + derivatives = [] + else: + raise AssertionError("impossible condition") + + # apply the derivatives + new_source_kernels = [] + for source_kernel in int_g.source_kernels: + knl = source_kernel + for axis_var, nrepeats in derivatives: + axis = ds.index(axis_var) + for _ in range(nrepeats): + knl = AxisSourceDerivative(axis, knl) + new_source_kernels.append(knl) + new_int_g = int_g.copy(source_kernels=new_source_kernels) + + (monom, coeff,) = remaining_factors.terms()[0] + # Now from d[0]*y[1], we separate the two terms + # d terms end up in the expression and y terms end up in the density + d_terms, y_terms = monom[:len(ds)], monom[len(ds):] + expr_multiplier = _monom_to_expr(d_terms, ds) + density_multiplier = _monom_to_expr(y_terms, sources_pymbolic) \ + * conv(coeff) + # since d/d(d) = - d/d(y), we multiply by -1 to get source derivatives + density_multiplier *= (-1)**int(sum(nrepeats for _, nrepeats in derivatives)) + new_int_gs = _multiply_int_g(new_int_g, sym.sympify(expr_multiplier), + density_multiplier) + result.extend(new_int_gs) + return result + + +def _multiply_int_g(int_g: IntG, expr_multiplier: sym.Basic, + density_multiplier: ExpressionT) -> List[IntG]: + """Multiply the expression in ``IntG`` with the *expr_multiplier* + which is a symbolic (:mod:`sympy` or :mod:`symengine`) expression and + multiply the densities with *density_multiplier* which is a :mod:`pymbolic` + expression. 
+ """ + result = [] + + base_kernel = int_g.target_kernel.get_base_kernel() + sym_d = sym.make_sym_vector("d", base_kernel.dim) + base_kernel_expr = _get_sympy_kernel_expression(base_kernel.expression, + int_g.kernel_arguments) + subst = {pymbolic.var(f"d{i}"): pymbolic.var("d")[i] for i in + range(base_kernel.dim)} + conv = sym.SympyToPymbolicMapper() + + if expr_multiplier == 1: + # if there's no expr_multiplier, only multiply the densities + return [int_g.copy(densities=tuple(density*density_multiplier + for density in int_g.densities))] + + for knl, density in zip(int_g.source_kernels, int_g.densities): + if expr_multiplier == 1: + new_knl = knl.get_base_kernel() + else: + new_expr = conv(knl.postprocess_at_source(base_kernel_expr, sym_d) + * expr_multiplier) + new_expr = pymbolic.substitute(new_expr, subst) + new_knl = ExpressionKernel(knl.dim, new_expr, + knl.get_base_kernel().global_scaling_const, + knl.is_complex_valued) + result.append(int_g.copy(target_kernel=new_knl, + densities=(density*density_multiplier,), + source_kernels=(new_knl,))) + return result + + +def rewrite_int_g_using_base_kernel(int_g: IntG, base_kernel: ExpressionKernel) \ + -> ExpressionT: + """Rewrite an *IntG* to an expression with *IntG*s having the + base kernel *base_kernel*. + """ + result = 0 + for knl, density in zip(int_g.source_kernels, int_g.densities): + result += _rewrite_int_g_using_base_kernel( + int_g.copy(source_kernels=(knl,), densities=(density,)), + base_kernel) + return result + + +def _rewrite_int_g_using_base_kernel(int_g: IntG, base_kernel: ExpressionKernel) \ + -> ExpressionT: + r"""Rewrites an ``IntG`` with only one source kernel to an expression with + ``IntG``\ s having the base kernel *base_kernel*. + """ + target_kernel = int_g.target_kernel.replace_base_kernel(base_kernel) + dim = target_kernel.dim + + result = 0 + + density, = int_g.densities + source_kernel, = int_g.source_kernels + deriv_relation = get_deriv_relation_kernel(source_kernel.get_base_kernel(), + base_kernel, hashable_kernel_arguments=( + hashable_kernel_args(int_g.kernel_arguments))) + + const = deriv_relation.const + # NOTE: we set a dofdesc here to force the evaluation of this integral + # on the source instead of the target when using automatic tagging + # see :meth:`pytential.symbolic.mappers.LocationTagger._default_dofdesc` + if int_g.source.geometry is None: + dd = int_g.source.copy(geometry=DEFAULT_SOURCE) + else: + dd = int_g.source + const *= pytential.sym.integral(dim, dim-1, density, dofdesc=dd) + + if const != 0 and target_kernel != target_kernel.get_base_kernel(): + # There might be some TargetPointMultipliers hanging around. + # FIXME: handle them instead of bailing out + return int_g + + if const != 0 and source_kernel != source_kernel.get_base_kernel(): + # We only handle the case where any source transformation is a derivative + # and the constant when applied becomes zero. 
We bail out if not + knl = source_kernel + while isinstance(knl, KernelWrapper): + if not isinstance(knl, + (AxisSourceDerivative, DirectionalSourceDerivative)): + return int_g + knl = knl.inner_kernel + const = 0 + + result += const + + new_kernel_args = filter_kernel_arguments([base_kernel], + int_g.kernel_arguments) + + for mi, c in deriv_relation.linear_combination: + knl = source_kernel.replace_base_kernel(base_kernel) + for d, val in enumerate(mi): + for _ in range(val): + knl = AxisSourceDerivative(d, knl) + c *= -1 + result += int_g.copy(source_kernels=(knl,), target_kernel=target_kernel, + densities=(density * c,), kernel_arguments=new_kernel_args) + return result + + +@dataclass +class DerivRelation: + """A class to hold the relationship between a kernel and a base kernel. + *linear_combination* is a list of pairs of (mi, coeff). + The relation is given by, + `kernel = const + sum(deriv(base_kernel, mi) * coeff)` + """ + const: ExpressionT + linear_combination: Sequence[Tuple[Tuple[int, ...], ExpressionT]] + + +def get_deriv_relation(kernels: Sequence[ExpressionKernel], + base_kernel: ExpressionKernel, + kernel_arguments: Mapping[Text, Any], + tol: float = 1e-10, + order: Optional[int] = None) \ + -> List[DerivRelation]: + r""" + Given a sequence of *kernels*, a *base_kernel* and an *order*, this + gives a relation between the *base_kernel* and each of the *kernels*. + For each kernel in *kernels* we have that the kernel is equal to the + linear combination of derivatives of *base_kernel* up to the order + *order* and a constant. i.e., + + kernel = \sum_{m \in M(order)} \partial^m baseKernel \partial x^m + + const. + + This is done by sampling the baseKernel and its derivatives at random + points to get a matrix ``A``, then sampling the kernel at the same + points to get a matrix ``b`` and solving for the system ``Ax = b`` using + an LU factorization of ``A``. The solution ``x`` is the vector of weights + in the linear combination. To represent a constant in the relation we + add a column of ones into ``A``. + + When *order* is not given, the algorithm starts with one and increases + the order upto the order of the PDE satisfied by the *base_kernel* until + a relation is found. + """ + res = [] + for knl in kernels: + res.append(get_deriv_relation_kernel(knl, base_kernel, + hashable_kernel_arguments=hashable_kernel_args(kernel_arguments), + tol=tol, order=order)) + return res + + +@memoize_on_first_arg +def get_deriv_relation_kernel(kernel: ExpressionKernel, + base_kernel: ExpressionKernel, + hashable_kernel_arguments: Tuple[Tuple[Text, Any], ...], + tol: float = 1e-10, + order: Optional[int] = None) \ + -> DerivRelation: + """Takes a *kernel* and a base_kernel* as input and re-writes the + *kernel* as a linear combination of derivatives of *base_kernel* up-to + order *order* and a constant. *tol* is an upper limit for small numbers that + are replaced with zero in the numerical procedure. + + :returns: the constant and a list of (multi-index, coeff) to represent the + linear combination of derivatives as a *DerivRelation* object. 
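+
+    A hypothetical use (kernel classes are from :mod:`sumpy.kernel`)::
+
+        from sumpy.kernel import BiharmonicKernel, LaplaceKernel
+
+        rel = get_deriv_relation_kernel(LaplaceKernel(3), BiharmonicKernel(3),
+            hashable_kernel_arguments=())
+        # rel.linear_combination pairs multi-indices such as (2, 0, 0) with
+        # coefficients; rel.const carries any constant term.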
+ """ + kernel_arguments = dict(hashable_kernel_arguments) + lu, rand, mis = _get_base_kernel_matrix_lu_factorization( + base_kernel, + order=order, + hashable_kernel_arguments=hashable_kernel_arguments) + dim = base_kernel.dim + sym_vec = sym.make_sym_vector("d", dim) + sympy_conv = sym.SympyToPymbolicMapper() + + expr = _get_sympy_kernel_expression(kernel.expression, kernel_arguments) + vec = [] + for i in range(len(mis)): + vec.append(evalf(expr.xreplace(dict(zip(sym_vec, rand[:, i]))))) + vec = sym.Matrix(vec) + result = [] + const = 0 + logger.debug("%s = ", kernel) + + sol = solve_from_lu(lu.L, lu.U, lu.perm, vec, lambda expr: expr.expand()) + for i, coeff in enumerate(sol): + coeff = chop(coeff, tol) + if coeff == 0: + continue + if mis[i] != (-1,)*dim: + coeff *= _get_sympy_kernel_expression(kernel.global_scaling_const, + kernel_arguments) + coeff /= _get_sympy_kernel_expression(base_kernel.global_scaling_const, + kernel_arguments) + result.append((mis[i], sympy_conv(coeff))) + logger.debug(" + %s.diff(%s)*%s", base_kernel, mis[i], coeff) + else: + const = sympy_conv(coeff * _get_sympy_kernel_expression( + kernel.global_scaling_const, kernel_arguments)) + logger.debug(" + %s", const) + return DerivRelation(const, result) + + +@dataclass +class LUFactorization: + L: sym.Matrix + U: sym.Matrix + perm: Sequence[Tuple[int, int]] + + +@memoize_on_first_arg +def _get_base_kernel_matrix_lu_factorization(base_kernel: ExpressionKernel, + hashable_kernel_arguments: Tuple[Tuple[Text, Any], ...], + order: Optional[int] = None, retries: int = 3) \ + -> Tuple[LUFactorization, np.ndarray, List[Tuple[int, ...]]]: + """ + Takes a *base_kernel* and samples the kernel and its derivatives upto + order *order*. + + :returns: a tuple with the LU factorization of the sampled matrix, + the sampled points, and the multi-indices corresponding to the + derivatives represented by the rows of the matrix. 
+ """ + dim = base_kernel.dim + + pde = base_kernel.get_pde_as_diff_op() + if order is None: + order = pde.order + + if order > pde.order: + raise NotImplementedError("Computing derivative relation when " + "the base kernel's derivatives are linearly dependent has not" + "been implemented yet.") + + mis = sorted(gnitstam(order, dim), key=sum) + # (-1, -1, -1) represents a constant + # ((0,0,0) would be "function with no derivatives") + mis.append((-1,)*dim) + + if order == pde.order: + pde_mis = [ident.mi for eq in pde.eqs for ident in eq.keys()] + pde_mis = [mi for mi in pde_mis if sum(mi) == order] + logger.debug(f"Removing {pde_mis[-1]} to avoid linear dependent mis") + mis.remove(pde_mis[-1]) + + rand = np.random.randint(1, 10**15, (dim, len(mis))) + rand = rand.astype(object) + for i in range(rand.shape[0]): + for j in range(rand.shape[1]): + rand[i, j] = sym.sympify(rand[i, j])/10**15 + sym_vec = sym.make_sym_vector("d", dim) + + base_expr = _get_sympy_kernel_expression(base_kernel.expression, + dict(hashable_kernel_arguments)) + + mat = [] + for rand_vec_idx in range(rand.shape[1]): + row = [] + for mi in mis[:-1]: + expr = base_expr + for var_idx, nderivs in enumerate(mi): + if nderivs == 0: + continue + expr = expr.diff(sym_vec[var_idx], nderivs) + replace_dict = dict(zip(sym_vec, rand[:, rand_vec_idx])) + eval_expr = evalf(expr.xreplace(replace_dict)) + row.append(eval_expr) + row.append(1) + mat.append(row) + + mat = sym.Matrix(mat) + failed = False + try: + L, U, perm = mat.LUdecomposition() + except RuntimeError: + # symengine throws an error when rank deficient + # and sympy returns U with last row zero + failed = True + + if not sym.USE_SYMENGINE and all(expr == 0 for expr in U[-1, :]): + failed = True + + if failed: + if retries == 0: + # The derivatives of the base kernel are not linearly + # independent. + # TODO: Extract a linearly independent set and return them + raise NotImplementedError("Computing derivative relation when " + "the base kernel's derivatives are linearly dependent has not " + "been implemented yet.") + return _get_base_kernel_matrix_lu_factorization( + base_kernel=base_kernel, + hashable_kernel_arguments=hashable_kernel_arguments, + order=order, + retries=retries-1, + ) + + return LUFactorization(L, U, perm), rand, mis + + +def evalf(expr, prec=100): + """evaluate an expression numerically using ``prec`` + number of bits. + """ + from sumpy.symbolic import USE_SYMENGINE + if USE_SYMENGINE: + return expr.n(prec=prec) + else: + import sympy + dps = int(sympy.log(2**prec, 10)) + return expr.n(n=dps) + + +def filter_kernel_arguments(knls, kernel_arguments): + """From a dictionary of kernel arguments, filter out arguments + that are not needed for the kernels given as a list and return a new + dictionary. 
+ """ + kernel_arg_names = set() + + for kernel in knls: + for karg in (kernel.get_args() + kernel.get_source_args()): + kernel_arg_names.add(karg.loopy_arg.name) + + return {k: v for (k, v) in kernel_arguments.items() if k in kernel_arg_names} + + +# }}} + + +if __name__ == "__main__": + logging.basicConfig(level=logging.DEBUG) + from sumpy.kernel import (StokesletKernel, BiharmonicKernel, # noqa:F401 + StressletKernel, ElasticityKernel, LaplaceKernel) + base_kernel = BiharmonicKernel(2) + #base_kernel = LaplaceKernel(3) + kernels = [StokesletKernel(2, 0, 1), StokesletKernel(2, 0, 0)] + kernels += [StressletKernel(2, 0, 0, 0), StressletKernel(2, 0, 0, 1), + StressletKernel(2, 0, 0, 1), StressletKernel(2, 0, 1, 1)] + get_deriv_relation(kernels, base_kernel, tol=1e-10, order=4, kernel_arguments={}) + + base_kernel = BiharmonicKernel(3) + sym_d = sym.make_sym_vector("d", base_kernel.dim) + sym_r = sym.sqrt(sum(a**2 for a in sym_d)) + conv = sym.SympyToPymbolicMapper() + expression_knl = ExpressionKernel(3, conv(sym_d[0]*sym_d[1]/sym_r**3), 1, False) + expression_knl2 = ExpressionKernel(3, conv(1/sym_r + sym_d[0]*sym_d[0]/sym_r**3), + 1, False) + kernels = [expression_knl, expression_knl2] + get_deriv_relation(kernels, base_kernel, tol=1e-10, order=4, kernel_arguments={}) diff --git a/pytential/symbolic/pde/systems/merge.py b/pytential/symbolic/pde/systems/merge.py new file mode 100644 index 000000000..0f8ec84d8 --- /dev/null +++ b/pytential/symbolic/pde/systems/merge.py @@ -0,0 +1,522 @@ +__copyright__ = "Copyright (C) 2020 Isuru Fernando" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +import numpy as np + +from pymbolic.mapper.coefficient import CoefficientCollector +from pymbolic.geometric_algebra.mapper import WalkMapper +from pymbolic.mapper import CombineMapper + +from sumpy.kernel import (AxisTargetDerivative, AxisSourceDerivative, + KernelWrapper, TargetPointMultiplier, + DirectionalSourceDerivative, DirectionalDerivative) + +from pytential.symbolic.primitives import ( + hashable_kernel_args, hashable_kernel_arg_value) +from pytential.symbolic.mappers import IdentityMapper +from .reduce import reduce_number_of_fmms + +from collections import defaultdict + +import logging +logger = logging.getLogger(__name__) + +__all__ = ( + "merge_int_g_exprs", +) + +__doc__ = """ +.. 
autofunction:: merge_int_g_exprs
+"""
+
+
+# {{{ merge_int_g_exprs
+
+def merge_int_g_exprs(exprs, source_dependent_variables=None):
+    """
+    Merge expressions involving :class:`~pytential.symbolic.primitives.IntG`
+    objects.
+    Several techniques are used for merging and reducing the number of FMMs:
+
+    * :class:`sumpy.kernel.AxisTargetDerivative` instances are converted
+      to :class:`sumpy.kernel.AxisSourceDerivative` instances
+      (by flipping signs, assuming translation invariance).
+      Target derivatives will be brought back by the syzygy module
+      construction below if beneficial.
+      (For example, ``D + d/dx(S)`` can be rewritten as ``D - d/dy(S)``,
+      which can be done in one FMM.)
+    * If there is a sum of two *IntG*\ s with the same target derivative and
+      different source derivatives of the same kernel, they are merged into
+      one FMM.
+    * The number of FMMs is reduced by converting the *IntG* expression to
+      a matrix and factoring the matrix, where the left operand matrix
+      represents a transformation at the target and the right matrix
+      represents a transformation at the source. For this to work, we need
+      to know which variables depend on the source so that they do not end
+      up in the left operand. The user needs to supply these as the argument
+      *source_dependent_variables*. This is done by the call to
+      :func:`pytential.symbolic.pde.systems.reduce.reduce_number_of_fmms`.
+
+    :arg source_dependent_variables: when merging expressions, consider only
+        these variables as dependent on the source. This is important when
+        reducing the number of FMMs needed for the output.
+    """
+    # Using a dictionary instead of a set because sets are unordered
+    all_source_group_identifiers = {}
+
+    result = np.array([0 for _ in exprs], dtype=object)
+
+    int_g_cc = IntGCoefficientCollector()
+    int_gs_by_source_group = defaultdict(list)
+
+    def add_int_gs_in_expr(expr):
+        for int_g in get_int_g_s([expr]):
+            source_group_identifier = get_int_g_source_group_identifier(int_g)
+            int_gs_by_source_group[source_group_identifier].append(int_g)
+            for density in int_g.densities:
+                add_int_gs_in_expr(density)
+
+    for i, expr in enumerate(exprs):
+        int_gs_by_group = {}
+        try:
+            int_g_coeff_map = int_g_cc(expr)
+        except (RuntimeError, AssertionError):
+            # Don't touch this expression, because it's not linear.
+            # FIXME: if there's ever a use case, we can extract
+            # some IntGs from them.
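+            # (For illustration: an expression such as IntG(...) * IntG(...)
+            # is nonlinear in the IntGs and lands here.)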
+            logger.debug("%s is not linear", expr)
+            result[i] += expr
+            add_int_gs_in_expr(expr)
+            continue
+        for int_g, coeff in int_g_coeff_map.items():
+            if int_g == 1:
+                # the coefficient map may have some constant terms;
+                # add them to the result
+                result[i] += coeff
+                continue
+
+            # convert DirectionalSourceDerivative to AxisSourceDerivative
+            # as kernel arguments need to be the same for merging
+            int_g = convert_directional_source_to_axis_source(int_g)
+            # convert TargetDerivative to source before checking the group
+            # as the target kernel has to be the same for merging
+            int_g = convert_target_deriv_to_source(int_g)
+            if not is_expr_target_dependent(coeff):
+                # move the coefficient inside
+                int_g = int_g.copy(densities=[density*coeff for density in
+                    int_g.densities])
+                coeff = 1
+
+            source_group_identifier = get_int_g_source_group_identifier(int_g)
+            target_group_identifier = get_int_g_target_group_identifier(int_g)
+            group = (source_group_identifier, target_group_identifier, coeff)
+
+            all_source_group_identifiers[source_group_identifier] = 1
+
+            if group not in int_gs_by_group:
+                new_int_g = int_g
+            else:
+                prev_int_g = int_gs_by_group[group]
+                # Let's merge IntGs with the same group
+                new_int_g = merge_sum_of_two_int_gs(int_g, prev_int_g)
+            int_gs_by_group[group] = new_int_g
+
+        # Do some simplifications after merging. Not strictly necessary.
+        for (_, _, coeff), int_g in int_gs_by_group.items():
+            # replace an IntG with d axis source derivatives with an IntG
+            # with one directional source derivative
+            # TODO: re-enable this later
+            # result_int_g = convert_axis_source_to_directional_source(int_g)
+            # simplify the densities as they may become large due to pymbolic
+            # not doing automatic simplifications, unlike sympy/symengine
+            result_int_g = int_g.copy(
+                densities=simplify_densities(int_g.densities))
+            result[i] += result_int_g * coeff
+            add_int_gs_in_expr(result_int_g)
+
+    # No IntGs found
+    if all(not int_gs for int_gs in int_gs_by_source_group):
+        return exprs
+
+    # Do the calculation for each source_group_identifier separately
+    # and assemble them
+    replacements = {}
+    for int_gs in int_gs_by_source_group.values():
+        # For each output, we now have a sum of int_gs with
+        # different target attributes,
+        # e.g.: {+}S + {-}D (where {x} is the QBX limit).
+        # We can't merge them together, because merging implies
+        # that everything happens at the source level and therefore
+        # requires the same target attributes.
+        #
+        # To handle this case, we could treat them as having
+        # different source base kernels, but that would imply more
+        # FMMs than necessary.
+        #
+        # Take the following example,
+        #
+        # [ {+}(S + D), {-}S + {avg}D, {avg}S + {-}D]
+        #
+        # If we treated the target attributes separately, then we
+        # would be reducing [{+}(S + D), 0, 0], [0, {-}S, {-}D],
+        # [0, {avg}D, {avg}S] separately, which results in
+        # [{+}(S + D)], [{-}S, {-}D], [{avg}S, {avg}D] as
+        # the reduced FMMs, and pytential would calculate
+        # [S + D, S, D] as three separate FMMs and then assemble
+        # the three outputs by applying target attributes.
+        #
+        # Instead, we can do S, D as two separate FMMs and get the
+        # result for all three outputs. To do that, we will first
+        # get all five expressions in the example
+        # [ {+}(S + D), {-}S, {avg}D, {avg}S, {-}D]
+        # and then remove the target attributes to get
+        # [S + D, S, D].
We will reduce these and restore the target + # attributes at the end + + targetless_int_g_mapping = defaultdict(list) + for int_g in int_gs: + common_int_g = remove_target_attributes(int_g) + targetless_int_g_mapping[common_int_g].append(int_g) + + insns_to_reduce = list(targetless_int_g_mapping.keys()) + reduced_insns = reduce_number_of_fmms(insns_to_reduce, + source_dependent_variables) + + for insn, reduced_insn in zip(insns_to_reduce, reduced_insns): + for int_g in targetless_int_g_mapping[insn]: + replacements[int_g] = restore_target_attributes(reduced_insn, int_g) + + mapper = IntGSubstitutor(replacements) + result = [mapper(expr) for expr in result] + + orig_count = get_number_of_fmms(exprs) + new_count = get_number_of_fmms(result) + if orig_count < new_count: + return exprs + + return result + + +class IntGCoefficientCollector(CoefficientCollector): + def __init__(self): + super().__init__({}) + + def map_int_g(self, expr): + return {expr: 1} + + def map_algebraic_leaf(self, expr, *args, **kwargs): + return {1: expr} + + handle_unsupported_expression = map_algebraic_leaf + + +def get_hashable_kernel_argument(arg): + if hasattr(arg, "__iter__"): + try: + return tuple(arg) + except TypeError: + pass + return arg + + +def get_normal_vector_names(kernel): + """Return the normal vector names in a kernel + """ + normal_vectors = set() + while isinstance(kernel, KernelWrapper): + if isinstance(kernel, DirectionalDerivative): + normal_vectors.add(kernel.dir_vec_name) + kernel = kernel.inner_kernel + return normal_vectors + + +def get_int_g_source_group_identifier(int_g): + """Return a identifier for a group for the *int_g* so that all elements in that + group have the same source attributes. + """ + target_arg_names = get_normal_vector_names(int_g.target_kernel) + args = {k: v for k, v in sorted( + int_g.kernel_arguments.items()) if k not in target_arg_names} + return (int_g.source, hashable_kernel_args(args), + int_g.target_kernel.get_base_kernel()) + + +def get_int_g_target_group_identifier(int_g): + """Return a identifier for a group for the *int_g* so that all elements in that + group have the same target attributes. + """ + target_arg_names = get_normal_vector_names(int_g.target_kernel) + args = {k: v for k, v in sorted( + int_g.kernel_arguments.items()) if k in target_arg_names} + return (int_g.target, int_g.qbx_forced_limit, int_g.target_kernel, + hashable_kernel_args(args)) + + +def filter_kernel_arguments(knls, kernel_arguments): + """From a dictionary of kernel arguments, filter out arguments + that are not needed for the kernels given as a list and return a new + dictionary. + """ + kernel_arg_names = set() + + for kernel in knls: + for karg in (kernel.get_args() + kernel.get_source_args()): + kernel_arg_names.add(karg.loopy_arg.name) + + return {k: v for (k, v) in kernel_arguments.items() if k in kernel_arg_names} + + +def convert_directional_source_to_axis_source(int_g): + """Convert an IntG with a DirectionalSourceDerivative instance + to an IntG with d AxisSourceDerivative instances. 
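+
+    This uses the identity that a directional source derivative with
+    direction vector ``n`` equals the sum over the axes ``d`` of ``n[d]``
+    times the axis-``d`` source derivative, which is what the helper
+    below implements.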
+ """ + source_kernels = [] + densities = [] + for source_kernel, density in zip(int_g.source_kernels, int_g.densities): + knl_result = _convert_directional_source_knl_to_axis_source(source_kernel, + int_g.kernel_arguments) + for knl, coeff in knl_result: + source_kernels.append(knl) + densities.append(coeff * density) + + kernel_arguments = filter_kernel_arguments( + list(source_kernels) + [int_g.target_kernel], int_g.kernel_arguments) + return int_g.copy(source_kernels=tuple(source_kernels), + densities=tuple(densities), kernel_arguments=kernel_arguments) + + +def _convert_directional_source_knl_to_axis_source(knl, knl_arguments): + if isinstance(knl, DirectionalSourceDerivative): + dim = knl.dim + dir_vec = knl_arguments[knl.dir_vec_name] + + res = [] + inner_result = _convert_directional_source_knl_to_axis_source( + knl.inner_kernel, knl_arguments) + for inner_knl, coeff in inner_result: + for d in range(dim): + res.append((AxisSourceDerivative(d, inner_knl), coeff*dir_vec[d])) + return res + elif isinstance(knl, KernelWrapper): + inner_result = _convert_directional_source_knl_to_axis_source( + knl.inner_kernel, knl_arguments) + return [(knl.replace_inner_kernel(inner_knl), coeff) for + inner_knl, coeff in inner_result] + else: + return [(knl, 1)] + + +def convert_target_deriv_to_source(int_g): + """Converts AxisTargetDerivatives to AxisSourceDerivative instances + from an IntG. If there are outer TargetPointMultiplier transformations + they are preserved. + """ + knl = int_g.target_kernel + source_kernels = list(int_g.source_kernels) + coeff = 1 + multipliers = [] + while isinstance(knl, TargetPointMultiplier): + multipliers.append(knl.axis) + knl = knl.inner_kernel + + while isinstance(knl, AxisTargetDerivative): + coeff *= -1 + source_kernels = [AxisSourceDerivative(knl.axis, source_knl) for + source_knl in source_kernels] + knl = knl.inner_kernel + + # TargetPointMultiplier has to be the outermost kernel + # If it is the inner kernel, return early + if isinstance(knl, TargetPointMultiplier): + return int_g + + for axis in reversed(multipliers): + knl = TargetPointMultiplier(axis, knl) + + new_densities = tuple(density*coeff for density in int_g.densities) + return int_g.copy(target_kernel=knl, + densities=new_densities, + source_kernels=tuple(source_kernels)) + + +class IsExprTargetDependent(CombineMapper): + def combine(self, values): + import operator + from functools import reduce + return reduce(operator.or_, values, False) + + def map_constant(self, expr): + return False + + map_variable = map_constant + map_wildcard = map_constant + map_function_symbol = map_constant + + def map_common_subexpression(self, expr): + return self.rec(expr.child) + + def map_coordinate_component(self, expr): + return True + + def map_num_reference_derivative(self, expr): + return True + + def map_q_weight(self, expr): + return True + + +def is_expr_target_dependent(expr): + mapper = IsExprTargetDependent() + return mapper(expr) + + +def merge_kernel_arguments(x, y): + """merge two kernel argument dictionaries and raise a ValueError if + the two dictionaries do not agree for duplicate keys. + """ + res = x.copy() + for k, v in y.items(): + if k in res: + if hashable_kernel_arg_value(res[k]) \ + != hashable_kernel_arg_value(v): + raise ValueError(f"Error merging values for {k}." 
+ f"values were {res[k]} and {v}") + else: + res[k] = v + return res + + +def merge_sum_of_two_int_gs(int_g_1, int_g_2): + kernel_arguments = merge_kernel_arguments(int_g_1.kernel_arguments, + int_g_2.kernel_arguments) + source_kernels = int_g_1.source_kernels + int_g_2.source_kernels + densities = int_g_1.densities + int_g_2.densities + + return int_g_1.copy( + source_kernels=tuple(source_kernels), + densities=tuple(densities), + kernel_arguments=kernel_arguments, + ) + + +def simplify_densities(densities): + """Simplify densities by converting to sympy and converting back + to trigger sympy's automatic simplification routines. + """ + from sumpy.symbolic import (SympyToPymbolicMapper, PymbolicToSympyMapper) + from pymbolic.mapper import UnsupportedExpressionError + to_sympy = PymbolicToSympyMapper() + to_pymbolic = SympyToPymbolicMapper() + result = [] + for density in densities: + try: + result.append(to_pymbolic(to_sympy(density))) + except (ValueError, NotImplementedError, UnsupportedExpressionError): + logger.debug("%s cannot be simplified", density) + result.append(density) + return tuple(result) + + +def remove_target_attributes(int_g): + """Remove target attributes from *int_g* and return an expression + that is common to all expression in the same source group. + """ + normals = get_normal_vector_names(int_g.target_kernel) + kernel_arguments = {k: v for k, v in int_g.kernel_arguments.items() if + k not in normals} + return int_g.copy(target=None, qbx_forced_limit=None, + target_kernel=int_g.target_kernel.get_base_kernel(), + kernel_arguments=kernel_arguments) + + +class IntGSubstitutor(IdentityMapper): + """Replaces IntGs with pymbolic expression given by the + replacements dictionary + """ + def __init__(self, replacements): + self.replacements = replacements + + def map_int_g(self, expr): + if expr in self.replacements: + new_expr = self.replacements[expr] + if new_expr != expr: + return self.rec(new_expr) + else: + expr = new_expr + + densities = [self.rec(density) for density in expr.densities] + return expr.copy(densities=tuple(densities)) + + +class GetIntGs(WalkMapper): + """A Mapper that walks expressions and collects + :class:`~pytential.symbolic.primitives.IntG` objects + """ + def __init__(self): + self.int_g_s = set() + + def map_int_g(self, expr): + self.int_g_s.add(expr) + + def map_constant(self, expr): + pass + + map_variable = map_constant + handle_unsupported_expression = map_constant + + +def get_int_g_s(exprs): + """Returns all :class:`~pytential.symbolic.primitives.IntG` objects + in a list of :mod:`pymbolic` expressions. + """ + get_int_g_mapper = GetIntGs() + [get_int_g_mapper(expr) for expr in exprs] + return get_int_g_mapper.int_g_s + + +def restore_target_attributes(expr, orig_int_g): + """Restore target attributes from *orig_int_g* to all the + :class:`~pytential.symbolic.primitives.IntG` objects in the + input *expr*. 
+ """ + int_gs = get_int_g_s([expr]) + + replacements = { + int_g: int_g.copy(target=orig_int_g.target, + qbx_forced_limit=orig_int_g.qbx_forced_limit, + target_kernel=orig_int_g.target_kernel.replace_base_kernel( + int_g.target_kernel), + kernel_arguments=orig_int_g.kernel_arguments) + for int_g in int_gs} + + substitutor = IntGSubstitutor(replacements) + return substitutor(expr) + + +def get_number_of_fmms(exprs): + fmms = set() + for int_g in get_int_g_s(exprs): + fmms.add(remove_target_attributes(int_g)) + return len(fmms) + +# }}} diff --git a/pytential/symbolic/pde/systems/reduce.py b/pytential/symbolic/pde/systems/reduce.py new file mode 100644 index 000000000..75c352569 --- /dev/null +++ b/pytential/symbolic/pde/systems/reduce.py @@ -0,0 +1,551 @@ +__copyright__ = "Copyright (C) 2021 Isuru Fernando" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. +""" + +from sumpy.kernel import (AxisTargetDerivative, AxisSourceDerivative, + KernelWrapper) + +from pymbolic.interop.sympy import PymbolicToSympyMapper, SympyToPymbolicMapper +from pymbolic.mapper import Mapper +from pymbolic.geometric_algebra.mapper import WalkMapper +from pymbolic.primitives import Product +import sympy +from collections import defaultdict +from math import prod + +import logging +logger = logging.getLogger(__name__) + + +__all__ = ( + "reduce_number_of_fmms", + ) + +__doc__ = """ +.. autofunction:: reduce_number_of_fmms +""" + + +# {{{ Reduce number of FMMs - main routine + +def reduce_number_of_fmms(int_gs, source_dependent_variables): + """ + Reduce the number of FMMs needed for a system of + :class:`~pytential.symbolic.primitives.IntG` objects. + + This is done by converting the ``IntG`` object to a matrix of polynomials + with d variables corresponding to d dimensions, where each variable represents + a (target) derivative operator along one of the axes. All the properties of + derivative operator that we want are reflected in the properties of the + polynomial including addition, multiplication and exact polynomial division. + + This matrix is factored into two matrices, where the left hand side matrix + represents a transformation at the target, and the right hand side matrix + represents a transformation at the source. + + If the expressions given are not linear, then the input expressions are + returned as is. + + :arg int_gs: list of ``IntG`` objects. + + :arg source_dependent_variables: list of :class:`pymbolic.primitives.Expression` + objects. 
When reducing FMMs, consider only these variables as dependent + on source. For eg: densities, source derivative vectors. + + Note: there is no argument for target-dependent variables as the algorithm + assumes that there are no target-dependent variables passed to this function. + (where a "source/target-dependent variable" is a symbolic variable that evaluates + to a vector discretized on the sources/targets) + """ + + dim = int_gs[0].target_kernel.dim + axis_vars = sympy.symbols(f"_x0:{dim}") + + # A high level driver for this function should send int_gs that share all the + # properties except for the densities, source transformations and target + # transformations. + assert _check_int_gs_common(int_gs) + + if source_dependent_variables is None: + source_dependent_variables = get_all_source_dependent_variables(int_gs) + + try: + mat, source_exprs = _create_matrix(int_gs, source_dependent_variables, + axis_vars) + except ValueError: + logger.debug("could not create matrix from %s", int_gs) + return int_gs + + mat = sympy.nsimplify(sympy.Matrix(mat)) + + # Create the quotient ring of polynomials + poly_ring = sympy.EX.old_poly_ring(*axis_vars, order=sympy.grevlex) + try: + pde = int_gs[0].target_kernel.get_base_kernel().get_pde_as_diff_op() + if len(pde.eqs) > 1: + ring = poly_ring + else: + eq = list(pde.eqs)[0] + sym_pde = sum(coeff * prod( + axis_vars[i]**ident.mi[i] for i in range(dim)) + for ident, coeff in eq.items()) + ring = poly_ring / [sym_pde] + # ring = poly_ring + except NotImplementedError: + ring = poly_ring + + # Factor the matrix into two + try: + left_factor, right_factor = factor(mat, axis_vars, ring) + except ValueError: + logger.debug("could not find a factorization for %s", mat) + return int_gs + + # If there are n inputs and m outputs, + # + # - matrix: R^{m x n}, + # - LHS: R^{m x k}, + # - RHS: R^{k x n}. + # + # If k is greater than or equal to n we are gaining nothing. + # Return as is. + if right_factor.shape[0] >= mat.shape[1]: + return int_gs + + base_kernel = int_gs[0].source_kernels[0].get_base_kernel() + base_int_g = int_gs[0].copy(target_kernel=base_kernel, + source_kernels=(base_kernel,), densities=(1,)) + + # Convert polynomials back to IntGs with source derivatives + source_int_gs = [[_convert_source_poly_to_int_g_derivs( + as_poly(expr, axis_vars), base_int_g, + axis_vars) for expr in row] for row in right_factor.tolist()] + + # For each row in the right factor, merge the IntGs to one IntG + # to get a total of k IntGs. + source_int_gs_merged = [] + for i in range(right_factor.shape[0]): + source_kernels = [] + densities = [] + for j in range(right_factor.shape[1]): + if source_int_gs[i][j] == 0: + continue + new_densities = [density * source_exprs[j] for density in + source_int_gs[i][j].densities] + source_kernels.extend(source_int_gs[i][j].source_kernels) + densities.extend(new_densities) + nonzero_intg = source_int_gs[i][j] + source_int_gs_merged.append(nonzero_intg.copy( + source_kernels=tuple(source_kernels), densities=tuple(densities))) + + # Now that we have the IntG expressions depending on the source + # we now have to attach the target dependent derivatives. 
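+    # Schematically, the loop below computes
+    #     res[i] = sum(L[i, j] (applied as target derivatives) acting on
+    #                  source_int_gs_merged[j] for j in range(k))
+    # where k is the number of reduced FMMs.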
+    res = [0]*left_factor.shape[0]
+    for i in range(left_factor.shape[0]):
+        for j in range(left_factor.shape[1]):
+            res[i] += _convert_target_poly_to_int_g_derivs(
+                left_factor[i, j].as_poly(*axis_vars, domain=sympy.EX),
+                int_gs[i], source_int_gs_merged[j])
+
+    return res
+
+
+def as_poly(expr, axis_vars):
+    res = expr.as_poly(*axis_vars, domain=sympy.EX)
+    if res is None:
+        return expr.simplify().as_poly(*axis_vars, domain=sympy.EX)
+    else:
+        return res
+
+
+class GatherAllSourceDependentVariables(WalkMapper):
+    def __init__(self):
+        self.vars = {}
+
+    def map_variable(self, expr):
+        from sumpy.symbolic import SpatialConstant
+        if not isinstance(expr, SpatialConstant):
+            self.vars[expr] = True
+
+    def map_list(self, exprs):
+        for expr in exprs:
+            self.rec(expr)
+
+    def map_int_g(self, expr):
+        self.vars[expr] = True
+        for density in expr.densities:
+            self.rec(density)
+
+    def map_node_coordinate_component(self, expr):
+        self.vars[expr] = True
+
+    map_num_reference_derivative = map_node_coordinate_component
+    map_interpolation = map_node_coordinate_component
+
+
+def get_all_source_dependent_variables(int_gs):
+    mapper = GatherAllSourceDependentVariables()
+    for int_g in int_gs:
+        for density in int_g.densities:
+            mapper(density)
+    return list(mapper.vars.keys())
+
+# }}}
+
+
+# {{{ convert IntG expressions to a matrix
+
+def _check_int_gs_common(int_gs):
+    """Checks whether the :class:`~pytential.symbolic.primitives.IntG`
+    objects have the same base kernel and the other properties that
+    allow merging them.
+    """
+    from pytential.symbolic.pde.systems.merge import merge_kernel_arguments
+
+    kernel_arguments = {}
+    base_kernel = int_gs[0].source_kernels[0].get_base_kernel()
+    common_int_g = int_gs[0].copy(target_kernel=base_kernel,
+        source_kernels=(base_kernel,), densities=(1,))
+
+    base_target_kernel = int_gs[0].target_kernel
+
+    for int_g in int_gs:
+        for source_kernel in int_g.source_kernels:
+            if source_kernel.get_base_kernel() != base_kernel:
+                return False
+
+        if int_g.target_kernel != base_target_kernel:
+            return False
+
+        if common_int_g.qbx_forced_limit != int_g.qbx_forced_limit:
+            return False
+
+        if common_int_g.source != int_g.source:
+            return False
+
+        try:
+            kernel_arguments = merge_kernel_arguments(kernel_arguments,
+                int_g.kernel_arguments)
+        except ValueError:
+            return False
+    return True
+
+
+def _create_matrix(int_gs, source_dependent_variables, axis_vars):
+    """Create a matrix from a list of
+    :class:`~pytential.symbolic.primitives.IntG` objects and return the
+    matrix together with the expressions corresponding to each column.
+    Each expression is an expression containing ``source_dependent_variables``.
+    Each element in the matrix is a multi-variate polynomial, and the
+    variables in the polynomial are from the ``axis_vars`` input. Each
+    polynomial represents a derivative operator.
+
+    The number of rows of the returned matrix is equal to the number of
+    ``int_gs``, and the number of columns is equal to the number of input
+    source-dependent expressions.
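+
+    For instance (an illustrative sketch), the two inputs
+    ``IntG(d/dx1 S, sigma)`` and ``IntG(S, 2*sigma)`` with the single
+    source expression ``sigma`` yield the one-column matrix
+    ``[[-_x1], [2]]``, where ``_x1`` encodes a source derivative along
+    axis 1.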
+ """ + source_exprs = [] + coefficient_collector = CoefficientCollector(source_dependent_variables) + to_sympy = PymbolicToSympyMapper() + matrix = [] + + for int_g in int_gs: + row = [0]*len(source_exprs) + for density, source_kernel in zip(int_g.densities, int_g.source_kernels): + d = coefficient_collector(density) + for source_expr, coeff in d.items(): + if source_expr not in source_exprs: + source_exprs.append(source_expr) + row += [0] + poly = _kernel_source_derivs_as_poly(source_kernel, axis_vars) + row[source_exprs.index(source_expr)] += poly * to_sympy(coeff) + matrix.append(row) + + # At the beginning, we didn't know the number of columns of the matrix. + # Therefore we used a list for rows and they kept expanding. + # Here we are adding zero padding to make the result look like a matrix. + for row in matrix: + row += [0]*(len(source_exprs) - len(row)) + + return matrix, source_exprs + + +class CoefficientCollector(Mapper): + """From a density expression, extracts expressions that need to be + evaluated for each source and coefficients for each expression. + + For eg: when this mapper is given as ``s*(s + 2) + 3`` input, + it returns {s**2: 1, s: 2, 1: 3}. + + This is more general than + :class:`pymbolic.mapper.coefficient.CoefficientCollector` as that deals + only with linear expressions, but this collector works for polynomial + expressions too. + """ + def __init__(self, source_dependent_variables): + self.source_dependent_variables = source_dependent_variables + + def __call__(self, expr): + if expr in self.source_dependent_variables: + return {expr: 1} + return super().__call__(expr) + + def map_sum(self, expr): + stride_dicts = [self.rec(ch) for ch in expr.children] + + result = defaultdict(lambda: 0) + for stride_dict in stride_dicts: + for var, stride in stride_dict.items(): + result[var] += stride + return dict(result) + + def map_algebraic_leaf(self, expr): + if expr in self.source_dependent_variables: + return {expr: 1} + else: + return {1: expr} + + def map_node_coordinate_component(self, expr): + return {expr: 1} + + map_num_reference_derivative = map_node_coordinate_component + + def map_common_subexpression(self, expr): + return {expr: 1} + + def map_subscript(self, expr): + if expr in self.source_dependent_variables or \ + expr.aggregate in self.source_dependent_variables: + return {expr: 1} + else: + return {1: expr} + + def map_constant(self, expr): + return {1: expr} + + def map_product(self, expr): + if len(expr.children) > 2: + # rewrite products of more than two children as a nested + # product and recurse to make it easier to handle. 
+            left = expr.children[0]
+            right = Product(tuple(expr.children[1:]))
+            new_prod = Product((left, right))
+            return self.rec(new_prod)
+        elif len(expr.children) == 1:
+            return self.rec(expr.children[0])
+        elif len(expr.children) == 0:
+            return {1: 1}
+        left, right = expr.children
+        d_left = self.rec(left)
+        d_right = self.rec(right)
+        d = defaultdict(lambda: 0)
+        for var_left, coeff_left in d_left.items():
+            for var_right, coeff_right in d_right.items():
+                d[var_left*var_right] += coeff_left*coeff_right
+        return dict(d)
+
+    def map_quotient(self, expr):
+        d_num = self.rec(expr.numerator)
+        d_den = self.rec(expr.denominator)
+        if len(d_den) > 1:
+            raise ValueError
+        den_var, den_coeff = list(d_den.items())[0]
+
+        return {num_var/den_var: num_coeff/den_coeff for
+            num_var, num_coeff in d_num.items()}
+
+    def map_power(self, expr):
+        d_base = self.rec(expr.base)
+        d_exponent = self.rec(expr.exponent)
+        # d_exponent should look like {1: k}
+        if len(d_exponent) > 1 or 1 not in d_exponent:
+            raise RuntimeError("nonlinear expression")
+        exp, = d_exponent.values()
+        if exp == 1:
+            return d_base
+        if len(d_base) > 1:
+            raise NotImplementedError("powers are not implemented")
+        (var, coeff), = d_base.items()
+        return {var**exp: coeff**exp}
+
+    rec = __call__
+
+
+def _kernel_source_derivs_as_poly(kernel, axis_vars):
+    """Converts a :class:`sumpy.kernel.Kernel` object to a polynomial.
+    A :class:`sumpy.kernel.Kernel` represents a derivative operator,
+    and that derivative operator is converted to a polynomial whose
+    variables are given by ``axis_vars``.
+
+    For example, for a source ``x``, the derivative operator
+    d^2/(dx_1 dx_2) + d/dx_1 is converted to x_1 * x_2 + x_1
+    (up to the sign convention used for source derivatives).
+    """
+    if isinstance(kernel, AxisSourceDerivative):
+        poly = _kernel_source_derivs_as_poly(kernel.inner_kernel, axis_vars)
+        return -axis_vars[kernel.axis]*poly
+    if isinstance(kernel, KernelWrapper):
+        raise ValueError
+    return 1
+
+# }}}
+
+
+# {{{ factor the matrix
+
+def minimal_generating_set(m):
+    """Computes a module with a minimal generating set as its generators
+    from an input module with possibly redundant generators. The output
+    does not necessarily have the smallest minimal generating set.
+    """
+    gens = list(m.gens)
+    nonzero = [x for x in gens if any(y != m.ring.zero for y in x)]
+    basis = nonzero[:]
+    for x in nonzero:
+        others = basis[:]
+        others.remove(x)
+        if x in m.container.submodule(*others):
+            basis = others
+    return m.container.submodule(*basis)
+
+
+def _convert_to_matrix(module, *generators):
+    result = []
+    for syzygy in module:
+        row = []
+        for dmp in syzygy:
+            try:
+                d = dmp.data.to_dict()
+            except AttributeError:
+                d = dmp.to_dict()
+            row.append(sympy.Poly(d, *generators,
+                domain=sympy.EX).as_expr())
+        result.append(row)
+    return sympy.Matrix(result)
+
+
+def syzygy_module(m, generators, ring):
+    """Takes as input a module of polynomials with domain :class:`sympy.EX`,
+    represented as a matrix, and returns its syzygy module.
+    The syzygy module *S* satisfies S m = 0 and is the left nullspace of the
+    matrix. We use :class:`sympy.EX` because it represents a domain with
+    arbitrary symbolic elements. Usually we would need an Integer or Rational
+    domain, but since there can be unrelated symbols like *mu* in the
+    expression, we need to use a symbolic domain.
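+
+    As a toy example, for ``m = Matrix([[x], [y]])`` the syzygy module is
+    generated by ``(y, -x)``, since ``y*x + (-x)*y = 0``.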
+ """ + + module = ring.free_module(m.shape[1]).submodule( + *[m[i, :] for i in range(m.shape[0])]) + return minimal_generating_set(module.syzygy_module()) + + +def factor(mat, axis_vars, ring): + """Return a "rank-revealing" factorisation of the matrix + For a matrix M, we want to find a factorisation such that M = L R + with minimum number of columns of L. The polynomials represent + derivative operators and therefore division is not well defined. + To avoid divisions, we work in a polynomial ring which doesn't + have division either. The revealed rank might not be the actual + rank. + + To get a good factorisation, what we do is first find a matrix + such that S M.T = 0 where S is the syzygy module converted to a matrix. + It can also be referred to as the left nullspace of the matrix. + Then, M S.T = 0 which implies that M is in the space spanned by + the syzygy module of S.T and to get R we get the transpose of that. + """ + if mat.shape[0] < mat.shape[1]: + # For sympy performance, we use a tall and skinny matrix + L, R = factor(mat.T, axis_vars, ring) + return R.T, L.T + + S_module = syzygy_module(mat.T, axis_vars, ring) + S = _convert_to_matrix(S_module.gens, *axis_vars) + + if len(S) == 0: + return mat, sympy.eye(mat.shape[1]) + R_module = syzygy_module(S.T, axis_vars, ring) + R = _convert_to_matrix(R_module.gens, *axis_vars) + L_module = [R_module.in_terms_of_generators(mat[i, :]) + for i in range(mat.shape[0])] + L = _convert_to_matrix(L_module, *axis_vars) + + if 0: + L2 = R.LUsolve(sympy.Matrix(mat), + iszerofunc=lambda x: x.simplify() == 0) + L2 = L2.applyfunc(lambda x: x.simplify()) + return L2, R + + return L, R + +# }}} + + +# {{{ convert factors back into IntGs + +def _convert_source_poly_to_int_g_derivs(poly, orig_int_g, axis_vars): + """This does the opposite of :func:`_kernel_source_derivs_as_poly` + and converts a polynomial back to a source derivative + operator. First it is converted to a :class:`sumpy.kernel.Kernel` + and then to a :class:`~pytential.symbolic.primitives.IntG`. + """ + from pytential.symbolic.pde.systems.merge import simplify_densities + + if poly == 0: + return 0 + + to_pymbolic = SympyToPymbolicMapper() + + orig_kernel = orig_int_g.source_kernels[0] + source_kernels = [] + densities = [] + for monom, coeff in poly.terms(): + kernel = orig_kernel + for idim, rep in enumerate(monom): + for _ in range(rep): + kernel = AxisSourceDerivative(idim, kernel) + source_kernels.append(kernel) + # (-1) below is because d/dx f(c - x) = - f'(c - x) + densities.append(to_pymbolic(coeff) * (-1)**sum(monom)) + return orig_int_g.copy(source_kernels=tuple(source_kernels), + densities=tuple(simplify_densities(densities))) + + +def _convert_target_poly_to_int_g_derivs(poly, orig_int_g, rhs_int_g): + """This does the opposite of :func:`_kernel_source_derivs_as_poly` + and converts a polynomial back to a target derivative + operator. It is applied to a :class:`~pytential.symbolic.primitives.IntG` + object and returns a new instance. 
+ """ + to_pymbolic = SympyToPymbolicMapper() + + result = 0 + for monom, coeff in poly.terms(): + kernel = orig_int_g.target_kernel + for idim, rep in enumerate(monom): + for _ in range(rep): + kernel = AxisTargetDerivative(idim, kernel) + result += orig_int_g.copy(target_kernel=kernel, + source_kernels=rhs_int_g.source_kernels, + densities=rhs_int_g.densities) * to_pymbolic(coeff) + + return result + +# }}} + +# vim: fdm=marker diff --git a/pytential/symbolic/primitives.py b/pytential/symbolic/primitives.py index 869a4e7f7..a771a0d70 100644 --- a/pytential/symbolic/primitives.py +++ b/pytential/symbolic/primitives.py @@ -23,6 +23,7 @@ from sys import intern from warnings import warn from functools import partial +from collections import OrderedDict import numpy as np @@ -1315,14 +1316,15 @@ def laplace(ambient_dim, operand): # {{{ potentials -def hashable_kernel_args(kernel_arguments): - hashable_args = [] - for key, val in sorted(kernel_arguments.items()): - if isinstance(val, np.ndarray): - val = tuple(val) - hashable_args.append((key, val)) +def hashable_kernel_arg_value(val): + if isinstance(val, np.ndarray): + val = tuple(val) + return val + - return tuple(hashable_args) +def hashable_kernel_args(kernel_arguments): + return tuple([(key, hashable_kernel_arg_value(val)) for key, val in + sorted(kernel_arguments.items())]) class IntG(Expression): @@ -1407,6 +1409,18 @@ def __init__(self, target_kernel, source_kernels, densities, raise ValueError("invalid value (%s) of qbx_forced_limit" % qbx_forced_limit) + # Fold duplicates in source_kernels + knl_density_dict = OrderedDict() + for density, source_kernel in zip(densities, source_kernels): + if source_kernel in knl_density_dict: + knl_density_dict[source_kernel] += density + else: + knl_density_dict[source_kernel] = density + knl_density_dict = OrderedDict( + [(k, v) for k, v in knl_density_dict.items() if v]) + densities = tuple(knl_density_dict.values()) + source_kernels = tuple(knl_density_dict.keys()) + source_kernels = tuple(source_kernels) densities = tuple(densities) kernel_arg_names = set() diff --git a/pytential/symbolic/stokes.py b/pytential/symbolic/stokes.py index cfe8bef3c..78aa0f16e 100644 --- a/pytential/symbolic/stokes.py +++ b/pytential/symbolic/stokes.py @@ -1,4 +1,7 @@ -__copyright__ = "Copyright (C) 2017 Natalie Beams" +__copyright__ = """ +Copyright (C) 2017 Natalie Beams +Copyright (C) 2022 Isuru Fernando +""" __license__ = """ Permission is hereby granted, free of charge, to any person obtaining a copy @@ -23,11 +26,25 @@ import numpy as np from pytential import sym -from sumpy.kernel import StokesletKernel, StressletKernel, LaplaceKernel +from pytential.symbolic.pde.systems import rewrite_using_base_kernel +from sumpy.kernel import (LaplaceKernel, BiharmonicKernel, + AxisTargetDerivative, AxisSourceDerivative, TargetPointMultiplier) +from pytential.symbolic.elasticity import (ElasticityWrapperBase, + ElasticityDoubleLayerWrapperBase, + _ElasticityWrapperNaiveOrBiharmonic, + _ElasticityDoubleLayerWrapperNaiveOrBiharmonic, + Method, _MU_SYM_DEFAULT) +from pytential.symbolic.typing import ExpressionT +from dataclasses import dataclass +from functools import cached_property +from abc import abstractmethod __doc__ = """ -.. autoclass:: StokesletWrapper -.. autoclass:: StressletWrapper + +.. autoclass:: StokesletWrapperBase +.. autoclass:: StressletWrapperBase +.. automethod:: pytential.symbolic.stokes.StokesletWrapper +.. automethod:: pytential.symbolic.stokes.StressletWrapper .. autoclass:: StokesOperator .. 
autoclass:: HsiaoKressExteriorStokesOperator @@ -35,174 +52,41 @@ """ -# {{{ StokesletWrapper +# {{{ StokesletWrapper/StressletWrapper base classes -class StokesletWrapper: +class StokesletWrapperBase(ElasticityWrapperBase): """Wrapper class for the :class:`~sumpy.kernel.StokesletKernel` kernel. - This class is meant to shield the user from the messiness of writing - out every term in the expansion of the double-indexed Stokeslet kernel - applied to the density vector. The object is created - to do some of the set-up and bookkeeping once, rather than every - time we want to create a symbolic expression based on the kernel -- say, - once when we solve for the density, and once when we want a symbolic - representation for the solution, for example. - - The :meth:`apply` function returns the integral expressions needed for - the vector velocity resulting from convolution with the vector density, - and is meant to work similarly to calling - :func:`~pytential.symbolic.primitives.S` (which is - :class:`~pytential.symbolic.primitives.IntG`). - - Similar functions are available for other useful things related to - the flow: :meth:`apply_pressure`, :meth:`apply_derivative` (target derivative), - :meth:`apply_stress` (applies symmetric viscous stress tensor in - the requested direction). - - .. attribute:: kernel_dict + In addition to the methods in + :class:`pytential.symbolic.elasticity.ElasticityWrapperBase`, this class + also provides :meth:`apply_stress` which applies symmetric viscous stress tensor + in the requested direction and :meth:`apply_pressure`. - The dictionary allows us to exploit symmetry -- that - :math:`S_{01}` is identical to :math:`S_{10}` -- and avoid creating - multiple expansions for the same kernel in a different ordering. - - .. automethod:: __init__ .. automethod:: apply .. automethod:: apply_pressure .. automethod:: apply_derivative .. automethod:: apply_stress """ + def __init__(self, dim, mu): + super().__init__(dim=dim, mu=mu, nu=0.5) - def __init__(self, dim=None): - self.dim = dim - - if dim == 2: - self.kernel_dict = { - (2, 0): StokesletKernel(dim=2, icomp=0, jcomp=0), - (1, 1): StokesletKernel(dim=2, icomp=0, jcomp=1), - (0, 2): StokesletKernel(dim=2, icomp=1, jcomp=1) - } - - elif dim == 3: - self.kernel_dict = { - (2, 0, 0): StokesletKernel(dim=3, icomp=0, jcomp=0), - (1, 1, 0): StokesletKernel(dim=3, icomp=0, jcomp=1), - (1, 0, 1): StokesletKernel(dim=3, icomp=0, jcomp=2), - (0, 2, 0): StokesletKernel(dim=3, icomp=1, jcomp=1), - (0, 1, 1): StokesletKernel(dim=3, icomp=1, jcomp=2), - (0, 0, 2): StokesletKernel(dim=3, icomp=2, jcomp=2) - } - - else: - raise ValueError("unsupported dimension given to StokesletWrapper") - - def apply(self, density_vec_sym, mu_sym, qbx_forced_limit): - """Symbolic expressions for integrating Stokeslet kernel. - - Returns an object array of symbolic expressions for the vector - resulting from integrating the dyadic Stokeslet kernel with - variable *density_vec_sym*. - - :arg density_vec_sym: a symbolic vector variable for the density vector. - :arg mu_sym: a symbolic variable for the viscosity. - :arg qbx_forced_limit: the *qbx_forced_limit* argument to be passed on - to :class:`~pytential.symbolic.primitives.IntG`. 
- """ - - sym_expr = np.empty((self.dim,), dtype=object) - - for comp in range(self.dim): - - # Start variable count for kernel with 1 for the requested result - # component - base_count = np.zeros(self.dim, dtype=np.int32) - base_count[comp] += 1 - - for i in range(self.dim): - var_ctr = base_count.copy() - var_ctr[i] += 1 - ctr_key = tuple(var_ctr) - - if i < 1: - sym_expr[comp] = sym.int_g_vec( - self.kernel_dict[ctr_key], density_vec_sym[i], - qbx_forced_limit=qbx_forced_limit, mu=mu_sym) - - else: - sym_expr[comp] = sym_expr[comp] + sym.int_g_vec( - self.kernel_dict[ctr_key], density_vec_sym[i], - qbx_forced_limit=qbx_forced_limit, mu=mu_sym) - - return sym_expr - - def apply_pressure(self, density_vec_sym, mu_sym, qbx_forced_limit): + def apply_pressure(self, density_vec_sym, qbx_forced_limit, extra_deriv_dirs=()): """Symbolic expression for pressure field associated with the Stokeslet.""" + # Pressure representation doesn't differ depending on the implementation + # and is implemented in base class here. + lknl = LaplaceKernel(dim=self.dim) - from pytential.symbolic.mappers import DerivativeTaker - kernel = LaplaceKernel(dim=self.dim) - + sym_expr = 0 for i in range(self.dim): - - if i < 1: - sym_expr = DerivativeTaker(i).map_int_g( - sym.int_g_vec(kernel, density_vec_sym[i], - qbx_forced_limit=qbx_forced_limit)) - else: - sym_expr = sym_expr + (DerivativeTaker(i).map_int_g( - sym.int_g_vec(kernel, density_vec_sym[i], - qbx_forced_limit=qbx_forced_limit))) - - return sym_expr - - def apply_derivative(self, deriv_dir, density_vec_sym, - mu_sym, qbx_forced_limit): - """Symbolic derivative of velocity from Stokeslet. - - Returns an object array of symbolic expressions for the vector - resulting from integrating the *deriv_dir* target derivative of the - dyadic Stokeslet kernel with variable *density_vec_sym*. - - :arg deriv_dir: integer denoting the axis direction for the derivative. - :arg density_vec_sym: a symbolic vector variable for the density vector. - :arg mu_sym: a symbolic variable for the viscosity. - :arg qbx_forced_limit: the *qbx_forced_limit* argument to be passed on - to :class:`~pytential.symbolic.primitives.IntG`. - """ - - from pytential.symbolic.mappers import DerivativeTaker - - sym_expr = np.empty((self.dim,), dtype=object) - - for comp in range(self.dim): - - # Start variable count for kernel with 1 for the requested result - # component - base_count = np.zeros(self.dim, dtype=np.int32) - base_count[comp] += 1 - - for i in range(self.dim): - var_ctr = base_count.copy() - var_ctr[i] += 1 - ctr_key = tuple(var_ctr) - - if i < 1: - sym_expr[comp] = DerivativeTaker(deriv_dir).map_int_g( - sym.int_g_vec(self.kernel_dict[ctr_key], - density_vec_sym[i], - qbx_forced_limit=qbx_forced_limit, - mu=mu_sym)) - - else: - sym_expr[comp] = sym_expr[comp] + DerivativeTaker( - deriv_dir).map_int_g( - sym.int_g_vec(self.kernel_dict[ctr_key], - density_vec_sym[i], - qbx_forced_limit=qbx_forced_limit, - mu=mu_sym)) - + deriv_dirs = tuple(extra_deriv_dirs) + (i,) + knl = lknl + for deriv_dir in deriv_dirs: + knl = AxisTargetDerivative(deriv_dir, knl) + sym_expr += sym.int_g_vec(knl, density_vec_sym[i], + qbx_forced_limit=qbx_forced_limit) return sym_expr - def apply_stress(self, density_vec_sym, dir_vec_sym, - mu_sym, qbx_forced_limit): + def apply_stress(self, density_vec_sym, dir_vec_sym, qbx_forced_limit): r"""Symbolic expression for viscous stress applied to a direction. 
Returns a vector of symbolic expressions for the force resulting @@ -216,244 +100,61 @@ def apply_stress(self, density_vec_sym, dir_vec_sym, Note that this computation is very similar to computing a double-layer potential with the Stresslet kernel in - :class:`StressletWrapper`. The difference is that here the direction + :class:`StressletWrapperBase`. The difference is that here the direction vector is applied at the target points, while in the Stresslet the direction is applied at the source points. :arg density_vec_sym: a symbolic vector variable for the density vector. :arg dir_vec_sym: a symbolic vector for the application direction. - :arg mu_sym: a symbolic variable for the viscosity. :arg qbx_forced_limit: the *qbx_forced_limit* argument to be passed on to :class:`~pytential.symbolic.primitives.IntG`. """ + raise NotImplementedError - import itertools - - sym_expr = np.empty((self.dim,), dtype=object) - stresslet_obj = StressletWrapper(dim=self.dim) - - for comp in range(self.dim): - - # Start variable count for kernel with 1 for the requested result - # component - base_count = np.zeros(self.dim, dtype=np.int32) - base_count[comp] += 1 - - for i, j in itertools.product(range(self.dim), range(self.dim)): - var_ctr = base_count.copy() - var_ctr[i] += 1 - var_ctr[j] += 1 - ctr_key = tuple(var_ctr) - - if i + j < 1: - sym_expr[comp] = dir_vec_sym[i] * sym.int_g_vec( - stresslet_obj.kernel_dict[ctr_key], - density_vec_sym[j], - qbx_forced_limit=qbx_forced_limit, mu=mu_sym) - - else: - sym_expr[comp] = sym_expr[comp] + dir_vec_sym[i] * sym.int_g_vec( - stresslet_obj.kernel_dict[ctr_key], - density_vec_sym[j], - qbx_forced_limit=qbx_forced_limit, - mu=mu_sym) - - return sym_expr - -# }}} - - -# {{{ StressletWrapper -class StressletWrapper: +class StressletWrapperBase(ElasticityDoubleLayerWrapperBase): """Wrapper class for the :class:`~sumpy.kernel.StressletKernel` kernel. - This class is meant to shield the user from the messiness of writing - out every term in the expansion of the triple-indexed Stresslet - kernel applied to both a normal vector and the density vector. - The object is created to do some of the set-up and bookkeeping once, - rather than every time we want to create a symbolic expression based - on the kernel -- say, once when we solve for the density, and once when - we want a symbolic representation for the solution, for example. - - The :meth:`apply` function returns the integral expressions needed for - convolving the kernel with a vector density, and is meant to work - similarly to :func:`~pytential.symbolic.primitives.S` (which is - :class:`~pytential.symbolic.primitives.IntG`). - - Similar functions are available for other useful things related to - the flow: :meth:`apply_pressure`, :meth:`apply_derivative` (target derivative), - :meth:`apply_stress` (applies symmetric viscous stress tensor in - the requested direction). - - .. attribute:: kernel_dict - - The dictionary allows us to exploit symmetry -- that - :math:`T_{012}` is identical to :math:`T_{120}` -- and avoid creating - multiple expansions for the same kernel in a different ordering. + In addition to the methods in + :class:`pytential.symbolic.elasticity.ElasticityDoubleLayerWrapperBase`, this + class also provides :meth:`apply_stress` which applies symmetric viscous stress + tensor in the requested direction and :meth:`apply_pressure`. - .. automethod:: __init__ .. automethod:: apply .. automethod:: apply_pressure .. automethod:: apply_derivative .. 
automethod:: apply_stress """ + def __init__(self, dim, mu): + super().__init__(dim=dim, mu=mu, nu=0.5) - def __init__(self, dim=None): - self.dim = dim - - if dim == 2: - self.kernel_dict = { - (3, 0): StressletKernel(dim=2, icomp=0, jcomp=0, kcomp=0), - (2, 1): StressletKernel(dim=2, icomp=0, jcomp=0, kcomp=1), - (1, 2): StressletKernel(dim=2, icomp=0, jcomp=1, kcomp=1), - (0, 3): StressletKernel(dim=2, icomp=1, jcomp=1, kcomp=1) - } - - elif dim == 3: - self.kernel_dict = { - (3, 0, 0): StressletKernel(dim=3, icomp=0, jcomp=0, kcomp=0), - (2, 1, 0): StressletKernel(dim=3, icomp=0, jcomp=0, kcomp=1), - (2, 0, 1): StressletKernel(dim=3, icomp=0, jcomp=0, kcomp=2), - (1, 2, 0): StressletKernel(dim=3, icomp=0, jcomp=1, kcomp=1), - (1, 1, 1): StressletKernel(dim=3, icomp=0, jcomp=1, kcomp=2), - (1, 0, 2): StressletKernel(dim=3, icomp=0, jcomp=2, kcomp=2), - (0, 3, 0): StressletKernel(dim=3, icomp=1, jcomp=1, kcomp=1), - (0, 2, 1): StressletKernel(dim=3, icomp=1, jcomp=1, kcomp=2), - (0, 1, 2): StressletKernel(dim=3, icomp=1, jcomp=2, kcomp=2), - (0, 0, 3): StressletKernel(dim=3, icomp=2, jcomp=2, kcomp=2) - } - - else: - raise ValueError("unsupported dimension given to StressletWrapper") - - def apply(self, density_vec_sym, dir_vec_sym, mu_sym, qbx_forced_limit): - """Symbolic expressions for integrating Stresslet kernel. - - Returns an object array of symbolic expressions for the vector - resulting from integrating the dyadic Stresslet kernel with - variable *density_vec_sym* and source direction vectors *dir_vec_sym*. - - :arg density_vec_sym: a symbolic vector variable for the density vector. - :arg dir_vec_sym: a symbolic vector variable for the direction vector. - :arg mu_sym: a symbolic variable for the viscosity. - :arg qbx_forced_limit: the *qbx_forced_limit* argument to be passed on - to :class:`~pytential.symbolic.primitives.IntG`. + def apply_pressure(self, density_vec_sym, dir_vec_sym, qbx_forced_limit, + extra_deriv_dirs=()): + """Symbolic expression for pressure field associated with the Stresslet. """ + # Pressure representation doesn't differ depending on the implementation + # and is implemented in base class here. import itertools + lknl = LaplaceKernel(dim=self.dim) - sym_expr = np.empty((self.dim,), dtype=object) - - for comp in range(self.dim): - - # Start variable count for kernel with 1 for the requested result - # component - base_count = np.zeros(self.dim, dtype=np.int32) - base_count[comp] += 1 - - for i, j in itertools.product(range(self.dim), range(self.dim)): - var_ctr = base_count.copy() - var_ctr[i] += 1 - var_ctr[j] += 1 - ctr_key = tuple(var_ctr) - - if i + j < 1: - sym_expr[comp] = sym.int_g_vec( - self.kernel_dict[ctr_key], - dir_vec_sym[i] * density_vec_sym[j], - qbx_forced_limit=qbx_forced_limit, mu=mu_sym) - - else: - sym_expr[comp] = sym_expr[comp] + sym.int_g_vec( - self.kernel_dict[ctr_key], - dir_vec_sym[i] * density_vec_sym[j], - qbx_forced_limit=qbx_forced_limit, - mu=mu_sym) - - return sym_expr - - def apply_pressure(self, density_vec_sym, dir_vec_sym, mu_sym, qbx_forced_limit): - """Symbolic expression for pressure field associated with the Stresslet.""" - - import itertools - from pytential.symbolic.mappers import DerivativeTaker - kernel = LaplaceKernel(dim=self.dim) + factor = (2. * self.mu) - factor = (2. 
* mu_sym) + sym_expr = 0 for i, j in itertools.product(range(self.dim), range(self.dim)): - - if i + j < 1: - sym_expr = factor * DerivativeTaker(i).map_int_g( - DerivativeTaker(j).map_int_g( - sym.int_g_vec(kernel, - density_vec_sym[i] * dir_vec_sym[j], - qbx_forced_limit=qbx_forced_limit))) - else: - sym_expr = sym_expr + ( - factor * DerivativeTaker(i).map_int_g( - DerivativeTaker(j).map_int_g( - sym.int_g_vec(kernel, + deriv_dirs = tuple(extra_deriv_dirs) + (i, j) + knl = lknl + for deriv_dir in deriv_dirs: + knl = AxisTargetDerivative(deriv_dir, knl) + sym_expr += factor * sym.int_g_vec(knl, density_vec_sym[i] * dir_vec_sym[j], - qbx_forced_limit=qbx_forced_limit)))) - - return sym_expr - - def apply_derivative(self, deriv_dir, density_vec_sym, dir_vec_sym, - mu_sym, qbx_forced_limit): - """Symbolic derivative of velocity from stresslet. - - Returns an object array of symbolic expressions for the vector - resulting from integrating the *deriv_dir* target derivative of the - dyadic Stresslet kernel with variable *density_vec_sym* and source - direction vectors *dir_vec_sym*. - - :arg deriv_dir: integer denoting the axis direction for the derivative. - :arg density_vec_sym: a symbolic vector variable for the density vector. - :arg dir_vec_sym: a symbolic vector variable for the normal direction. - :arg mu_sym: a symbolic variable for the viscosity. - :arg qbx_forced_limit: the *qbx_forced_limit* argument to be passed on - to :class:`~pytential.symbolic.primitives.IntG`. - """ - - import itertools - from pytential.symbolic.mappers import DerivativeTaker - - sym_expr = np.empty((self.dim,), dtype=object) - - for comp in range(self.dim): - - # Start variable count for kernel with 1 for the requested result - # component - base_count = np.zeros(self.dim, dtype=np.int32) - base_count[comp] += 1 - - for i, j in itertools.product(range(self.dim), range(self.dim)): - var_ctr = base_count.copy() - var_ctr[i] += 1 - var_ctr[j] += 1 - ctr_key = tuple(var_ctr) - - if i + j < 1: - sym_expr[comp] = DerivativeTaker(deriv_dir).map_int_g( - sym.int_g_vec(self.kernel_dict[ctr_key], - dir_vec_sym[i] * density_vec_sym[j], - qbx_forced_limit=qbx_forced_limit, - mu=mu_sym)) - - else: - sym_expr[comp] = sym_expr[comp] + DerivativeTaker( - deriv_dir).map_int_g( - sym.int_g_vec(self.kernel_dict[ctr_key], - dir_vec_sym[i] * density_vec_sym[j], - qbx_forced_limit=qbx_forced_limit, - mu=mu_sym)) + qbx_forced_limit=qbx_forced_limit) return sym_expr def apply_stress(self, density_vec_sym, normal_vec_sym, dir_vec_sym, - mu_sym, qbx_forced_limit): + qbx_forced_limit): r"""Symbolic expression for viscous stress applied to a direction. Returns a vector of symbolic expressions for the force resulting @@ -469,10 +170,57 @@ def apply_stress(self, density_vec_sym, normal_vec_sym, dir_vec_sym, :arg normal_vec_sym: a symbolic vector variable for the normal vectors (outward facing normals at source locations). :arg dir_vec_sym: a symbolic vector for the application direction. - :arg mu_sym: a symbolic variable for the viscosity. :arg qbx_forced_limit: the *qbx_forced_limit* argument to be passed on to :class:`~pytential.symbolic.primitives.IntG`. 
""" + raise NotImplementedError + +# }}} + + +# {{{ Stokeslet/StressletWrapper Naive and Biharmonic + +class _StokesletWrapperNaiveOrBiharmonic(_ElasticityWrapperNaiveOrBiharmonic, + StokesletWrapperBase): + + def apply_pressure(self, density_vec_sym, qbx_forced_limit, + extra_deriv_dirs=()): + sym_expr = super().apply_pressure(density_vec_sym, qbx_forced_limit, + extra_deriv_dirs=extra_deriv_dirs) + res, = rewrite_using_base_kernel([sym_expr], base_kernel=self.base_kernel) + return res + + def apply_stress(self, density_vec_sym, dir_vec_sym, qbx_forced_limit): + + sym_expr = np.zeros((self.dim,), dtype=object) + stresslet_obj = _StressletWrapperNaiveOrBiharmonic(dim=self.dim, + mu=self.mu, nu=0.5, base_kernel=self.base_kernel) + + # For stokeslet, there's no direction vector involved + # passing a list of ones instead to remove its usage. + for comp in range(self.dim): + for i in range(self.dim): + for j in range(self.dim): + sym_expr[comp] += dir_vec_sym[i] * \ + stresslet_obj._get_int_g((comp, i, j), + density_vec_sym[j], [1]*self.dim, + qbx_forced_limit, deriv_dirs=[]) + + return sym_expr + + +class _StressletWrapperNaiveOrBiharmonic( + _ElasticityDoubleLayerWrapperNaiveOrBiharmonic, + StressletWrapperBase): + def apply_pressure(self, density_vec_sym, dir_vec_sym, qbx_forced_limit, + extra_deriv_dirs=()): + sym_expr = super().apply_pressure(density_vec_sym, dir_vec_sym, + qbx_forced_limit, extra_deriv_dirs=extra_deriv_dirs) + res, = rewrite_using_base_kernel([sym_expr], base_kernel=self.base_kernel) + return res + + def apply_stress(self, density_vec_sym, normal_vec_sym, dir_vec_sym, + qbx_forced_limit): sym_expr = np.empty((self.dim,), dtype=object) @@ -480,28 +228,254 @@ def apply_stress(self, density_vec_sym, normal_vec_sym, dir_vec_sym, sym_grad_matrix = np.empty((self.dim, self.dim), dtype=object) for i in range(self.dim): sym_grad_matrix[:, i] = self.apply_derivative(i, density_vec_sym, - normal_vec_sym, mu_sym, qbx_forced_limit) + normal_vec_sym, qbx_forced_limit) for comp in range(self.dim): # First, add the pressure term: sym_expr[comp] = - dir_vec_sym[comp] * self.apply_pressure( density_vec_sym, normal_vec_sym, - mu_sym, qbx_forced_limit) + qbx_forced_limit) # Now add the velocity derivative components for j in range(self.dim): sym_expr[comp] = sym_expr[comp] + ( - dir_vec_sym[j] * mu_sym * ( + dir_vec_sym[j] * self.mu * ( sym_grad_matrix[comp][j] + sym_grad_matrix[j][comp]) ) + return sym_expr + + +class StokesletWrapperNaive(_StokesletWrapperNaiveOrBiharmonic, + ElasticityWrapperBase): + def __init__(self, dim, mu): + super().__init__(dim=dim, mu=mu, nu=0.5, base_kernel=None) + ElasticityWrapperBase.__init__(self, dim=dim, mu=mu, nu=0.5) + + +class StressletWrapperNaive(_StressletWrapperNaiveOrBiharmonic, + ElasticityDoubleLayerWrapperBase): + + def __init__(self, dim, mu): + super().__init__(dim=dim, mu=mu, nu=0.5, base_kernel=None) + ElasticityDoubleLayerWrapperBase.__init__(self, dim=dim, mu=mu, + nu=0.5) + + +class StokesletWrapperBiharmonic(_StokesletWrapperNaiveOrBiharmonic, + ElasticityWrapperBase): + def __init__(self, dim, mu): + super().__init__(dim=dim, mu=mu, nu=0.5, + base_kernel=BiharmonicKernel(dim)) + ElasticityWrapperBase.__init__(self, dim=dim, mu=mu, nu=0.5) + + +class StressletWrapperBiharmonic(_StressletWrapperNaiveOrBiharmonic, + ElasticityDoubleLayerWrapperBase): + def __init__(self, dim, mu): + super().__init__(dim=dim, mu=mu, nu=0.5, + base_kernel=BiharmonicKernel(dim)) + ElasticityDoubleLayerWrapperBase.__init__(self, dim=dim, mu=mu, + nu=0.5) + 
+# }}}


+# {{{ Stokeslet/Stresslet using Laplace (Tornberg)

+@dataclass
+class StokesletWrapperTornberg(StokesletWrapperBase):
+    """A Stokeslet wrapper using Tornberg and Greengard's method which
+    uses Laplace derivatives.
+
+    [1] Tornberg, A. K., & Greengard, L. (2008). A fast multipole method for the
+    three-dimensional Stokes equations.
+    Journal of Computational Physics, 227(3), 1613-1619.
+    """
+    dim: int
+    mu: ExpressionT
+    nu: ExpressionT
+
+    def __post_init__(self):
+        if self.nu != 0.5:
+            raise ValueError("nu != 0.5 is not supported")
+
+    def apply(self, density_vec_sym, qbx_forced_limit, extra_deriv_dirs=()):
+        stresslet = StressletWrapperTornberg(self.dim, self.mu, self.nu)
+        return stresslet.apply_single_and_double_layer(density_vec_sym,
+            [0]*self.dim, [0]*self.dim,
+            qbx_forced_limit=qbx_forced_limit,
+            stokeslet_weight=1,
+            stresslet_weight=0,
+            extra_deriv_dirs=extra_deriv_dirs)
+
+
+@dataclass
+class StressletWrapperTornberg(StressletWrapperBase):
+    """A Stresslet wrapper using Tornberg and Greengard's method which
+    uses Laplace derivatives.
+
+    [1] Tornberg, A. K., & Greengard, L. (2008). A fast multipole method for the
+    three-dimensional Stokes equations.
+    Journal of Computational Physics, 227(3), 1613-1619.
+    """
+    dim: int
+    mu: ExpressionT
+    nu: ExpressionT
+
+    def __post_init__(self):
+        if self.nu != 0.5:
+            raise ValueError("nu != 0.5 is not supported")
+
+    @cached_property
+    def laplace_kernel(self):
+        return LaplaceKernel(dim=self.dim)
+
+    def apply(self, density_vec_sym, dir_vec_sym, qbx_forced_limit,
+            extra_deriv_dirs=()):
+        return self.apply_single_and_double_layer([0]*self.dim,
+            density_vec_sym, dir_vec_sym,
+            qbx_forced_limit=qbx_forced_limit,
+            stokeslet_weight=0,
+            stresslet_weight=1,
+            extra_deriv_dirs=extra_deriv_dirs)
+
+    def _create_int_g(self, target_kernel, source_kernels, densities,
+            qbx_forced_limit):
+        new_source_kernels = []
+        new_densities = []
+        for source_kernel, density in zip(source_kernels, densities):
+            if density != 0.0:
+                new_source_kernels.append(source_kernel)
+                new_densities.append(density)
+        if not new_densities:
+            return 0
+        return sym.IntG(target_kernel=target_kernel,
+            source_kernels=tuple(new_source_kernels),
+            densities=tuple(new_densities),
+            qbx_forced_limit=qbx_forced_limit)
+
+    def apply_single_and_double_layer(self, stokeslet_density_vec_sym,
+            stresslet_density_vec_sym, dir_vec_sym,
+            qbx_forced_limit, stokeslet_weight, stresslet_weight,
+            extra_deriv_dirs=()):
+
+        sym_expr = np.zeros((self.dim,), dtype=object)
+
+        source = sym.nodes(self.dim).as_vector()
+
+        # The paper in [1] ignores the scaling we use for the
+        # Stokeslet/Stresslet and gives formulae for the kernel
+        # expressions only:
+        # stokeslet_weight = StokesletKernel.global_scaling_const /
+        #     LaplaceKernel.global_scaling_const
+        # stresslet_weight = StressletKernel.global_scaling_const /
+        #     LaplaceKernel.global_scaling_const
+        stresslet_weight *= 3.0
+        stokeslet_weight *= -0.5*self.mu**(-1)
+
+        common_source_kernels = [
+            AxisSourceDerivative(k, self.laplace_kernel) for
+            k in range(self.dim)]
+        common_source_kernels.append(self.laplace_kernel)
+
+        for i in range(self.dim):
+            for j in range(self.dim):
+                densities = [(stresslet_weight/6.0)*(
+                    stresslet_density_vec_sym[k] * dir_vec_sym[j]
+                    + stresslet_density_vec_sym[j] * dir_vec_sym[k])
+                    for k in range(self.dim)]
+                densities.append(stokeslet_weight*stokeslet_density_vec_sym[j])
+                target_kernel = TargetPointMultiplier(j,
+                    AxisTargetDerivative(i, self.laplace_kernel))
+                for deriv_dir in extra_deriv_dirs:
+                    
target_kernel = AxisTargetDerivative(deriv_dir, target_kernel) + sym_expr[i] -= self._create_int_g(target_kernel=target_kernel, + source_kernels=tuple(common_source_kernels), + densities=tuple(densities), + qbx_forced_limit=qbx_forced_limit) + + if i == j: + target_kernel = self.laplace_kernel + for deriv_dir in extra_deriv_dirs: + target_kernel = AxisTargetDerivative( + deriv_dir, target_kernel) + + sym_expr[i] += self._create_int_g(target_kernel=target_kernel, + source_kernels=common_source_kernels, + densities=densities, + qbx_forced_limit=qbx_forced_limit) + + common_density0 = sum(source[k] * stresslet_density_vec_sym[k] for + k in range(self.dim)) + common_density1 = sum(source[k] * dir_vec_sym[k] for + k in range(self.dim)) + common_density2 = sum(source[k] * stokeslet_density_vec_sym[k] for + k in range(self.dim)) + densities = [(stresslet_weight/6.0)*(common_density0 * dir_vec_sym[k] + + common_density1 * stresslet_density_vec_sym[k]) for + k in range(self.dim)] + densities.append(stokeslet_weight * common_density2) + + target_kernel = AxisTargetDerivative(i, self.laplace_kernel) + for deriv_dir in extra_deriv_dirs: + target_kernel = AxisTargetDerivative(deriv_dir, target_kernel) + sym_expr[i] += self._create_int_g(target_kernel=target_kernel, + source_kernels=tuple(common_source_kernels), + densities=tuple(densities), + qbx_forced_limit=qbx_forced_limit) return sym_expr # }}} +# {{{ StokesletWrapper dispatch method + +def StokesletWrapper( + dim: int, + mu: ExpressionT = _MU_SYM_DEFAULT, + method: Method = None + ): # noqa: N806 + if method is None: + import warnings + warnings.warn("Method argument not given. Falling back to 'naive'. " + "Method argument will be required in the future.") + method = Method.naive + if method == Method.naive: + return StokesletWrapperNaive(dim=dim, mu=mu) + elif method == Method.biharmonic: + return StokesletWrapperBiharmonic(dim=dim, mu=mu) + elif method == Method.laplace: + return StokesletWrapperTornberg(dim=dim, mu=mu, nu=0.5) + else: + raise ValueError(f"invalid method: {method}." + "Needs to be one of naive, laplace, biharmonic") + + +def StressletWrapper( + dim: int, + mu: ExpressionT = _MU_SYM_DEFAULT, + method: Method = None + ): # noqa: N806 + if method is None: + import warnings + warnings.warn("Method argument not given. Falling back to 'naive'. " + "Method argument will be required in the future.") + method = Method.naive + if method == Method.naive: + return StressletWrapperNaive(dim=dim, mu=mu) + elif method == Method.biharmonic: + return StressletWrapperBiharmonic(dim=dim, mu=mu) + elif method == Method.laplace: + return StressletWrapperTornberg(dim=dim, mu=mu, nu=0.5) + else: + raise ValueError(f"invalid method: {method}." + "Needs to be one of naive, laplace, biharmonic") + +# }}} + + # {{{ base Stokes operator class StokesOperator: @@ -518,20 +492,34 @@ class StokesOperator: .. automethod:: pressure """ - def __init__(self, ambient_dim, side): + def __init__(self, ambient_dim, side, stokeslet, stresslet, mu): """ :arg ambient_dim: dimension of the ambient space. :arg side: :math:`+1` for exterior or :math:`-1` for interior. """ - if side not in [+1, -1]: raise ValueError(f"invalid evaluation side: {side}") self.ambient_dim = ambient_dim self.side = side - self.stresslet = StressletWrapper(dim=self.ambient_dim) - self.stokeslet = StokesletWrapper(dim=self.ambient_dim) + if mu is not None: + import warnings + warnings.warn("Explicitly giving mu is deprecated. 
" + "Use stokeslet and stresslet arguments.") + else: + mu = _MU_SYM_DEFAULT + + if stresslet is None: + stresslet = StressletWrapper(dim=self.ambient_dim, + mu=mu) + + if stokeslet is None: + stokeslet = StokesletWrapper(dim=self.ambient_dim, + mu=mu) + + self.stokeslet = stokeslet + self.stresslet = stresslet @property def dim(self): @@ -543,13 +531,14 @@ def get_density_var(self, name="sigma"): """ return sym.make_sym_vector(name, self.ambient_dim) - def prepare_rhs(self, b, *, mu): + def prepare_rhs(self, b): """ :returns: a (potentially) modified right-hand side *b* that matches requirements of the representation. """ return b + @abstractmethod def operator(self, sigma): """ :returns: the integral operator that should be solved to obtain the @@ -557,13 +546,15 @@ def operator(self, sigma): """ raise NotImplementedError - def velocity(self, sigma, *, normal, mu, qbx_forced_limit=None): + @abstractmethod + def velocity(self, sigma, *, normal, qbx_forced_limit=None): """ :returns: a representation of the velocity field in the Stokes flow. """ raise NotImplementedError - def pressure(self, sigma, *, normal, mu, qbx_forced_limit=None): + @abstractmethod + def pressure(self, sigma, *, normal, qbx_forced_limit=None): """ :returns: a representation of the pressure in the Stokes flow. """ @@ -587,7 +578,8 @@ class HsiaoKressExteriorStokesOperator(StokesOperator): .. automethod:: __init__ """ - def __init__(self, *, omega, alpha=None, eta=None): + def __init__(self, *, omega, alpha=1.0, eta=1.0, + stokeslet=None, stresslet=None, mu=None): r""" :arg omega: farfield behaviour of the velocity field, as defined by :math:`A` in [HsiaoKress1985]_ Equation 2.3. @@ -595,7 +587,8 @@ def __init__(self, *, omega, alpha=None, eta=None): :arg eta: real parameter :math:`\eta > 0`. Choosing this parameter well can have a non-trivial effect on the conditioning. """ - super().__init__(ambient_dim=2, side=+1) + super().__init__(ambient_dim=2, side=+1, stokeslet=stokeslet, + stresslet=stresslet, mu=mu) # NOTE: in [hsiao-kress], there is an analysis on a circle, which # recommends values in @@ -603,61 +596,50 @@ def __init__(self, *, omega, alpha=None, eta=None): # so we choose alpha = eta = 1, which seems to be in line with some # of the presented numerical results too. 
- if alpha is None: - alpha = 1.0 - - if eta is None: - eta = 1.0 - self.omega = omega self.alpha = alpha self.eta = eta - def _farfield(self, mu, qbx_forced_limit): - length = sym.integral(self.ambient_dim, self.dim, 1) - return self.stokeslet.apply( - -self.omega / length, - mu, - qbx_forced_limit=qbx_forced_limit) - - def _operator(self, sigma, normal, mu, qbx_forced_limit): - slp_qbx_forced_limit = qbx_forced_limit - if slp_qbx_forced_limit == "avg": - slp_qbx_forced_limit = +1 + def _farfield(self, qbx_forced_limit): + source_dofdesc = sym.DOFDescriptor(None, discr_stage=sym.QBX_SOURCE_STAGE1) + length = sym.integral(self.ambient_dim, self.dim, 1, dofdesc=source_dofdesc) + result = self.stresslet.apply_single_and_double_layer( + -self.omega / length, [0]*self.ambient_dim, [0]*self.ambient_dim, + qbx_forced_limit=qbx_forced_limit, stokeslet_weight=1, + stresslet_weight=0) + return result + def _operator(self, sigma, normal, qbx_forced_limit): # NOTE: we set a dofdesc here to force the evaluation of this integral # on the source instead of the target when using automatic tagging # see :meth:`pytential.symbolic.mappers.LocationTagger._default_dofdesc` dd = sym.DOFDescriptor(None, discr_stage=sym.QBX_SOURCE_STAGE1) - int_sigma = sym.integral(self.ambient_dim, self.dim, sigma, dofdesc=dd) - meanless_sigma = sym.cse(sigma - sym.mean(self.ambient_dim, self.dim, sigma)) - op_k = self.stresslet.apply(sigma, normal, mu, - qbx_forced_limit=qbx_forced_limit) - op_s = ( - self.alpha / (2.0 * np.pi) * int_sigma - - self.stokeslet.apply(meanless_sigma, mu, - qbx_forced_limit=slp_qbx_forced_limit) - ) + meanless_sigma = sym.cse(sigma - sym.mean(self.ambient_dim, + self.dim, sigma, dofdesc=dd)) + + result = self.eta * self.alpha / (2.0 * np.pi) * int_sigma + result += self.stresslet.apply_single_and_double_layer(meanless_sigma, + sigma, normal, qbx_forced_limit=qbx_forced_limit, + stokeslet_weight=-self.eta, stresslet_weight=1) - return op_k + self.eta * op_s + return result - def prepare_rhs(self, b, *, mu): - return b + self._farfield(mu, qbx_forced_limit=+1) + def prepare_rhs(self, b): + return b + self._farfield(qbx_forced_limit=+1) - def operator(self, sigma, *, normal, mu): + def operator(self, sigma, *, normal, qbx_forced_limit="avg"): # NOTE: H. K. 1985 Equation 2.18 - return -0.5 * self.side * sigma - self._operator(sigma, normal, mu, "avg") + return -0.5 * self.side * sigma - self._operator( + sigma, normal, qbx_forced_limit) - def velocity(self, sigma, *, normal, mu, qbx_forced_limit=2): + def velocity(self, sigma, *, normal, qbx_forced_limit=2): # NOTE: H. K. 1985 Equation 2.16 - return ( - -self._farfield(mu, qbx_forced_limit) - - self._operator(sigma, normal, mu, qbx_forced_limit) - ) + return -self._farfield(qbx_forced_limit) \ + - self._operator(sigma, normal, qbx_forced_limit) - def pressure(self, sigma, *, normal, mu, qbx_forced_limit=2): + def pressure(self, sigma, *, normal, qbx_forced_limit=2): # FIXME: H. K. 1985 Equation 2.17 raise NotImplementedError @@ -675,13 +657,14 @@ class HebekerExteriorStokesOperator(StokesOperator): .. automethod:: __init__ """ - def __init__(self, *, eta=None): + def __init__(self, *, eta=None, stokeslet=None, stresslet=None, mu=None): r""" :arg eta: a parameter :math:`\eta > 0`. Choosing this parameter well can have a non-trivial effect on the conditioning of the operator. """ - super().__init__(ambient_dim=3, side=+1) + super().__init__(ambient_dim=3, side=+1, stokeslet=stokeslet, + stresslet=stresslet, mu=mu) # NOTE: eta is chosen here based on H. 
1986 Figure 1, which is # based on solving on the unit sphere @@ -689,28 +672,25 @@ def __init__(self, *, eta=None): eta = 0.75 self.eta = eta + self.laplace_kernel = LaplaceKernel(3) - def _operator(self, sigma, normal, mu, qbx_forced_limit): - slp_qbx_forced_limit = qbx_forced_limit - if slp_qbx_forced_limit == "avg": - slp_qbx_forced_limit = self.side - - op_w = self.stresslet.apply(sigma, normal, mu, - qbx_forced_limit=qbx_forced_limit) - op_v = self.stokeslet.apply(sigma, mu, - qbx_forced_limit=slp_qbx_forced_limit) - - return op_w + self.eta * op_v + def _operator(self, sigma, normal, qbx_forced_limit): + result = self.stresslet.apply_single_and_double_layer(sigma, + sigma, normal, qbx_forced_limit=qbx_forced_limit, + stokeslet_weight=self.eta, stresslet_weight=1, + extra_deriv_dirs=()) + return result - def operator(self, sigma, *, normal, mu): + def operator(self, sigma, *, normal, qbx_forced_limit="avg"): # NOTE: H. 1986 Equation 17 - return -0.5 * self.side * sigma - self._operator(sigma, normal, mu, "avg") + return -0.5 * self.side * sigma - self._operator(sigma, + normal, qbx_forced_limit) - def velocity(self, sigma, *, normal, mu, qbx_forced_limit=2): + def velocity(self, sigma, *, normal, qbx_forced_limit=2): # NOTE: H. 1986 Equation 16 - return -self._operator(sigma, normal, mu, qbx_forced_limit) + return -self._operator(sigma, normal, qbx_forced_limit) - def pressure(self, sigma, *, normal, mu, qbx_forced_limit=2): + def pressure(self, sigma, *, normal, qbx_forced_limit=2): # FIXME: not given in H. 1986, but should be easy to derive using the # equivalent single-/double-layer pressure kernels raise NotImplementedError diff --git a/pytential/symbolic/typing.py b/pytential/symbolic/typing.py new file mode 100644 index 000000000..e0e23d063 --- /dev/null +++ b/pytential/symbolic/typing.py @@ -0,0 +1,29 @@ +__copyright__ = "Copyright (C) Isuru Fernando" + +__license__ = """ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+""" + + +from typing import Union +import numpy as np +from pymbolic.primitives import Expression + +IntegralT = Union[int, np.integer] +FloatT = Union[float, complex, np.floating, np.complexfloating] + +ExpressionT = Union[IntegralT, FloatT, Expression] diff --git a/pytential/unregularized.py b/pytential/unregularized.py index 08239a2d9..be5e51b36 100644 --- a/pytential/unregularized.py +++ b/pytential/unregularized.py @@ -119,6 +119,7 @@ def evaluate_wrapper(expr): def op_group_features(self, expr): from pytential.utils import sort_arrays_together + from sumpy.kernel import TargetTransformationRemover result = ( expr.source, *sort_arrays_together(expr.source_kernels, expr.densities, key=str), diff --git a/pytential/utils.py b/pytential/utils.py index 1863a30fd..9010e3baa 100644 --- a/pytential/utils.py +++ b/pytential/utils.py @@ -1,5 +1,5 @@ __copyright__ = """ -Copyright (C) 2020 Matt Wala +Copyright (C) 2020 Isuru Fernando """ __license__ = """ @@ -22,6 +22,9 @@ THE SOFTWARE. """ +import sumpy.symbolic as sym +from typing import Iterable, Callable + def sort_arrays_together(*arys, key=None): """Sort a sequence of arrays by considering them @@ -32,4 +35,88 @@ def sort_arrays_together(*arys, key=None): """ return zip(*sorted(zip(*arys), key=key)) + +def chop(expr: sym.Basic, tol) -> sym.Basic: + """Given a symbolic expression, remove all occurences of numbers + with absolute value less than a given tolerance and replace floating + point numbers that are close to an integer up to a given relative + tolerance by the integer. + """ + nums = expr.atoms(sym.Number) + replace_dict = {} + for num in nums: + if float(abs(num)) < tol: + replace_dict[num] = 0 + else: + new_num = float(num) + if abs((int(new_num) - new_num)/new_num) < tol: + new_num = int(new_num) + replace_dict[num] = new_num + return expr.xreplace(replace_dict) + + +def forward_substitution( + L: sym.Matrix, + b: sym.Matrix, + postprocess_division: Callable[[sym.Basic], sym.Basic], + ) -> sym.Matrix: + """Given a lower triangular matrix *L* and a column vector *b*, + solve the system ``Lx = b`` and apply the callable *postprocess_division* + on each expression at the end of division calls. + """ + n = len(b) + res = sym.Matrix(b) + for i in range(n): + for j in range(i): + res[i] -= L[i, j]*res[j] + res[i] = postprocess_division(res[i] / L[i, i]) + return res + + +def backward_substitution( + U: sym.Matrix, + b: sym.Matrix, + postprocess_division: Callable[[sym.Basic], sym.Basic], + ) -> sym.Matrix: + """Given an upper triangular matrix *U* and a column vector *b*, + solve the system ``Ux = b`` and apply the callable *postprocess_division* + on each expression at the end of division calls. 
+    """
+    n = len(b)
+    res = sym.Matrix(b)
+    for i in range(n-1, -1, -1):
+        for j in range(n - 1, i, -1):
+            res[i] -= U[i, j]*res[j]
+        res[i] = postprocess_division(res[i] / U[i, i])
+    return res
+
+
+def solve_from_lu(
+        L: sym.Matrix,
+        U: sym.Matrix,
+        perm: Iterable[Tuple[int, int]],
+        b: sym.Matrix,
+        postprocess_division: Callable[[sym.Basic], sym.Basic]
+        ) -> sym.Matrix:
+    """Given an LU factorization and a vector, solve a linear
+    system with intermediate results expanded to avoid
+    an explosion of the expression trees.
+
+    :param L: lower triangular matrix
+    :param U: upper triangular matrix
+    :param perm: permutation given as a sequence of row swaps ``(p, q)``,
+        as returned by ``LUdecomposition``
+    :param b: a column vector to solve for
+    :param postprocess_division: callable that is called after each division
+    """
+    # Apply the row swaps first
+    res = sym.Matrix(b)
+    for p, q in perm:
+        res[p], res[q] = res[q], res[p]
+
+    return backward_substitution(
+        U,
+        forward_substitution(L, res, postprocess_division),
+        postprocess_division,
+    )
+
 # vim: foldmethod=marker
diff --git a/test/test_pde_system_utils.py b/test/test_pde_system_utils.py
new file mode 100644
index 000000000..3b62518ef
--- /dev/null
+++ b/test/test_pde_system_utils.py
@@ -0,0 +1,503 @@
+__copyright__ = "Copyright (C) 2022 Isuru Fernando"
+
+__license__ = """
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+""" + +from pytential.symbolic.primitives import (IntG, int_g_vec, D, + NodeCoordinateComponent) +from pytential import sym +import pytential +from pytential.symbolic.pde.systems import (merge_int_g_exprs, + rewrite_using_base_kernel) +from pytential.symbolic.pde.systems.deriv import ( + convert_target_transformation_to_source, rewrite_int_g_using_base_kernel) +from pytential.symbolic.pde.systems.merge import get_number_of_fmms +from pytential.symbolic.elasticity import MindlinOperator, Method + +import numpy as np + +from sumpy.kernel import ( + LaplaceKernel, HelmholtzKernel, ExpressionKernel, BiharmonicKernel, + StokesletKernel, DirectionalTargetDerivative, + AxisTargetDerivative, TargetPointMultiplier, AxisSourceDerivative) +from sumpy.symbolic import USE_SYMENGINE + +from pymbolic.primitives import make_sym_vector, Variable +import pymbolic.primitives as prim + + +def test_convert_target_deriv(): + knl = LaplaceKernel(2) + int_g = IntG(AxisTargetDerivative(0, knl), [AxisSourceDerivative(1, knl), knl], + [1, 2], qbx_forced_limit=1) + expected_int_g = IntG(knl, + [AxisSourceDerivative(0, AxisSourceDerivative(1, knl)), + AxisSourceDerivative(0, knl)], [-1, -2], qbx_forced_limit=1) + + assert sum(convert_target_transformation_to_source(int_g)) == expected_int_g + + +def test_convert_target_point_multiplier(): + xs = sym.nodes(3).as_vector() + + knl = LaplaceKernel(3) + int_g = IntG(TargetPointMultiplier(0, knl), [AxisSourceDerivative(1, knl), knl], + [1, 2], qbx_forced_limit=1) + + d = make_sym_vector("d", 3) + + r2 = d[2]**2 + d[1]**2 + d[0]**2 + eknl0 = ExpressionKernel(3, d[1]*d[0]*r2**prim.Quotient(-3, 2), + knl.global_scaling_const, False) + eknl2 = ExpressionKernel(3, d[0]*r2**prim.Quotient(-1, 2), + knl.global_scaling_const, False) + + r2 = d[0]**2 + d[1]**2 + d[2]**2 + eknl1 = ExpressionKernel(3, d[0]*d[1]*r2**prim.Quotient(-3, 2), + knl.global_scaling_const, False) + eknl3 = ExpressionKernel(3, d[0]*r2**prim.Quotient(-1, 2), + knl.global_scaling_const, False) + + possible_int_g1 = IntG(eknl0, [eknl0], [1], qbx_forced_limit=1) + \ + IntG(eknl2, [eknl2], [2], qbx_forced_limit=1) + \ + IntG(knl, [AxisSourceDerivative(1, knl), knl], + [xs[0], 2*xs[0]], qbx_forced_limit=1) + + possible_int_g2 = IntG(eknl1, [eknl1], [1], qbx_forced_limit=1) + \ + IntG(eknl3, [eknl3], [2], qbx_forced_limit=1) + \ + IntG(knl, [AxisSourceDerivative(1, knl), knl], + [xs[0], 2*xs[0]], qbx_forced_limit=1) + + assert sum(convert_target_transformation_to_source(int_g)) in \ + [possible_int_g1, possible_int_g2] + + +def test_product_rule(): + xs = sym.nodes(3).as_vector() + + knl = LaplaceKernel(3) + int_g = IntG(AxisTargetDerivative(0, TargetPointMultiplier(0, knl)), [knl], [1], + qbx_forced_limit=1) + + d = make_sym_vector("d", 3) + r2 = d[2]**2 + d[1]**2 + d[0]**2 + eknl0 = ExpressionKernel(3, d[0]**2*r2**prim.Quotient(-3, 2), + knl.global_scaling_const, False) + r2 = d[0]**2 + d[1]**2 + d[2]**2 + eknl1 = ExpressionKernel(3, d[0]**2*r2**prim.Quotient(-3, 2), + knl.global_scaling_const, False) + + possible_int_g1 = IntG(eknl0, [eknl0], [-1], qbx_forced_limit=1) + \ + IntG(knl, [AxisSourceDerivative(0, knl)], [xs[0]*(-1)], qbx_forced_limit=1) + possible_int_g2 = IntG(eknl1, [eknl1], [-1], qbx_forced_limit=1) + \ + IntG(knl, [AxisSourceDerivative(0, knl)], [xs[0]*(-1)], qbx_forced_limit=1) + + assert sum(convert_target_transformation_to_source(int_g)) in \ + [possible_int_g1, possible_int_g2] + + +def test_convert_helmholtz(): + xs = sym.nodes(3).as_vector() + + knl = HelmholtzKernel(3) + int_g = 
IntG(TargetPointMultiplier(0, knl), [knl], [1], + qbx_forced_limit=1, k=1) + + d = make_sym_vector("d", 3) + exp = prim.Variable("exp") + + if USE_SYMENGINE: + r2 = d[2]**2 + d[1]**2 + d[0]**2 + eknl = ExpressionKernel(3, exp(1j*r2**prim.Quotient(1, 2))*d[0] + * r2**prim.Quotient(-1, 2), + knl.global_scaling_const, knl.is_complex_valued) + else: + r2 = d[0]**2 + d[1]**2 + d[2]**2 + eknl = ExpressionKernel(3, d[0]*r2**prim.Quotient(-1, 2) + * exp(1j*r2**prim.Quotient(1, 2)), + knl.global_scaling_const, knl.is_complex_valued) + + expected_int_g = IntG(eknl, [eknl], [1], qbx_forced_limit=1, + kernel_arguments={"k": 1}) + \ + IntG(knl, [knl], [xs[0]], qbx_forced_limit=1, k=1) + + assert expected_int_g == sum(convert_target_transformation_to_source(int_g)) + + +def test_convert_int_g_base(): + knl = LaplaceKernel(3) + int_g = IntG(knl, [knl], [1], qbx_forced_limit=1) + + base_knl = BiharmonicKernel(3) + expected_int_g = sum( + IntG(base_knl, [AxisSourceDerivative(d, AxisSourceDerivative(d, base_knl))], + [-1], qbx_forced_limit=1) for d in range(3)) + + assert expected_int_g == rewrite_int_g_using_base_kernel(int_g, + base_kernel=base_knl) + + +def test_convert_int_g_base_with_const(): + knl = StokesletKernel(2, 0, 0) + int_g = IntG(knl, [knl], [1], qbx_forced_limit=1, mu=2) + + base_knl = BiharmonicKernel(2) + dim = 2 + dd = pytential.sym.DOFDescriptor(geometry=pytential.sym.DEFAULT_SOURCE) + + expected_int_g = (-0.1875)*prim.Power(np.pi, -1) * \ + pytential.sym.integral(dim, dim-1, 1, dofdesc=dd) + \ + IntG(base_knl, + [AxisSourceDerivative(1, AxisSourceDerivative(1, base_knl))], [0.5], + qbx_forced_limit=1) + assert rewrite_int_g_using_base_kernel(int_g, + base_kernel=base_knl) == expected_int_g + + +def test_convert_int_g_base_with_const_and_deriv(): + knl = StokesletKernel(2, 0, 0) + int_g = IntG(knl, [AxisSourceDerivative(0, knl)], [1], qbx_forced_limit=1, mu=2) + + base_knl = BiharmonicKernel(2) + + expected_int_g = IntG(base_knl, + [AxisSourceDerivative(1, AxisSourceDerivative(1, + AxisSourceDerivative(0, base_knl)))], [0.5], + qbx_forced_limit=1) + assert rewrite_int_g_using_base_kernel(int_g, + base_kernel=base_knl) == expected_int_g + + +def test_reduce_number_of_fmms(): + dim = 3 + knl = LaplaceKernel(dim) + densities = make_sym_vector("sigma", 2) + mu = Variable("mu") + + int_g1 = \ + mu * int_g_vec(AxisSourceDerivative(1, AxisSourceDerivative(0, knl)), + densities[0], qbx_forced_limit=1) + \ + int_g_vec(AxisSourceDerivative(1, AxisSourceDerivative(1, knl)), + densities[1] * mu, qbx_forced_limit=1) + + int_g2 = \ + int_g_vec(AxisSourceDerivative(2, AxisSourceDerivative(0, knl)), + densities[0], qbx_forced_limit=1) + \ + int_g_vec(AxisSourceDerivative(2, AxisSourceDerivative(1, knl)), + densities[1], qbx_forced_limit=1) + + # Merging reduces 4 FMMs to 2 FMMs and then further reduced to 1 FMM + result = merge_int_g_exprs([int_g1, int_g2], + source_dependent_variables=densities) + assert get_number_of_fmms(result) == 1 + + int_g3 = \ + IntG(target_kernel=AxisTargetDerivative(1, knl), + source_kernels=[AxisSourceDerivative(1, knl), + AxisSourceDerivative(0, knl)], + densities=[-densities[1], -densities[0]], + qbx_forced_limit=1) + + int_g4 = int_g3.copy(target_kernel=AxisTargetDerivative(2, knl)) + + assert result[0] == int_g3 * mu + assert result[1] == int_g4 + + +def test_source_dependent_variable(): + # Same example as test_reduce_number_of_fmms, but with + # mu marked as a source dependent variable + dim = 3 + knl = LaplaceKernel(dim) + densities = make_sym_vector("sigma", 2) + mu = 
Variable("mu") + nu = Variable("nu") + + int_g1 = \ + int_g_vec(AxisSourceDerivative(1, AxisSourceDerivative(0, knl)), + mu * nu * densities[0], qbx_forced_limit=1) + \ + int_g_vec(AxisSourceDerivative(1, AxisSourceDerivative(1, knl)), + mu * nu * densities[1], qbx_forced_limit=1) + + int_g2 = \ + int_g_vec(AxisSourceDerivative(2, AxisSourceDerivative(0, knl)), + densities[0], qbx_forced_limit=1) + \ + int_g_vec(AxisSourceDerivative(2, AxisSourceDerivative(1, knl)), + densities[1], qbx_forced_limit=1) + + result = merge_int_g_exprs([int_g1, int_g2], + source_dependent_variables=[mu, nu]) + + # Merging reduces 4 FMMs to 2 FMMs. No further reduction of FMMs. + int_g3 = \ + IntG(target_kernel=knl, + source_kernels=[AxisSourceDerivative(1, AxisSourceDerivative(1, knl)), + AxisSourceDerivative(1, AxisSourceDerivative(0, knl))], + densities=[mu * nu * densities[1], mu * nu * densities[0]], + qbx_forced_limit=1) + + int_g4 = \ + IntG(target_kernel=knl, + source_kernels=[AxisSourceDerivative(2, AxisSourceDerivative(1, knl)), + AxisSourceDerivative(2, AxisSourceDerivative(0, knl))], + densities=[densities[1], densities[0]], + qbx_forced_limit=1) + + assert result[0] == int_g3 + assert result[1] == int_g4 + + +def test_base_kernel_merge(): + # Same example as test_reduce_number_of_fmms, but with + # mu marked as a source dependent variable + dim = 3 + knl = LaplaceKernel(dim) + biharm_knl = BiharmonicKernel(dim) + density = make_sym_vector("sigma", 1)[0] + + int_g1 = \ + int_g_vec(TargetPointMultiplier(0, knl), + density, qbx_forced_limit=1) + + int_g2 = \ + int_g_vec(TargetPointMultiplier(1, knl), + density, qbx_forced_limit=1) + + exprs_rewritten = rewrite_using_base_kernel([int_g1, int_g2], + base_kernel=biharm_knl) + result = merge_int_g_exprs(exprs_rewritten, source_dependent_variables=[]) + + sources = [NodeCoordinateComponent(i) for i in range(dim)] + + source_kernels = list(reversed([ + AxisSourceDerivative(i, AxisSourceDerivative(i, biharm_knl)) + for i in range(dim)])) + + int_g3 = IntG(target_kernel=biharm_knl, + source_kernels=source_kernels + [AxisSourceDerivative(0, biharm_knl)], + densities=[density*sources[0]*(-1.0) for _ in range(dim)] + [2*density], + qbx_forced_limit=1) + int_g4 = IntG(target_kernel=biharm_knl, + source_kernels=source_kernels + [AxisSourceDerivative(1, biharm_knl)], + densities=[density*sources[1]*(-1.0) for _ in range(dim)] + [2*density], + qbx_forced_limit=1) + + assert result[0] == int_g3 + assert result[1] == int_g4 + + +def test_merge_different_kernels(): + # Test different kernels Laplace, Helmholtz(k=1), Helmholtz(k=2) + dim = 3 + laplace_knl = LaplaceKernel(dim) + helmholtz_knl = HelmholtzKernel(dim) + density = make_sym_vector("sigma", 1)[0] + + int_g1 = int_g_vec(laplace_knl, density, qbx_forced_limit=1) \ + + int_g_vec(helmholtz_knl, density, qbx_forced_limit=1, k=1) \ + + int_g_vec(AxisTargetDerivative(0, helmholtz_knl), + density, qbx_forced_limit=1, k=1) \ + + int_g_vec(helmholtz_knl, density, qbx_forced_limit=1, k=2) + + int_g2 = int_g_vec(AxisTargetDerivative(0, laplace_knl), + density, qbx_forced_limit=1) + + result = merge_int_g_exprs([int_g1, int_g2], + source_dependent_variables=[density]) + + int_g3 = int_g_vec(laplace_knl, density, qbx_forced_limit=1) \ + + IntG(target_kernel=helmholtz_knl, + source_kernels=[AxisSourceDerivative(0, helmholtz_knl), helmholtz_knl], + densities=[-density, density], + qbx_forced_limit=1, k=1) \ + + int_g_vec(helmholtz_knl, density, qbx_forced_limit=1, k=2) + + assert result[0] == int_g3 + assert result[1] == 
int_g2 + + result_no_sdv = merge_int_g_exprs([int_g1, int_g2]) + assert result == result_no_sdv + + +def test_merge_different_qbx_forced_limit(): + dim = 3 + laplace_knl = LaplaceKernel(dim) + density = make_sym_vector("sigma", 1)[0] + + int_g1 = int_g_vec(laplace_knl, density, qbx_forced_limit=1) + int_g2 = int_g1.copy(target_kernel=AxisTargetDerivative(0, laplace_knl)) + + int_g3, = merge_int_g_exprs([int_g2 + int_g1]) + int_g4 = int_g1.copy(qbx_forced_limit=2) + int_g2.copy(qbx_forced_limit=-2) + int_g5 = int_g1.copy(qbx_forced_limit=-2) + int_g2.copy(qbx_forced_limit=2) + + result = merge_int_g_exprs([int_g3, int_g4, int_g5], + source_dependent_variables=[density]) + + int_g6 = int_g_vec(laplace_knl, density, qbx_forced_limit=1) + int_g7 = int_g6.copy(target_kernel=AxisTargetDerivative(0, laplace_knl)) + int_g8 = int_g7 + int_g6 + int_g9 = int_g6.copy(qbx_forced_limit=2) \ + + int_g7.copy(qbx_forced_limit=-2) + int_g10 = int_g6.copy(qbx_forced_limit=-2) \ + + int_g7.copy(qbx_forced_limit=2) + + assert result[0] == int_g8 + assert result[1] == int_g9 + assert result[2] == int_g10 + + result_no_sdv = merge_int_g_exprs([int_g3, int_g4, int_g5]) + assert result == result_no_sdv + + +def test_merge_directional_source(): + from pymbolic.primitives import Variable + from pytential.symbolic.primitives import cse + + dim = 3 + laplace_knl = LaplaceKernel(dim) + density = Variable("density") + + int_g1 = int_g_vec(laplace_knl, density, qbx_forced_limit=1) + int_g2 = D(laplace_knl, density, qbx_forced_limit=1) + + source_kernels = [AxisSourceDerivative(d, laplace_knl) + for d in range(dim)] + [laplace_knl] + dsource = int_g2.kernel_arguments["dsource_vec"] + densities = [dsource[d]*cse(density) for d in range(dim)] + [density] + int_g3 = int_g2.copy(source_kernels=source_kernels, densities=densities, + kernel_arguments={}) + + result = merge_int_g_exprs([int_g1 + int_g2], + source_dependent_variables=[density]) + assert result[0] == int_g3 + + result = merge_int_g_exprs([int_g1 + int_g2]) + assert result[0] == int_g3 + + +def test_merge_directional_target(): + from pymbolic.primitives import Variable + + dim = 3 + knl = DirectionalTargetDerivative(LaplaceKernel(dim), "target_dir") + density = Variable("density") + target_dir1 = make_sym_vector("target_dir1", dim) + target_dir2 = make_sym_vector("target_dir2", dim) + + int_g1 = int_g_vec(knl, density, qbx_forced_limit=1, target_dir=target_dir1) + int_g2 = int_g_vec(knl, density, qbx_forced_limit=1, target_dir=target_dir2) + + result = merge_int_g_exprs([int_g1 + int_g2]) + assert result[0] == int_g1 + int_g2 + + +def test_restoring_target_attributes(): + from pymbolic.primitives import Variable + dim = 3 + laplace_knl = LaplaceKernel(dim) + density = Variable("density") + + int_g1 = int_g_vec(TargetPointMultiplier(0, AxisTargetDerivative(0, + laplace_knl)), density, qbx_forced_limit=1) + int_g2 = int_g_vec(AxisTargetDerivative(1, laplace_knl), + density, qbx_forced_limit=1) + + result = merge_int_g_exprs([int_g1, int_g2], + source_dependent_variables=[density]) + + assert result[0] == int_g1 + assert result[1] == int_g2 + + result_no_sdv = merge_int_g_exprs([int_g1, int_g2]) + assert result == result_no_sdv + + +def test_int_gs_in_densities(): + from pymbolic.primitives import Variable, Product + dim = 3 + laplace_knl = LaplaceKernel(dim) + density = Variable("density") + + int_g1 = \ + int_g_vec(laplace_knl, + int_g_vec(AxisSourceDerivative(2, laplace_knl), density, + qbx_forced_limit=1), qbx_forced_limit=1) + \ + 
int_g_vec(AxisTargetDerivative(0, laplace_knl), + int_g_vec(AxisSourceDerivative(1, laplace_knl), 2*density, + qbx_forced_limit=1), qbx_forced_limit=1) + + # In the above example the two inner source derivatives should + # be converted to target derivatives and the two outermost + # IntGs should be merged into one by converting the target + # derivative in the last term to a source derivative + result = merge_int_g_exprs([int_g1]) + + source_kernels = [AxisSourceDerivative(0, laplace_knl), laplace_knl] + + densities = [ + Product((-1, Product((int_g_vec(AxisTargetDerivative(1, laplace_knl), + density, qbx_forced_limit=1), -2)))), + int_g_vec(AxisTargetDerivative(2, laplace_knl), + density, qbx_forced_limit=1) * (-1) + ] + int_g3 = IntG(target_kernel=laplace_knl, + source_kernels=tuple(source_kernels), + densities=tuple(densities), + qbx_forced_limit=1) + + assert result[0] == int_g3 + + +def test_mindlin(): + sigma = make_sym_vector("sigma", 3) + normal = make_sym_vector("normal", 3) + + mindlin_op = MindlinOperator(method=Method.naive) + + c = mindlin_op.C(sigma=sigma, normal=normal, qbx_forced_limit=1) + assert get_number_of_fmms(c) == 3 + + c_opt = merge_int_g_exprs(c) + assert get_number_of_fmms(c_opt) == 2 + + c_opt_biharmonic = merge_int_g_exprs(rewrite_using_base_kernel( + c, base_kernel=BiharmonicKernel(3))) + assert get_number_of_fmms(c_opt_biharmonic) == 2 + + b = mindlin_op.B(sigma=sigma, normal=normal, qbx_forced_limit=1) + assert get_number_of_fmms(b) == 3 + + b_opt = merge_int_g_exprs(b) + assert get_number_of_fmms(b_opt) == 1 + + +def test_paper_reduce_example(): + from sympy import symbols, Matrix, EX + from pytential.symbolic.pde.systems.reduce import factor + y = y1, y2, y3 = symbols("y1, y2, y3") + m = Matrix([[y1 * y2, -2*y1**2-2*y3**2], [y1*y3, 2*y2*y3]]) + poly_ring = EX.old_poly_ring(*y) + ring = poly_ring / [y1**2 + y2**2 + y3**2] + + lhs, rhs = factor(m, y, ring) + assert lhs == Matrix([[y2], [y3]]) + + # When the PDE is not taken into account, rank is 2 + lhs, rhs = factor(m, y, poly_ring) + assert lhs == m diff --git a/test/test_stokes.py b/test/test_stokes.py index d1446fbbf..6f0cfc4e6 100644 --- a/test/test_stokes.py +++ b/test/test_stokes.py @@ -27,10 +27,15 @@ from arraycontext import flatten from pytential import GeometryCollection, bind, sym +from pytential.symbolic.stokes import StokesletWrapper +from pytential.symbolic.elasticity import (make_elasticity_wrapper, + make_elasticity_double_layer_wrapper, Method, + ElasticityDoubleLayerWrapperBase) from meshmode.discretization import Discretization from meshmode.discretization.poly_element import \ InterpolatoryQuadratureGroupFactory from pytools.obj_array import make_obj_array +from sumpy.symbolic import SpatialConstant from meshmode import _acf # noqa: F401 from arraycontext import pytest_generate_tests_for_array_contexts @@ -68,11 +73,13 @@ def dof_array_rel_error(actx, x, xref, p=None): def run_exterior_stokes(actx_factory, *, ambient_dim, target_order, qbx_order, resolution, - fmm_order=False, # FIXME: FMM is slower than direct evaluation + fmm_order=None, # FIXME: FMM is slower than direct evaluation source_ovsmp=None, radius=1.5, aspect_ratio=1.0, mu=1.0, + nu=0.4, visualize=False, + method="naive", _target_association_tolerance=0.05, _expansions_in_tree_have_extent=True): @@ -152,27 +159,44 @@ def run_exterior_stokes(actx_factory, *, # {{{ symbolic sym_normal = sym.make_sym_vector("normal", ambient_dim) - sym_mu = sym.var("mu") + sym_mu = SpatialConstant("mu2") + + if nu == 0.5: + sym_nu = 0.5 + else: 
+ sym_nu = SpatialConstant("nu2") + + stokeslet = make_elasticity_wrapper(ambient_dim, mu=sym_mu, + nu=sym_nu, method=Method[method]) + stresslet = make_elasticity_double_layer_wrapper(ambient_dim, mu=sym_mu, + nu=sym_nu, method=Method[method]) if ambient_dim == 2: from pytential.symbolic.stokes import HsiaoKressExteriorStokesOperator sym_omega = sym.make_sym_vector("omega", ambient_dim) - op = HsiaoKressExteriorStokesOperator(omega=sym_omega) + op = HsiaoKressExteriorStokesOperator(omega=sym_omega, stokeslet=stokeslet, + stresslet=stresslet) elif ambient_dim == 3: from pytential.symbolic.stokes import HebekerExteriorStokesOperator - op = HebekerExteriorStokesOperator() + op = HebekerExteriorStokesOperator(stokeslet=stokeslet, stresslet=stresslet) else: raise AssertionError() sym_sigma = op.get_density_var("sigma") sym_bc = op.get_density_var("bc") - sym_op = op.operator(sym_sigma, normal=sym_normal, mu=sym_mu) - sym_rhs = op.prepare_rhs(sym_bc, mu=mu) + sym_op = op.operator(sym_sigma, normal=sym_normal) + sym_rhs = op.prepare_rhs(sym_bc) - sym_velocity = op.velocity(sym_sigma, normal=sym_normal, mu=sym_mu) + sym_velocity = op.velocity(sym_sigma, normal=sym_normal) - sym_source_pot = op.stokeslet.apply(sym_sigma, sym_mu, qbx_forced_limit=None) + if ambient_dim == 3: + sym_source_pot = op.stokeslet.apply(sym_sigma, qbx_forced_limit=None) + else: + # Use the naive method here as biharmonic requires source derivatives + # of point_source + sym_source_pot = make_elasticity_wrapper(ambient_dim, mu=sym_mu, + nu=sym_nu, method=Method.naive).apply(sym_sigma, qbx_forced_limit=None) # }}} @@ -193,20 +217,43 @@ def run_exterior_stokes(actx_factory, *, omega = bind(places, total_charge * sym.Ones())(actx) if ambient_dim == 2: - bc_context = {"mu": mu, "omega": omega} - op_context = {"mu": mu, "omega": omega, "normal": normal} + bc_context = {"mu2": mu, "omega": omega} + op_context = {"mu2": mu, "omega": omega, "normal": normal} else: bc_context = {} - op_context = {"mu": mu, "normal": normal} + op_context = {"mu2": mu, "normal": normal} + direct_context = {"mu2": mu} + + if sym_nu != 0.5: + bc_context["nu2"] = nu + op_context["nu2"] = nu + direct_context["nu2"] = nu - bc = bind(places, sym_source_pot, - auto_where=("point_source", "source"))(actx, sigma=charges, mu=mu) + bc_op = bind(places, sym_source_pot, + auto_where=("point_source", "source")) + bc = bc_op(actx, sigma=charges, **direct_context) rhs = bind(places, sym_rhs)(actx, bc=bc, **bc_context) bound_op = bind(places, sym_op) # }}} + fmm_timing_data = {} + bound_op.eval({"sigma": rhs, **op_context}, array_context=actx, + timing_data=fmm_timing_data) + + def print_timing_data(timings, name): + result = {k: 0 for k in list(timings.values())[0].keys()} + total = 0 + for k, timing in timings.items(): + for k, v in timing.items(): + result[k] += v["wall_elapsed"] + total += v["wall_elapsed"] + result["total"] = total + print(f"{name}={result}") + + # print_timing_data(fmm_timing_data, method) + # {{{ solve from pytential.linalg.gmres import gmres @@ -219,7 +266,6 @@ def run_exterior_stokes(actx_factory, *, progress=visualize, stall_iterations=0, hard_failure=True) - sigma = result.solution # }}} @@ -229,7 +275,8 @@ def run_exterior_stokes(actx_factory, *, velocity = bind(places, sym_velocity, auto_where=("source", "point_target"))(actx, sigma=sigma, **op_context) ref_velocity = bind(places, sym_source_pot, - auto_where=("point_source", "point_target"))(actx, sigma=charges, mu=mu) + auto_where=("point_source", "point_target"))(actx, sigma=charges, + 
**direct_context) v_error = [ dof_array_rel_error(actx, u, uref) @@ -265,11 +312,19 @@ def run_exterior_stokes(actx_factory, *, return h_max, v_error -@pytest.mark.parametrize("ambient_dim", [ - 2, - pytest.param(3, marks=pytest.mark.slowtest) +@pytest.mark.parametrize("ambient_dim, method, nu", [ + (2, "naive", 0.5), + (2, "laplace", 0.5), + (2, "biharmonic", 0.5), + pytest.param(3, "naive", 0.5, marks=pytest.mark.slowtest), + pytest.param(3, "biharmonic", 0.5, marks=pytest.mark.slowtest), + pytest.param(3, "laplace", 0.5, marks=pytest.mark.slowtest), + + (2, "biharmonic", 0.4), + pytest.param(3, "biharmonic", 0.4, marks=pytest.mark.slowtest), + pytest.param(3, "laplace", 0.4, marks=pytest.mark.slowtest), ]) -def test_exterior_stokes(actx_factory, ambient_dim, visualize=False): +def test_exterior_stokes(actx_factory, ambient_dim, method, nu, visualize=False): if visualize: logging.basicConfig(level=logging.INFO) @@ -281,8 +336,10 @@ def test_exterior_stokes(actx_factory, ambient_dim, visualize=False): qbx_order = 3 if ambient_dim == 2: + fmm_order = 10 resolutions = [20, 35, 50] elif ambient_dim == 3: + fmm_order = 6 resolutions = [0, 1, 2] else: raise ValueError(f"unsupported dimension: {ambient_dim}") @@ -291,9 +348,12 @@ def test_exterior_stokes(actx_factory, ambient_dim, visualize=False): h_max, errors = run_exterior_stokes(actx_factory, ambient_dim=ambient_dim, target_order=target_order, + fmm_order=fmm_order, qbx_order=qbx_order, source_ovsmp=source_ovsmp, resolution=resolution, + method=method, + nu=nu, visualize=visualize) for eoc, e in zip(eocs, errors): @@ -305,12 +365,17 @@ def test_exterior_stokes(actx_factory, ambient_dim, visualize=False): error_format="%.8e", eoc_format="%.2f")) + orders_lost = 0 + if method == "biharmonic": + orders_lost += 1 + elif nu != 0.5: + orders_lost += 0.5 for eoc in eocs: # This convergence data is not as clean as it could be. See # https://github.com/inducer/pytential/pull/32 # for some discussion. 
order = min(target_order, qbx_order) - assert eoc.order_estimate() > order - 0.5 + assert eoc.order_estimate() > order - 0.5 - orders_lost # }}} @@ -402,15 +467,14 @@ class StokesletIdentity: """[Pozrikidis1992] Problem 3.1.1""" def __init__(self, ambient_dim): - from pytential.symbolic.stokes import StokesletWrapper self.ambient_dim = ambient_dim - self.stokeslet = StokesletWrapper(self.ambient_dim) + self.stokeslet = StokesletWrapper(self.ambient_dim, mu=1) def apply_operator(self): sym_density = sym.normal(self.ambient_dim).as_vector() return self.stokeslet.apply( sym_density, - mu_sym=1, qbx_forced_limit=+1) + qbx_forced_limit=+1) def ref_result(self): return make_obj_array([1.0e-15 * sym.Ones()] * self.ambient_dim) @@ -461,15 +525,14 @@ class StressletIdentity: """[Pozrikidis1992] Equation 3.2.7""" def __init__(self, ambient_dim): - from pytential.symbolic.stokes import StokesletWrapper self.ambient_dim = ambient_dim - self.stokeslet = StokesletWrapper(self.ambient_dim) + self.stokeslet = StokesletWrapper(self.ambient_dim, mu=1) def apply_operator(self): sym_density = sym.normal(self.ambient_dim).as_vector() return self.stokeslet.apply_stress( sym_density, sym_density, - mu_sym=1, qbx_forced_limit="avg") + qbx_forced_limit="avg") def ref_result(self): return -0.5 * sym.normal(self.ambient_dim).as_vector() @@ -514,6 +577,249 @@ def test_stresslet_identity(actx_factory, cls, visualize=False): # }}} +# {{{ test Stokes PDE + +def run_stokes_pde(actx_factory, case, identity, resolution, visualize=False): + from sumpy.point_calculus import CalculusPatch + from pytential.target import PointsTarget + actx = actx_factory() + + dim = case.ambient_dim + qbx = case.get_layer_potential(actx, resolution, case.target_order) + + h_min = actx.to_numpy(bind(qbx, sym.h_min(dim))(actx)) + h_max = actx.to_numpy(bind(qbx, sym.h_max(dim))(actx)) + + if dim == 2: + h = h_max + else: + h = h_max / 5 + + cp = CalculusPatch([0, 0, 0][:dim], h=h, order=case.target_order + 1) + targets = PointsTarget(actx.freeze(actx.from_numpy(cp.points))) + + places = GeometryCollection({case.name: qbx, "cp": targets}, + auto_where=(case.name, "cp")) + + potential = bind(places, identity.apply_operator())(actx) + potential_host = actx.to_numpy(potential) + result = identity.apply_pde_using_calculus_patch(cp, potential_host) + result_pytential = actx.to_numpy(bind(places, + identity.apply_pde_using_pytential())(actx)) + + m = np.max([np.linalg.norm(p, ord=np.inf) for p in potential_host]) + error = [np.linalg.norm(x, ord=np.inf)/m for x in result] + + error += [np.linalg.norm(x, ord=np.inf)/m for x in result_pytential] + logger.info("resolution %4d h_min %.5e h_max %.5e error " + + ("%.5e " * places.ambient_dim), + resolution, h_min, h_max, *error) + + return h_max, error + + +class StokesPDE: + def __init__(self, ambient_dim, wrapper): + self.ambient_dim = ambient_dim + self.wrapper = wrapper + + def apply_operator(self): + dim = self.ambient_dim + args = { + "density_vec_sym": [1]*dim, + "qbx_forced_limit": None, + } + if isinstance(self.wrapper, ElasticityDoubleLayerWrapperBase): + args["dir_vec_sym"] = sym.normal(self.ambient_dim).as_vector() + + velocity = self.wrapper.apply(**args) + pressure = self.wrapper.apply_pressure(**args) + return make_obj_array([*velocity, pressure]) + + def apply_pde_using_pytential(self): + dim = self.ambient_dim + args = { + "density_vec_sym": [1]*dim, + "qbx_forced_limit": None, + } + if isinstance(self.wrapper, ElasticityDoubleLayerWrapperBase): + args["dir_vec_sym"] = 
sym.normal(self.ambient_dim).as_vector() + + d_u = [self.wrapper.apply(**args, extra_deriv_dirs=(i,)) + for i in range(dim)] + dd_u = [self.wrapper.apply(**args, extra_deriv_dirs=(i, i)) + for i in range(dim)] + laplace_u = [sum(dd_u[j][i] for j in range(dim)) for i in range(dim)] + d_p = [self.wrapper.apply_pressure(**args, extra_deriv_dirs=(i,)) + for i in range(dim)] + eqs = [laplace_u[i] - d_p[i] for i in range(dim)] + \ + [sum(d_u[i][i] for i in range(dim))] + return make_obj_array(eqs) + + def apply_pde_using_calculus_patch(self, cp, potential): + dim = self.ambient_dim + velocity = potential[:dim] + pressure = potential[dim] + eqs = [cp.laplace(velocity[i]) - cp.diff(i, pressure) for i in range(dim)] \ + + [sum(cp.diff(i, velocity[i]) for i in range(dim))] + return make_obj_array(eqs) + + +class ElasticityPDE: + def __init__(self, ambient_dim, wrapper): + self.ambient_dim = ambient_dim + self.wrapper = wrapper + + def apply_operator(self): + dim = self.ambient_dim + args = { + "density_vec_sym": [1]*dim, + "qbx_forced_limit": None, + } + if isinstance(self.wrapper, ElasticityDoubleLayerWrapperBase): + args["dir_vec_sym"] = sym.normal(self.ambient_dim).as_vector() + + return self.wrapper.apply(**args) + + def apply_pde_using_pytential(self): + dim = self.ambient_dim + args = { + "density_vec_sym": [1]*dim, + "qbx_forced_limit": None, + } + if isinstance(self.wrapper, ElasticityDoubleLayerWrapperBase): + args["dir_vec_sym"] = sym.normal(self.ambient_dim).as_vector() + + mu = self.wrapper.mu + nu = self.wrapper.nu + assert nu != 0.5 + lam = 2*nu*mu/(1-2*nu) + + derivs = {} + + for i in range(dim): + for j in range(i + 1): + derivs[(i, j)] = self.wrapper.apply(**args, + extra_deriv_dirs=(i, j)) + derivs[(j, i)] = derivs[(i, j)] + + laplace_u = sum(derivs[(i, i)] for i in range(dim)) + grad_of_div_u = [sum(derivs[(i, j)][j] for j in range(dim)) + for i in range(dim)] + + # Navier-Cauchy equations + eqs = [(lam + mu)*grad_of_div_u[i] + mu*laplace_u[i] for i in range(dim)] + return make_obj_array(eqs) + + def apply_pde_using_calculus_patch(self, cp, potential): + dim = self.ambient_dim + mu = self.wrapper.mu + nu = self.wrapper.nu + assert nu != 0.5 + lam = 2*nu*mu/(1-2*nu) + + laplace_u = [cp.laplace(potential[i]) for i in range(dim)] + grad_of_div_u = [ + sum(cp.diff(j, cp.diff(i, potential[j])) for j in range(dim)) + for i in range(dim)] + + # Navier-Cauchy equations + eqs = [(lam + mu)*grad_of_div_u[i] + mu*laplace_u[i] for i in range(dim)] + return make_obj_array(eqs) + + +@pytest.mark.parametrize("dim, method, nu, is_double_layer", [ + # Single layer + pytest.param(2, "biharmonic", 0.4, False), + pytest.param(2, "biharmonic", 0.5, False), + pytest.param(2, "laplace", 0.5, False), + pytest.param(3, "laplace", 0.5, False), + pytest.param(3, "laplace", 0.4, False), + pytest.param(2, "naive", 0.4, False, marks=pytest.mark.slowtest), + pytest.param(3, "naive", 0.4, False, marks=pytest.mark.slowtest), + pytest.param(2, "naive", 0.5, False, marks=pytest.mark.slowtest), + pytest.param(3, "naive", 0.5, False, marks=pytest.mark.slowtest), + # FIXME: re-enable when merge_int_g_exprs is in + pytest.param(3, "biharmonic", 0.4, False, marks=pytest.mark.skip), + pytest.param(3, "biharmonic", 0.5, False, marks=pytest.mark.skip), + # FIXME: re-enable when StokesletWrapperYoshida is implemented for 2D + pytest.param(2, "laplace", 0.4, False, marks=pytest.mark.xfail), + + # Double layer + pytest.param(2, "laplace", 0.5, True), + pytest.param(3, "laplace", 0.5, True), + pytest.param(3, "laplace", 0.4, 
True), + pytest.param(2, "naive", 0.4, True, marks=pytest.mark.slowtest), + pytest.param(3, "naive", 0.4, True, marks=pytest.mark.slowtest), + pytest.param(2, "naive", 0.5, True, marks=pytest.mark.slowtest), + pytest.param(3, "naive", 0.5, True, marks=pytest.mark.slowtest), + # FIXME: re-enable when merge_int_g_exprs is in + pytest.param(2, "biharmonic", 0.4, True, marks=pytest.mark.skip), + pytest.param(2, "biharmonic", 0.5, True, marks=pytest.mark.skip), + pytest.param(3, "biharmonic", 0.4, True, marks=pytest.mark.skip), + pytest.param(3, "biharmonic", 0.5, True, marks=pytest.mark.skip), + # FIXME: re-enable when StressletWrapperYoshida is implemented for 2D + pytest.param(2, "laplace", 0.4, True, marks=pytest.mark.xfail), + ]) +def test_elasticity_pde(actx_factory, dim, method, nu, is_double_layer, + visualize=False): + if visualize: + logging.basicConfig(level=logging.INFO) + + if dim == 2: + case_cls = eid.CircleTestCase + resolutions = [4, 8, 16] + else: + case_cls = eid.SpheroidTestCase + resolutions = [0, 1, 2] + + source_ovsmp = 4 if dim == 2 else 8 + case = case_cls(fmm_backend=None, + target_order=5, qbx_order=3, source_ovsmp=source_ovsmp, + resolutions=resolutions) + + if nu == 0.5: + pde_class = StokesPDE + else: + pde_class = ElasticityPDE + + if is_double_layer: + identity = pde_class(dim, make_elasticity_double_layer_wrapper( + case.ambient_dim, mu=1, nu=nu, method=Method[method])) + else: + identity = pde_class(dim, make_elasticity_wrapper( + case.ambient_dim, mu=1, nu=nu, method=Method[method])) + + from pytools.convergence import EOCRecorder + eocs = [EOCRecorder() for _ in range(2*case.ambient_dim)] + + for resolution in case.resolutions: + h_max, errors = run_stokes_pde( + actx_factory, case, identity, + resolution=resolution, + visualize=visualize) + + for eoc, e in zip(eocs, errors): + eoc.add_data_point(h_max, e) + + for eoc in eocs: + print(eoc.pretty_print( + abscissa_format="%.8e", + error_format="%.8e", + eoc_format="%.2f")) + + for eoc in eocs: + order = min(case.target_order, case.qbx_order) + # Sometimes the error has already converged to a value close to + # machine epsilon. In that case we have to reduce the resolution + # to increase the error to observe the error behaviour, but when + # reducing the resolution is not possible, we check that all the + # errors are below a certain threshold. + assert eoc.order_estimate() > order - 1 or eoc.max_error() < 1e-10 + +# }}} + + # You can test individual routines by typing # $ python test_stokes.py 'test_routine()' diff --git a/test/test_tools.py b/test/test_tools.py index 7f795a2ed..8365be325 100644 --- a/test/test_tools.py +++ b/test/test_tools.py @@ -276,8 +276,27 @@ def test_add_geometry_to_collection(actx_factory): _add_geometry_to_collection(actx, places, sources) _add_geometry_to_collection(actx, places, targets) + # }}} +# {{{ test solve_from_lu + +def test_solve_from_lu(): + import sumpy.symbolic as sym + from pytential.utils import solve_from_lu + x, y, z = sym.symbols("x, y, z") + m = sym.Matrix([[0, x, y], [1, 0, x], [y, 2, 5]]) + L, U, perm = m.LUdecomposition() + + b = sym.Matrix([z, 1, 2]) + sol = solve_from_lu(L, U, perm, b, lambda x: x.expand()) + expected = m.solve(b) + + assert (sol - expected).expand().applyfunc(lambda x: x.simplify()) \ + == sym.Matrix([0, 0, 0]) + + +# }}} # You can test individual routines by typing # $ python test_tools.py 'test_routine()'
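A minimal numeric sanity check of the new ``pytential.utils.solve_from_lu``
helper, mirroring ``test_solve_from_lu`` above; the concrete matrix and
right-hand side here are illustrative assumptions, not part of the test suite:

    import sumpy.symbolic as sym
    from pytential.utils import solve_from_lu

    # LUdecomposition returns L, U and a list of row swaps; solve_from_lu
    # applies the swaps to b first, then does the two triangular solves.
    m = sym.Matrix([[2, 1], [4, 3]])
    L, U, perm = m.LUdecomposition()
    b = sym.Matrix([1, 2])

    x = solve_from_lu(L, U, perm, b, lambda e: e.expand())
    assert (x - m.solve(b)).expand() == sym.Matrix([0, 0])  # x == [1/2, 0]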