feat: use Generator in interp_decomp for scipy 1.15
alexfikl committed Jan 6, 2025
1 parent 6c69139 commit 28d8bba
Showing 3 changed files with 41 additions and 12 deletions.
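For context, a minimal sketch of the two seeding styles involved (assumptions: SciPy >= 1.15, where scipy.linalg.interpolative.interp_decomp accepts an rng keyword, and a made-up test matrix; this is not part of the commit itself):

import numpy as np
import scipy.linalg.interpolative as sli

rng0 = np.random.default_rng(0)
A = rng0.standard_normal((64, 8)) @ rng0.standard_normal((8, 64))   # exact rank 8

# old style: module-level seeding of the ID routines, as the tests below were doing
sli.seed(42)
idx, proj = sli.interp_decomp(A, 8)

# new style (SciPy >= 1.15): pass a local numpy Generator explicitly
rng = np.random.default_rng(42)
idx, proj = sli.interp_decomp(A, 8, rng=rng)

The wrapper in pytential.linalg keeps its output signature unchanged and only forwards rng when the installed SciPy is new enough, as shown in the utils.py hunk below.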
18 changes: 12 additions & 6 deletions pytential/linalg/skeletonization.py
@@ -577,8 +577,10 @@ def _skeletonize_block_by_proxy_with_mats(
proxy_generator: ProxyGeneratorBase,
wrangler: SkeletonizationWrangler,
tgt_src_index: TargetAndSourceClusterList, *,
id_eps: float | None = None, id_rank: int | None = None,
max_particles_in_box: int | None = None
id_eps: float | None = None,
id_rank: int | None = None,
max_particles_in_box: int | None = None,
rng: np.random.Generator | None = None,
) -> "SkeletonizationResult":
nclusters = tgt_src_index.nclusters
if nclusters == 1:
@@ -619,6 +621,7 @@ def _skeletonize_block_by_proxy_with_mats(
R: np.ndarray = np.empty(nclusters, dtype=object)

from pytential.linalg import interp_decomp

for i in range(nclusters):
k = id_rank
src_mat = np.vstack(src_result[i])
@@ -632,7 +635,7 @@ def _skeletonize_block_by_proxy_with_mats(
assert np.all(isfinite), np.where(isfinite)

# skeletonize target points
k, idx, interp = interp_decomp(tgt_mat.T, rank=k, eps=id_eps)
k, idx, interp = interp_decomp(tgt_mat.T, rank=k, eps=id_eps, rng=rng)
assert 0 < k <= len(idx)

if k > max_allowable_rank:
@@ -644,7 +647,7 @@ def _skeletonize_block_by_proxy_with_mats(
assert L[i].shape == (tgt_mat.shape[0], k)

# skeletonize source points
k, idx, interp = interp_decomp(src_mat, rank=k, eps=None)
k, idx, interp = interp_decomp(src_mat, rank=k, eps=None, rng=rng)
assert 0 < k <= len(idx)

R[i] = interp
@@ -762,6 +765,7 @@ def skeletonize_by_proxy(

id_eps: float | None = None,
id_rank: int | None = None,
rng: np.random.Generator | None = None,
max_particles_in_box: int | None = None) -> np.ndarray:
r"""Evaluate and skeletonize a symbolic expression using proxy-based methods.
@@ -798,8 +802,10 @@ for ibcol in range(wrangler.ncols):
for ibcol in range(wrangler.ncols):
skels[ibrow, ibcol] = _skeletonize_block_by_proxy_with_mats(
actx, ibrow, ibcol, places, proxy, wrangler, tgt_src_index,
id_eps=id_eps, id_rank=id_rank,
max_particles_in_box=max_particles_in_box)
id_eps=id_eps,
id_rank=id_rank,
max_particles_in_box=max_particles_in_box,
rng=rng)

return skels
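As an aside on the change above (an illustrative sketch, not part of the commit): a single Generator instance is forwarded to both the target and source interp_decomp calls in the cluster loop, so the second call draws fresh samples rather than replaying the first, while a fixed seed still makes the whole sweep reproducible:

import numpy as np

rng = np.random.default_rng(42)
draw_a = rng.standard_normal(4)   # e.g. samples consumed by the first ID call
draw_b = rng.standard_normal(4)   # the shared Generator has advanced, so these differ
assert not np.allclose(draw_a, draw_b)

# re-creating the Generator with the same seed reproduces the full sequence
rng2 = np.random.default_rng(42)
assert np.allclose(draw_a, rng2.standard_normal(4))
assert np.allclose(draw_b, rng2.standard_normal(4))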

19 changes: 16 additions & 3 deletions pytential/linalg/utils.py
@@ -326,7 +326,10 @@ def make_flat_cluster_diag(
# {{{ interpolative decomposition

def interp_decomp(
A: np.ndarray, *, rank: int | None, eps: float | None,
A: np.ndarray, *,
rank: int | None,
eps: float | None,
rng: np.random.Generator | None = None,
) -> tuple[int, np.ndarray, np.ndarray]:
"""Wrapper for :func:`~scipy.linalg.interpolative.interp_decomp` that
always has the same output signature.
@@ -337,11 +340,21 @@ def interp_decomp(
if rank is not None and eps is not None:
raise ValueError("providing both 'rank' and 'eps' is not supported")

from scipy import __version__

kwargs = {}
if __version__ >= "1.15.0":
if rng is None:
rng = np.random.default_rng()

kwargs["rng"] = rng

import scipy.linalg.interpolative as sli

if rank is None:
k, idx, proj = sli.interp_decomp(A, eps)
k, idx, proj = sli.interp_decomp(A, eps, **kwargs)
else:
idx, proj = sli.interp_decomp(A, rank)
idx, proj = sli.interp_decomp(A, rank, **kwargs)
k = rank

interp = sli.reconstruct_interp_matrix(idx, proj)
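A usage sketch for the wrapper above (illustrative only: the synthetic matrix and tolerance are assumptions, and on SciPy < 1.15 the rng argument is silently dropped by the version gate):

import numpy as np
from pytential.linalg import interp_decomp

rng = np.random.default_rng(42)
A = rng.standard_normal((128, 16)) @ rng.standard_normal((16, 128))   # exact rank 16

# tolerance-driven call: the rank is discovered and returned as k
k, idx, interp = interp_decomp(A, rank=None, eps=1.0e-10, rng=rng)

# the ID approximates A by a few of its own columns times an interpolation matrix
rec = A[:, idx[:k]] @ interp
rel_err = np.linalg.norm(A - rec) / np.linalg.norm(A)
print(k, rel_err)   # expect k close to 16 and rel_err on the order of eps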
16 changes: 13 additions & 3 deletions test/test_linalg_skeletonization.py
@@ -147,9 +147,12 @@ def test_skeletonize_symbolic(actx_factory, case, visualize=False):

from pytential.linalg.skeletonization import (
_skeletonize_block_by_proxy_with_mats)

rng = np.random.default_rng(42)
_skeletonize_block_by_proxy_with_mats(
actx, 0, 0, places, proxy_generator, wrangler, tgt_src_index,
id_eps=1.0e-8
id_eps=1.0e-8,
rng=rng,
)

# }}}
@@ -161,6 +164,7 @@ def test_skeletonize_symbolic(actx_factory, case, visualize=False):
def run_skeletonize_by_proxy(actx, case, resolution,
places=None, mat=None,
ctol=None, rtol=None,
rng=None,
suffix="", visualize=False):
from pytools import ProcessTimer

@@ -238,7 +242,8 @@ def run_skeletonize_by_proxy(actx, case, resolution,
skeleton = _skeletonize_block_by_proxy_with_mats(
actx, 0, 0, places, proxy_generator, wrangler, tgt_src_index,
id_eps=case.id_eps,
max_particles_in_box=case.max_particles_in_box)
max_particles_in_box=case.max_particles_in_box,
rng=rng)

logger.info("[time] skeletonization by proxy: %s", p)

@@ -359,7 +364,9 @@ def test_skeletonize_by_proxy(actx_factory, case, visualize=False):
"""

import scipy.linalg.interpolative as sli

sli.seed(42)
rng = np.random.default_rng(42)

actx = actx_factory()

@@ -374,6 +381,7 @@ def test_skeletonize_by_proxy(actx_factory, case, visualize=False):
ctol=10,
# FIXME: why is the 3D error so large?
rtol=10**case.ambient_dim,
rng=rng,
visualize=visualize)

# }}}
@@ -406,7 +414,9 @@ def test_skeletonize_by_proxy_convergence(
(the ID tolerance).
"""
import scipy.linalg.interpolative as sli

sli.seed(42)
rng = np.random.default_rng(42)

actx = actx_factory()

@@ -440,7 +450,7 @@ def test_skeletonize_by_proxy_convergence(
case = replace(case, id_eps=id_eps[i], weighted_proxy=weighted)
rec_error[i], (places, mat) = run_skeletonize_by_proxy(
actx, case, r, places=places, mat=mat,
suffix=f"{suffix}_{i:04d}", visualize=False)
suffix=f"{suffix}_{i:04d}", rng=rng, visualize=False)

was_zero = rec_error[i] == 0.0
eoc.add_data_point(id_eps[i], rec_error[i])
