Commit

Merge branch 'main' into enable-type-checking
bouweandela authored Jun 6, 2024
2 parents e59e721 + 6adac1b commit 7f84689
Showing 22 changed files with 453 additions and 401 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/ci-manifest.yml
@@ -23,4 +23,4 @@ concurrency:
 jobs:
   manifest:
     name: "check-manifest"
-    uses: scitools/workflows/.github/workflows/ci-manifest.yml@2024.05.0
+    uses: scitools/workflows/.github/workflows/ci-manifest.yml@2024.06.0
2 changes: 1 addition & 1 deletion .github/workflows/refresh-lockfiles.yml
@@ -14,5 +14,5 @@ on:
 
 jobs:
   refresh_lockfiles:
-    uses: scitools/workflows/.github/workflows/refresh-lockfiles.yml@2024.05.0
+    uses: scitools/workflows/.github/workflows/refresh-lockfiles.yml@2024.06.0
     secrets: inherit
4 changes: 2 additions & 2 deletions .pre-commit-config.yaml
@@ -29,7 +29,7 @@ repos:
       - id: no-commit-to-branch
 
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: "v0.4.4"
+    rev: "v0.4.7"
    hooks:
      - id: ruff
        types: [file, python]
@@ -38,7 +38,7 @@ repos:
        types: [file, python]
 
  - repo: https://github.com/codespell-project/codespell
-    rev: "v2.2.6"
+    rev: "v2.3.0"
    hooks:
      - id: codespell
        types_or: [asciidoc, python, markdown, rst]
28 changes: 23 additions & 5 deletions docs/src/conf.py
@@ -21,12 +21,14 @@
 
 import datetime
 from importlib.metadata import version as get_version
+from inspect import getsource
 import ntpath
 import os
 from pathlib import Path
 import re
 from subprocess import run
 import sys
+from tempfile import gettempdir
 from urllib.parse import quote
 import warnings
 
@@ -409,6 +411,26 @@ def _dotv(version):
 # -- sphinx-gallery config ----------------------------------------------------
 # See https://sphinx-gallery.github.io/stable/configuration.html
 
+
+def reset_modules(gallery_conf, fname):
+    """Force re-registering of nc-time-axis with matplotlib for each example.
+    Required for sphinx-gallery>=0.11.0.
+    """
+    from sys import modules
+
+    _ = modules.pop("nc_time_axis", None)
+
+
+# https://sphinx-gallery.github.io/dev/configuration.html#importing-callables
+reset_modules_dir = Path(gettempdir()) / reset_modules.__name__
+reset_modules_dir.mkdir(exist_ok=True)
+(reset_modules_dir / f"{reset_modules.__name__}.py").write_text(
+    getsource(reset_modules)
+)
+sys.path.insert(0, str(reset_modules_dir))
+
 sphinx_gallery_conf = {
     # path to your example scripts
     "examples_dirs": ["../gallery_code"],
@@ -420,11 +442,7 @@ def _dotv(version):
     "ignore_pattern": r"__init__\.py",
     # force gallery building, unless overridden (see src/Makefile)
     "plot_gallery": "'True'",
-    # force re-registering of nc-time-axis with matplotlib for each example,
-    # required for sphinx-gallery>=0.11.0
-    "reset_modules": (
-        lambda gallery_conf, fname: sys.modules.pop("nc_time_axis", None),
-    ),
+    "reset_modules": f"{reset_modules.__name__}.{reset_modules.__name__}",
 }
 
 # -----------------------------------------------------------------------------
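Context for the change above: sphinx-gallery's "importing callables" convention expects configuration entries such as "reset_modules" to be importable "module.function" strings rather than in-line lambdas, so conf.py now writes the function's source to a temp directory, puts that directory on sys.path, and passes the dotted name. As a hedged sketch of how such a string reference resolves to a callable (this mimics, but is not, sphinx-gallery's internal logic):

    import importlib

    def resolve_callable(dotted_ref):
        # "reset_modules.reset_modules" -> module name and attribute name.
        module_name, func_name = dotted_ref.rsplit(".", 1)
        # The import succeeds because the temp directory holding
        # reset_modules.py was prepended to sys.path above.
        return getattr(importlib.import_module(module_name), func_name)

    reset = resolve_callable("reset_modules.reset_modules")
    reset({}, "plot_example.py")  # pops nc_time_axis so it re-registers with matplotlib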
4 changes: 2 additions & 2 deletions docs/src/copyright.rst
@@ -27,10 +27,10 @@ are licensed under the UK's Open Government Licence:
 
 (C) British Crown Copyright |copyright_years|
 
-You may use and re-use the information featured on this website (not including logos) free of
+You may use and reuse the information featured on this website (not including logos) free of
 charge in any format or medium, under the terms of the
 `Open Government Licence <https://www.nationalarchives.gov.uk/doc/open-government-licence>`_.
 We encourage users to establish hypertext links to this website.
 
-Any email enquiries regarding the use and re-use of this information resource should be
+Any email enquiries regarding the use and reuse of this information resource should be
 sent to: [email protected].
5 changes: 3 additions & 2 deletions docs/src/developers_guide/contributing_benchmarks.rst
@@ -7,8 +7,9 @@ Benchmarking
 Iris includes architecture for benchmarking performance and other metrics of
 interest. This is done using the `Airspeed Velocity`_ (ASV) package.
 
-Full detail on the setup and how to run or write benchmarks is in
-`benchmarks/README.md`_ in the Iris repository.
+
+.. note:: Full detail on the setup and how to run or write benchmarks is in
+   `benchmarks/README.md`_ in the Iris repository.
 
 Continuous Integration
 ----------------------
@@ -7,7 +7,7 @@ Here is a journey that demonstrates:
 
 * How to apply dask.bags to an existing script
 * The equal importance of optimisation of non-parallel parts of a script
-* Protection against multiple softwares trying to manage parallelism
+* Protection against multiple software trying to manage parallelism
   simultaneously
 
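A hedged, generic illustration of the first bullet above (the file list and per-file function are placeholders, not the script from the guide):

    import dask.bag as db

    def process_file(path):
        # Placeholder for the existing per-file work in the script.
        with open(path) as f:
            return len(f.read())

    # Partition the file list into a bag; partitions are processed in parallel.
    bag = db.from_sequence([f"file_{i}.txt" for i in range(100)], npartitions=8)
    totals = bag.map(process_file).compute()

The last bullet typically amounts to pinning other libraries' thread pools (for example, setting OMP_NUM_THREADS=1) so that dask alone manages the parallelism; the guide's specifics may differ.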
2 changes: 1 addition & 1 deletion docs/src/further_topics/netcdf_io.rst
@@ -66,7 +66,7 @@ creation of the :data:`iris.fileformats.netcdf.loader.CHUNK_CONTROL` class.
 
 Custom Chunking: Set
 ^^^^^^^^^^^^^^^^^^^^
 
-There are three context manangers within :data:`~iris.fileformats.netcdf.loader.CHUNK_CONTROL`. The most basic is
+There are three context managers within :data:`~iris.fileformats.netcdf.loader.CHUNK_CONTROL`. The most basic is
 :meth:`~iris.fileformats.netcdf.loader.ChunkControl.set`. This allows you to specify the chunksize for each dimension,
 and to specify a ``var_name`` specifically to change.
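A hedged usage sketch of the set context manager just described (the file, variable, and dimension names are assumptions):

    import iris
    from iris.fileformats.netcdf.loader import CHUNK_CONTROL

    # Chunk the named variable's "time" dimension in blocks of 180 on load;
    # dimensions that are not mentioned keep their default chunking.
    with CHUNK_CONTROL.set("air_temperature", time=180):
        cube = iris.load_cube("data.nc", "air_temperature")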
2 changes: 1 addition & 1 deletion docs/src/further_topics/ugrid/partner_packages.rst
@@ -93,7 +93,7 @@ Applications
 
 * Regrid unstructured to structured.
 * Regrid with dask integration, computing in parallel and maintaining data
   laziness.
-* | Save a prepared regridder for re-use in subsequent runs.
+* | Save a prepared regridder for reuse in subsequent runs.
   | Regridders can even be re-used on sources with different masks - a
     significant efficiency gain.
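A hedged sketch of the save/reuse workflow in the last bullet, with names taken from iris-esmf-regrid's experimental API (assumed here; check the package docs for the current locations, and the cubes are placeholders):

    from esmf_regrid.experimental.io import load_regridder, save_regridder
    from esmf_regrid.experimental.unstructured_scheme import MeshToGridESMFRegridder

    # Preparing the regridder is the expensive step, so do it once...
    regridder = MeshToGridESMFRegridder(source_mesh_cube, target_grid_cube)
    save_regridder(regridder, "regridder.nc")

    # ...then load and apply it cheaply in later runs, even on a source
    # cube with a different mask.
    reloaded = load_regridder("regridder.nc")
    result = reloaded(another_source_cube)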
2 changes: 1 addition & 1 deletion docs/src/further_topics/which_regridder_to_use.rst
@@ -341,7 +341,7 @@ of ``kg m-2`` as an area weighted sum. With ``mdtol=0`` this will consistently
 underestimate this total and with ``mdtol=1`` will consistently overestimate. This can
 be somewhat mitigated with a choice of ``mdtol=0.5``, but you should still be aware of
 potential inaccuracies. It should be noted that this choice of ``mdtol`` is highly
-context dependent and there wil likely be occasions where a choice of ``mdtol=0`` or
+context dependent and there will likely be occasions where a choice of ``mdtol=0`` or
 ``mdtol=1`` is more suitable. The important thing is to *know your data, know what*
 *you're doing with your data and know how your regridder fits in this process*.
 
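A minimal sketch of where mdtol enters a regrid call, assuming the area-weighted scheme discussed in this section (the cubes are placeholders):

    from iris.analysis import AreaWeighted

    # mdtol is the tolerated fraction of missing source data per target cell:
    # 0 masks any target cell touched by masked source data; 1 masks a target
    # cell only when all of its contributing source data are masked.
    result = source_cube.regrid(target_cube, AreaWeighted(mdtol=0.5))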
4 changes: 2 additions & 2 deletions docs/src/whatsnew/3.6.rst
@@ -15,11 +15,11 @@ This document explains the changes made to Iris for this release
 
 We're so excited about our recent support for **delayed saving of lazy data
 to netCDF** (:pull:`5191`) that we're celebrating this important step change
-in behavour with its very own dedicated release 🥳
+in behaviour with its very own dedicated release 🥳
 
 By using ``iris.save(..., compute=False)`` you can now save to multiple NetCDF files
 in parallel. See the new ``compute`` keyword in :func:`iris.fileformats.netcdf.save`.
-This can share and re-use any common (lazy) result computations, and it makes much
+This can share and reuse any common (lazy) result computations, and it makes much
 better use of resources during any file-system waiting (i.e., it can use such periods
 to progress the *other* saves).
 
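A hedged sketch of the pattern described above (the cube list and filenames are placeholders):

    import dask
    import iris

    # compute=False defers each write and returns a dask delayed object.
    delayed_saves = [
        iris.save(cube, f"out_{i}.nc", compute=False)
        for i, cube in enumerate(cubes)
    ]
    # Computing the delayed saves together shares any common lazy inputs and
    # overlaps the file-system waiting between files.
    dask.compute(*delayed_saves)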
14 changes: 8 additions & 6 deletions docs/src/whatsnew/latest.rst
@@ -55,14 +55,14 @@ This document explains the changes made to Iris for this release
 🚀 Performance Enhancements
 ===========================
 
-#. N/A
-
 #. `@bouweandela`_ added the option to specify the Dask chunks of the target
    array in :func:`iris.util.broadcast_to_shape` (see the sketch after this
    list). (:pull:`5620`)
 
 #. `@schlunma`_ allowed :func:`iris.analysis.cartography.area_weights` to
    return dask arrays with arbitrary chunks. (:pull:`5658`)
 
+#. `@bouweandela`_ made :meth:`iris.cube.Cube.rolling_window` work with lazy
+   data. (:pull:`5795`)
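
The sketch promised in the first entry above, assuming the chunks keyword that the entry describes (shapes and values are placeholders):

    import numpy as np
    from iris.util import broadcast_to_shape

    weights = np.arange(4.0)
    # With chunks given, the result is a lazy dask array with the requested
    # chunking, rather than a realised numpy array.
    lazy = broadcast_to_shape(weights, (10, 4), dim_map=(1,), chunks=(5, 4))
    print(lazy.chunks)  # ((5, 5), (4,))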

 🔥 Deprecations
 ===============
@@ -114,15 +114,17 @@ This document explains the changes made to Iris for this release
 #. `@pp-mo`_ reworked benchmark peak-memory measurement to use the
    `tracemalloc <https://docs.python.org/3.12/library/tracemalloc.html>`_
    package.
-   (:pull: `5948`)
+   (:pull:`5948`)
 
 #. `@pp-mo`_ added a benchmark 'trialrun' sub-command, to quickly test
-   benchmarks during development. (:pull: `5957`)
+   benchmarks during development. (:pull:`5957`)
 
 #. `@pp-mo`_ moved several memory-measurement benchmarks from 'on-demand' to
-   the standard set, in hopes that use of 'tracemalloc' (:pull: `5948`) makes
+   the standard set, in hopes that use of 'tracemalloc' (:pull:`5948`) makes
    the results consistent enough to monitor for performance changes.
-   (:pull: `5959`)
+   (:pull:`5959`)
 
+#. `@rcomer`_ made some :meth:`~iris.cube.Cube.slices_over` tests go faster (:pull:`5973`)
 
 #. `@bouweandela`_ enabled mypy checks for type hints. (:pull:`5956`)
2 changes: 1 addition & 1 deletion lib/iris/analysis/calculus.py
@@ -760,7 +760,7 @@ def spatial_vectors_with_phenom_name(i_cube, j_cube, k_cube=None):
     The cube standard names must match one of the combinations in
     :data:`DIRECTIONAL_NAMES`.
 
-    This routine is designed to identify the vector quantites which each
+    This routine is designed to identify the vector quantities which each
     of the cubes provided represent and return a list of their 3d
     spatial dimension names and associated phenomenon.
     For example, given a cube of "u wind" and "v wind" the return value
19 changes: 8 additions & 11 deletions lib/iris/cube.py
@@ -4555,12 +4555,6 @@ def rolling_window(self, coord, aggregator, window, **kwargs):
         -------
         :class:`iris.cube.Cube`.
 
-        Notes
-        -----
-        .. note::
-
-            This operation does not yet have support for lazy evaluation.
-
         Examples
         --------
         >>> import iris, iris.analysis
@@ -4664,7 +4658,7 @@ def rolling_window(self, coord, aggregator, window, **kwargs):
         # this will add an extra dimension to the data at dimension + 1 which
         # represents the rolled window (i.e. will have a length of window)
         rolling_window_data = iris.util.rolling_window(
-            self.data, window=window, axis=dimension
+            self.core_data(), window=window, axis=dimension
         )
 
         # now update all of the coordinates to reflect the aggregation
@@ -4683,7 +4677,7 @@ def rolling_window(self, coord, aggregator, window, **kwargs):
                     "coordinate." % coord_.name()
                 )
 
-            new_bounds = iris.util.rolling_window(coord_.points, window)
+            new_bounds = iris.util.rolling_window(coord_.core_points(), window)
 
             if np.issubdtype(new_bounds.dtype, np.str_):
                 # Handle case where the AuxCoord contains string. The points
@@ -4729,9 +4723,12 @@ def rolling_window(self, coord, aggregator, window, **kwargs):
             kwargs["weights"] = iris.util.broadcast_to_shape(
                 weights, rolling_window_data.shape, (dimension + 1,)
            )
-        data_result = aggregator.aggregate(
-            rolling_window_data, axis=dimension + 1, **kwargs
-        )
+
+        if aggregator.lazy_func is not None and self.has_lazy_data():
+            agg_method = aggregator.lazy_aggregate
+        else:
+            agg_method = aggregator.aggregate
+        data_result = agg_method(rolling_window_data, axis=dimension + 1, **kwargs)
         result = aggregator.post_process(new_cube, data_result, [coord], **kwargs)
         return result
 
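A hedged illustration of what the dispatch above enables (the filename is a placeholder): with lazy source data and an aggregator that provides a lazy_func, the rolled aggregation is now deferred rather than realising the cube.

    import iris
    import iris.analysis

    cube = iris.load_cube("data.nc")  # iris loads lazily by default
    result = cube.rolling_window("time", iris.analysis.MEAN, window=3)
    assert cube.has_lazy_data()    # the source cube was not realised
    assert result.has_lazy_data()  # computed only when result.data is touched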
4 changes: 2 additions & 2 deletions lib/iris/fileformats/netcdf/saver.py
@@ -459,7 +459,7 @@ def write(
         cube : :class:`iris.cube.Cube`
             A :class:`iris.cube.Cube` to be saved to a netCDF file.
         local_keys : iterable of str, optional
-            An interable of cube attribute keys. Any cube attributes with
+            An iterable of cube attribute keys. Any cube attributes with
             matching keys will become attributes on the data variable rather
             than global attributes.
 
@@ -2442,7 +2442,7 @@ def save(
         Underlying netCDF file format, one of 'NETCDF4', 'NETCDF4_CLASSIC',
         'NETCDF3_CLASSIC' or 'NETCDF3_64BIT'. Default is 'NETCDF4' format.
     local_keys : iterable of str, optional
-        An interable of cube attribute keys. Any cube attributes with
+        An iterable of cube attribute keys. Any cube attributes with
         matching keys will become attributes on the data variable rather
         than global attributes.
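A hedged usage sketch of the local_keys behaviour documented above (the attribute name and filename are placeholders):

    import iris

    cube.attributes["history"] = "regridded with mdtol=0.5"
    # With the key listed in local_keys, "history" is written as an attribute
    # of the data variable instead of a global NetCDF attribute.
    iris.save(cube, "out.nc", local_keys=["history"])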
17 changes: 17 additions & 0 deletions lib/iris/plot.py
@@ -43,6 +43,23 @@
 
 PlotDefn = collections.namedtuple("PlotDefn", ("coords", "transpose"))
 
 
+class _GeoAxesPatched(cartopy.mpl.geoaxes.GeoAxes):
+    # TODO: see cartopy#2390
+    # Remove this once the bug is addressed in a Cartopy release.
+    def _draw_preprocess(self, renderer):
+        super()._draw_preprocess(renderer)
+
+        for artist in self.artists:
+            if hasattr(artist, "_draw_gridliner"):
+                # Note this is only necessary since Cartopy v0.23, but is not
+                # wasteful for earlier versions as _draw_gridliner() includes
+                # a check for whether a draw is necessary.
+                artist._draw_gridliner(renderer=renderer)
+
+
+cartopy.mpl.geoaxes.GeoAxes = _GeoAxesPatched
+
+
 def _get_plot_defn_custom_coords_picked(cube, coords, mode, ndims=2):
     def names(coords):
         result = []
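A hedged illustration of the patch's effect (not part of the diff): cartopy looks the GeoAxes class up through cartopy.mpl.geoaxes when projection axes are created, so axes made after iris.plot is imported use the patched subclass and refresh gridliner labels on every draw.

    import matplotlib
    matplotlib.use("Agg")  # headless backend, just for the sketch
    import matplotlib.pyplot as plt
    import cartopy.crs as ccrs
    import iris.plot  # importing iris.plot applies the patch above

    ax = plt.axes(projection=ccrs.PlateCarree())
    ax.gridlines(draw_labels=True)
    plt.savefig("map.png")  # _draw_gridliner now runs as part of this draw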
31 changes: 23 additions & 8 deletions lib/iris/tests/unit/cube/test_Cube.py
@@ -877,7 +877,7 @@ def setUp(self):
         self.cell_measure = CellMeasure([0, 1, 2, 0, 1, 2], long_name="bar")
         self.multi_dim_cube.add_cell_measure(self.cell_measure, 1)
 
-        self.mock_agg = mock.Mock(spec=Aggregator)
+        self.mock_agg = mock.Mock(spec=Aggregator, lazy_func=None)
         self.mock_agg.aggregate = mock.Mock(return_value=np.empty([4]))
         self.mock_agg.post_process = mock.Mock(side_effect=lambda x, y, z: x)
 
@@ -919,6 +919,21 @@ def test_kwargs(self):
         )
         self.assertMaskedArrayEqual(expected_result, res_cube.data)
 
+    def test_lazy(self):
+        window = 2
+        self.cube.data = da.ma.masked_array(
+            self.cube.data, mask=([True, False, False, False, True, False])
+        )
+        res_cube = self.cube.rolling_window("val", iris.analysis.MEAN, window, mdtol=0)
+        self.assertTrue(self.cube.has_lazy_data())
+        self.assertTrue(res_cube.has_lazy_data())
+        expected_result = ma.array(
+            [-99.0, 1.5, 2.5, -99.0, -99.0],
+            mask=[True, False, False, True, True],
+            dtype=np.float64,
+        )
+        self.assertMaskedArrayEqual(expected_result, res_cube.data)
+
     def test_ancillary_variables_and_cell_measures_kept(self):
         res_cube = self.multi_dim_cube.rolling_window("val", self.mock_agg, 3)
         self.assertEqual(res_cube.ancillary_variables(), [self.ancillary_variable])
@@ -1036,10 +1051,10 @@ def test_all_permutations(self):
 
 @tests.skip_data
 class Test_slices_over(tests.IrisTest):
     def setUp(self):
-        self.cube = stock.realistic_4d()
+        self.cube = stock.realistic_4d()[:, :7, :10, :10]
         # Define expected iterators for 1D and 2D test cases.
         self.exp_iter_1d = range(len(self.cube.coord("model_level_number").points))
-        self.exp_iter_2d = np.ndindex(6, 70, 1, 1)
+        self.exp_iter_2d = np.ndindex(6, 7, 1, 1)
         # Define maximum number of interactions for particularly long
         # (and so time-consuming) iterators.
         self.long_iterator_max = 5
@@ -1075,7 +1090,7 @@ def test_1d_slice_nonexistent_dimension_given(self):
         _ = self.cube.slices_over(self.cube.ndim + 1)
 
     def test_2d_slice_coord_given(self):
-        # Slicing over these two dimensions returns 420 2D cubes, so only check
+        # Slicing over these two dimensions returns 42 2D cubes, so only check
         # cubes up to `self.long_iterator_max` to keep test runtime sensible.
         res = self.cube.slices_over(
             [self.cube.coord("time"), self.cube.coord("model_level_number")]
@@ -1094,7 +1109,7 @@ def test_2d_slice_nonexistent_coord_given(self):
         )
 
     def test_2d_slice_coord_name_given(self):
-        # Slicing over these two dimensions returns 420 2D cubes, so only check
+        # Slicing over these two dimensions returns 42 2D cubes, so only check
         # cubes up to `self.long_iterator_max` to keep test runtime sensible.
         res = self.cube.slices_over(["time", "model_level_number"])
         for ct in range(self.long_iterator_max):
@@ -1109,7 +1124,7 @@ def test_2d_slice_nonexistent_coord_name_given(self):
         _ = self.cube.slices_over(["time", "wibble"])
 
     def test_2d_slice_dimension_given(self):
-        # Slicing over these two dimensions returns 420 2D cubes, so only check
+        # Slicing over these two dimensions returns 42 2D cubes, so only check
         # cubes up to `self.long_iterator_max` to keep test runtime sensible.
         res = self.cube.slices_over([0, 1])
         for ct in range(self.long_iterator_max):
@@ -1135,11 +1150,11 @@ def test_2d_slice_nonexistent_dimension_given(self):
         _ = self.cube.slices_over([0, self.cube.ndim + 1])
 
     def test_multidim_slice_coord_given(self):
-        # Slicing over surface altitude returns 100x100 2D cubes, so only check
+        # Slicing over surface altitude returns 10x10 2D cubes, so only check
         # cubes up to `self.long_iterator_max` to keep test runtime sensible.
         res = self.cube.slices_over("surface_altitude")
         # Define special ndindex iterator for the different dims sliced over.
-        nditer = np.ndindex(1, 1, 100, 100)
+        nditer = np.ndindex(1, 1, 10, 10)
         for ct in range(self.long_iterator_max):
             indices = list(next(nditer))
             # Replace the dimensions not iterated over with spanning slices.