Apply ruff/pyupgrade rules (UP) #426

Open · wants to merge 1 commit into master
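
For context: the ruff pyupgrade (UP) rules applied by this commit replace printf-style "%" string formatting with f-strings, and every hunk below follows that pattern. A minimal sketch of the rewrite, using hypothetical values rather than code from this repository:

# Before: printf-style formatting (the pattern flagged by the UP rules)
count = 3
name = "ome_zarr"
message = "Found %s datasets in %s" % (count, name)

# After: the equivalent f-string interpolates the values directly
message = f"Found {count} datasets in {name}"

Both forms produce the same string; assuming a current ruff release, the same rewrite can typically be reproduced locally with ruff's --select UP and --fix options.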
6 changes: 3 additions & 3 deletions ome_zarr/axes.py
@@ -71,7 +71,7 @@ def _validate_axes_types(self) -> None:
         unknown_types = [atype for atype in axes_types if atype not in known_types]
         if len(unknown_types) > 1:
             raise ValueError(
-                "Too many unknown axes types. 1 allowed, found: %s" % unknown_types
+                f"Too many unknown axes types. 1 allowed, found: {unknown_types}"
             )

         def _last_index(item: str, item_list: list[Any]) -> int:
@@ -93,7 +93,7 @@ def _get_names(self) -> list[str]:
         axes_names = []
         for axis in self.axes:
             if "name" not in axis:
-                raise ValueError("Axis Dict %s has no 'name'" % axis)
+                raise ValueError(f"Axis Dict {axis} has no 'name'")
             axes_names.append(axis["name"])
         return axes_names

@@ -106,7 +106,7 @@ def _validate_03(self) -> None:
             if val_axes not in [("z", "y", "x"), ("c", "y", "x"), ("t", "y", "x")]:
                 raise ValueError(
                     "3D data must have axes ('z', 'y', 'x') or ('c', 'y', 'x')"
-                    " or ('t', 'y', 'x'), not %s" % (val_axes,)
+                    f" or ('t', 'y', 'x'), not {val_axes}"
                 )
         elif len(val_axes) == 4:
             if val_axes not in [
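
One wrinkle worth noting in the hunk above: with %-formatting, a value that is itself a tuple must be wrapped in a one-element tuple, as in "% (val_axes,)", or it is unpacked as multiple arguments; the f-string interpolates the tuple directly. A quick illustration with a hypothetical value:

# val_axes is a tuple, as in _validate_03 above
val_axes = ("z", "y", "x")
"not %s" % (val_axes,)   # "not ('z', 'y', 'x')"
# "not %s" % val_axes    # TypeError: not all arguments converted during string formatting
f"not {val_axes}"        # "not ('z', 'y', 'x')"

The writer.py change further down drops the same "(image.shape,)" wrapping for the same reason.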
18 changes: 9 additions & 9 deletions ome_zarr/format.py
@@ -290,14 +290,14 @@ def validate_coordinate_transformations(
         ct_count = len(coordinate_transformations)
         if ct_count != nlevels:
             raise ValueError(
-                "coordinate_transformations count: %s must match datasets %s"
-                % (ct_count, nlevels)
+                f"coordinate_transformations count: {ct_count} must match "
+                f"datasets {nlevels}"
             )
         for transformations in coordinate_transformations:
             assert isinstance(transformations, list)
             types = [t.get("type", None) for t in transformations]
             if any([t is None for t in types]):
-                raise ValueError("Missing type in: %s" % transformations)
+                raise ValueError(f"Missing type in: {transformations}")
             # validate scales...
             if sum(t == "scale" for t in types) != 1:
                 raise ValueError(
@@ -308,12 +308,12 @@
                 raise ValueError("First coordinate_transformations must be 'scale'")
             first = transformations[0]
             if "scale" not in transformations[0]:
-                raise ValueError("Missing scale argument in: %s" % first)
+                raise ValueError(f"Missing scale argument in: {first}")
             scale = first["scale"]
             if len(scale) != ndim:
                 raise ValueError(
-                    "'scale' list %s must match number of image dimensions: %s"
-                    % (scale, ndim)
+                    f"'scale' list {scale} must match "
+                    f"number of image dimensions: {ndim}"
                 )
             for value in scale:
                 if not isinstance(value, (float, int)):
@@ -329,12 +329,12 @@
             elif sum(translation_types) == 1:
                 transformation = transformations[types.index("translation")]
                 if "translation" not in transformation:
-                    raise ValueError("Missing scale argument in: %s" % first)
+                    raise ValueError(f"Missing scale argument in: {first}")
                 translation = transformation["translation"]
                 if len(translation) != ndim:
                     raise ValueError(
-                        "'translation' list %s must match image dimensions count: %s"
-                        % (translation, ndim)
+                        f"'translation' list {translation} must match "
+                        f"image dimensions count: {ndim}"
                     )
                 for value in translation:
                     if not isinstance(value, (float, int)):
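
A detail of the format.py hunks above: the long messages are split across two adjacent f-string literals. Python concatenates adjacent string literals at compile time, so the pieces still form one message. A small illustration with hypothetical values:

scale = [1.0, 0.5]
ndim = 3
message = (
    f"'scale' list {scale} must match "
    f"number of image dimensions: {ndim}"
)
# message == "'scale' list [1.0, 0.5] must match number of image dimensions: 3"

Note the trailing space at the end of the first literal, which keeps the joined message readable.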
4 changes: 2 additions & 2 deletions ome_zarr/reader.py
@@ -301,7 +301,7 @@ def __init__(self, node: Node) -> None:
         for resolution in self.datasets:
             data: da.core.Array = self.array(resolution, version)
             chunk_sizes = [
-                str(c[0]) + (" (+ %s)" % c[-1] if c[-1] != c[0] else "")
+                str(c[0]) + (f" (+ {c[-1]})" if c[-1] != c[0] else "")
                 for c in data.chunks
             ]
             LOGGER.info("resolution: %s", resolution)
@@ -353,7 +353,7 @@ def __init__(self, node: Node) -> None:

         colormaps = []
         contrast_limits: Optional[list[Optional[Any]]] = [None for x in channels]
-        names: list[str] = [("channel_%d" % idx) for idx, ch in enumerate(channels)]
+        names: list[str] = [(f"channel_{idx}") for idx, ch in enumerate(channels)]
         visibles: list[bool] = [True for x in channels]

         for idx, ch in enumerate(channels):
2 changes: 1 addition & 1 deletion ome_zarr/scale.py
Expand Up @@ -147,7 +147,7 @@ def __create_group(
if i == 0:
path = "base"
else:
path = "%s" % i
path = str(i)
grp.create_dataset(path, data=pyramid[i])
series.append({"path": path})
return grp
2 changes: 1 addition & 1 deletion ome_zarr/writer.py
@@ -919,7 +919,7 @@ def _create_mip(
         if image.shape[-1] == 1 or image.shape[-2] == 1:
             raise ValueError(
                 "Can't downsample if size of x or y dimension is 1. "
-                "Shape: %s" % (image.shape,)
+                f"Shape: {image.shape}"
             )
         mip = scaler.func(image)
     else: