Ruff upgrade #7741

Merged: 2 commits, Mar 5, 2025

Changes from all commits
2 changes: 1 addition & 1 deletion .github/workflows/python-checks.yml
@@ -62,7 +62,7 @@ jobs:

- name: install ruff
if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
- run: pip install ruff==0.6.0
+ run: pip install ruff==0.9.9
shell: bash

- name: ruff check
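The only CI change is the version pin. For anyone reproducing the check locally, a minimal sketch (assuming ruff is installed into the active environment with the same pin; the exact set of ruff subcommands the workflow runs beyond `ruff check` is not shown in this hunk):

```python
import subprocess

# Confirm the local ruff matches the version pinned in CI before formatting anything.
version = subprocess.run(["ruff", "--version"], capture_output=True, text=True, check=True)
print(version.stdout.strip())  # expect something like "ruff 0.9.9"

# Run the same kind of checks CI runs: lint, plus a formatting check.
subprocess.run(["ruff", "check", "."], check=False)
subprocess.run(["ruff", "format", "--check", "."], check=False)
```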
2 changes: 1 addition & 1 deletion invokeai/app/invocations/baseinvocation.py
@@ -417,7 +417,7 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None

ui_type = field.json_schema_extra.get("ui_type", None)
if isinstance(ui_type, str) and ui_type.startswith("DEPRECATED_"):
- logger.warn(f"\"UIType.{ui_type.split('_')[-1]}\" is deprecated, ignoring")
+ logger.warn(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
field.json_schema_extra.pop("ui_type")
return None

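This hunk (and the similar one in compel.py below) is pure quote normalization from the newer formatter. A minimal sketch with a made-up `ui_type` value, showing the two spellings render the same message:

```python
ui_type = "DEPRECATED_Scheduler"  # hypothetical value, for illustration only

old_style = f"\"UIType.{ui_type.split('_')[-1]}\" is deprecated, ignoring"
new_style = f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring'

# The reformat is purely cosmetic: both spellings produce the identical string.
assert old_style == new_style
print(new_style)  # "UIType.Scheduler" is deprecated, ignoring
```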
2 changes: 1 addition & 1 deletion invokeai/app/invocations/compel.py
@@ -513,7 +513,7 @@ def log_tokenization_for_text(
usedTokens += 1

if usedTokens > 0:
- print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')
+ print(f"\n>> [TOKENLOG] Tokens {display_label or ''} ({usedTokens}):")
print(f"{tokenized}\x1b[0m")

if discarded != "":
6 changes: 3 additions & 3 deletions invokeai/app/invocations/segment_anything.py
@@ -185,9 +185,9 @@ def _filter_masks(
# Find the largest mask.
return [max(masks, key=lambda x: float(x.sum()))]
elif self.mask_filter == "highest_box_score":
- assert (
- bounding_boxes is not None
- ), "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
+ assert bounding_boxes is not None, (
+ "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
+ )
assert len(masks) == len(bounding_boxes)
# Find the index of the bounding box with the highest score.
# Note that we fallback to -1.0 if the score is None. This is mainly to satisfy the type checker. In most
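Many hunks in this PR are this same pattern: ruff 0.9's formatter now keeps a long assert condition on one line and parenthesizes the message, instead of wrapping the condition. A minimal sketch of the two layouts (the names below are placeholders, not code from this repo):

```python
bounding_boxes = [(0, 0, 16, 16)]  # placeholder data so the assert passes

# Old layout: the condition is parenthesized and split across lines.
# assert (
#     bounding_boxes is not None
# ), "Bounding boxes must be provided to use the 'highest_box_score' mask filter."

# New layout: the condition stays intact and the message is parenthesized,
# which keeps the thing being asserted readable at a glance.
assert bounding_boxes is not None, (
    "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
)
```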
6 changes: 3 additions & 3 deletions invokeai/app/services/config/config_default.py
@@ -476,9 +476,9 @@ def load_and_migrate_config(config_path: Path) -> InvokeAIAppConfig:
try:
# Meta is not included in the model fields, so we need to validate it separately
config = InvokeAIAppConfig.model_validate(loaded_config_dict)
- assert (
- config.schema_version == CONFIG_SCHEMA_VERSION
- ), f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
+ assert config.schema_version == CONFIG_SCHEMA_VERSION, (
+ f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
+ )
return config
except Exception as e:
raise RuntimeError(f"Failed to load config file {config_path}: {e}") from e
10 changes: 5 additions & 5 deletions invokeai/backend/image_util/pngwriter.py
@@ -91,10 +91,10 @@ def normalize_prompt(self):

switches = []
switches.append(f'"{opt.prompt}"')
- switches.append(f"-s{opt.steps or t2i.steps}")
- switches.append(f"-W{opt.width or t2i.width}")
- switches.append(f"-H{opt.height or t2i.height}")
- switches.append(f"-C{opt.cfg_scale or t2i.cfg_scale}")
+ switches.append(f"-s{opt.steps or t2i.steps}")
+ switches.append(f"-W{opt.width or t2i.width}")
+ switches.append(f"-H{opt.height or t2i.height}")
+ switches.append(f"-C{opt.cfg_scale or t2i.cfg_scale}")
switches.append(f"-A{opt.sampler_name or t2i.sampler_name}")
# to do: put model name into the t2i object
# switches.append(f'--model{t2i.model_name}')
@@ -109,7 +109,7 @@ def normalize_prompt(self):
if opt.gfpgan_strength:
switches.append(f"-G{opt.gfpgan_strength}")
if opt.upscale:
- switches.append(f'-U {" ".join([str(u) for u in opt.upscale])}')
+ switches.append(f"-U {' '.join([str(u) for u in opt.upscale])}")
if opt.variation_amount > 0:
switches.append(f"-v{opt.variation_amount}")
if opt.with_variations:
2 changes: 1 addition & 1 deletion invokeai/backend/model_manager/load/memory_snapshot.py
@@ -70,7 +70,7 @@ def get_pretty_snapshot_diff(snapshot_1: Optional[MemorySnapshot], snapshot_2: O

def get_msg_line(prefix: str, val1: int, val2: int) -> str:
diff = val2 - val1
- return f"{prefix: <30} ({(diff/GB):+5.3f}): {(val1/GB):5.3f}GB -> {(val2/GB):5.3f}GB\n"
+ return f"{prefix: <30} ({(diff / GB):+5.3f}): {(val1 / GB):5.3f}GB -> {(val2 / GB):5.3f}GB\n"

msg = ""

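Most of the remaining hunks are one mechanical change: the 0.9 formatter now formats expressions inside f-string replacement fields, so binary operators such as `/` gain surrounding spaces. A small sketch with placeholder values, showing the rendered output is unchanged:

```python
GB = 2**30  # placeholder constant, mirroring the unit used in these log lines
prefix, val1, val2 = "vram", 3 * GB, 5 * GB
diff = val2 - val1

old_style = f"{prefix: <30} ({(diff/GB):+5.3f}): {(val1/GB):5.3f}GB -> {(val2/GB):5.3f}GB"
new_style = f"{prefix: <30} ({(diff / GB):+5.3f}): {(val1 / GB):5.3f}GB -> {(val2 / GB):5.3f}GB"

# Only the source text changes; the formatted string is byte-for-byte identical.
assert old_style == new_style
```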
30 changes: 16 additions & 14 deletions invokeai/backend/model_manager/load/model_cache/model_cache.py
@@ -192,7 +192,7 @@ def put(self, key: str, model: AnyModel) -> None:
self._cached_models[key] = cache_record
self._cache_stack.append(key)
self._logger.debug(
- f"Added model {key} (Type: {model.__class__.__name__}, Wrap mode: {wrapped_model.__class__.__name__}, Model size: {size/MB:.2f}MB)"
+ f"Added model {key} (Type: {model.__class__.__name__}, Wrap mode: {wrapped_model.__class__.__name__}, Model size: {size / MB:.2f}MB)"
)

@synchronized
@@ -303,7 +303,7 @@ def _load_locked_model(self, cache_entry: CacheRecord, working_mem_bytes: Option
# 2. If the model can't fit fully into VRAM, then unload all other models and load as much of the model as
# possible.
vram_bytes_freed = self._offload_unlocked_models(model_vram_needed, working_mem_bytes)
- self._logger.debug(f"Unloaded models (if necessary): vram_bytes_freed={(vram_bytes_freed/MB):.2f}MB")
+ self._logger.debug(f"Unloaded models (if necessary): vram_bytes_freed={(vram_bytes_freed / MB):.2f}MB")

# Check the updated vram_available after offloading.
vram_available = self._get_vram_available(working_mem_bytes)
@@ -317,7 +317,7 @@ def _load_locked_model(self, cache_entry: CacheRecord, working_mem_bytes: Option
vram_bytes_freed_from_own_model = self._move_model_to_ram(cache_entry, -vram_available)
vram_available = self._get_vram_available(working_mem_bytes)
self._logger.debug(
- f"Unloaded {vram_bytes_freed_from_own_model/MB:.2f}MB from the model being locked ({cache_entry.key})."
+ f"Unloaded {vram_bytes_freed_from_own_model / MB:.2f}MB from the model being locked ({cache_entry.key})."
)

# Move as much of the model as possible into VRAM.
@@ -333,10 +333,12 @@ def _load_locked_model(self, cache_entry: CacheRecord, working_mem_bytes: Option
self._logger.info(
f"Loaded model '{cache_entry.key}' ({cache_entry.cached_model.model.__class__.__name__}) onto "
f"{self._execution_device.type} device in {(time.time() - start_time):.2f}s. "
- f"Total model size: {model_total_bytes/MB:.2f}MB, "
- f"VRAM: {model_cur_vram_bytes/MB:.2f}MB ({loaded_percent:.1%})"
+ f"Total model size: {model_total_bytes / MB:.2f}MB, "
+ f"VRAM: {model_cur_vram_bytes / MB:.2f}MB ({loaded_percent:.1%})"
)
- self._logger.debug(f"Loaded model onto execution device: model_bytes_loaded={(model_bytes_loaded/MB):.2f}MB, ")
+ self._logger.debug(
+ f"Loaded model onto execution device: model_bytes_loaded={(model_bytes_loaded / MB):.2f}MB, "
+ )
self._logger.debug(
f"After loading: {self._get_vram_state_str(model_cur_vram_bytes, model_total_bytes, vram_available)}"
)
@@ -495,10 +497,10 @@ def _get_vram_state_str(self, model_cur_vram_bytes: int, model_total_bytes: int,
"""Helper function for preparing a VRAM state log string."""
model_cur_vram_bytes_percent = model_cur_vram_bytes / model_total_bytes if model_total_bytes > 0 else 0
return (
- f"model_total={model_total_bytes/MB:.0f} MB, "
- + f"model_vram={model_cur_vram_bytes/MB:.0f} MB ({model_cur_vram_bytes_percent:.1%} %), "
+ f"model_total={model_total_bytes / MB:.0f} MB, "
+ + f"model_vram={model_cur_vram_bytes / MB:.0f} MB ({model_cur_vram_bytes_percent:.1%} %), "
# + f"vram_total={int(self._max_vram_cache_size * GB)/MB:.0f} MB, "
- + f"vram_available={(vram_available/MB):.0f} MB, "
+ + f"vram_available={(vram_available / MB):.0f} MB, "
)

def _offload_unlocked_models(self, vram_bytes_required: int, working_mem_bytes: Optional[int] = None) -> int:
@@ -509,7 +511,7 @@ def _offload_unlocked_models(self, vram_bytes_required: int, working_mem_bytes:
int: The number of bytes freed based on believed model sizes. The actual change in VRAM may be different.
"""
self._logger.debug(
- f"Offloading unlocked models with goal of making room for {vram_bytes_required/MB:.2f}MB of VRAM."
+ f"Offloading unlocked models with goal of making room for {vram_bytes_required / MB:.2f}MB of VRAM."
)
vram_bytes_freed = 0
# TODO(ryand): Give more thought to the offloading policy used here.
@@ -527,7 +529,7 @@ def _offload_unlocked_models(self, vram_bytes_required: int, working_mem_bytes:
cache_entry_bytes_freed = self._move_model_to_ram(cache_entry, vram_bytes_to_free)
if cache_entry_bytes_freed > 0:
self._logger.debug(
- f"Unloaded {cache_entry.key} from VRAM to free {(cache_entry_bytes_freed/MB):.0f} MB."
+ f"Unloaded {cache_entry.key} from VRAM to free {(cache_entry_bytes_freed / MB):.0f} MB."
)
vram_bytes_freed += cache_entry_bytes_freed

@@ -609,7 +611,7 @@ def make_room(self, bytes_needed: int) -> None:
external references to the model, there's nothing that the cache can do about it, and those models will not be
garbage-collected.
"""
- self._logger.debug(f"Making room for {bytes_needed/MB:.2f}MB of RAM.")
+ self._logger.debug(f"Making room for {bytes_needed / MB:.2f}MB of RAM.")
self._log_cache_state(title="Before dropping models:")

ram_bytes_available = self._get_ram_available()
@@ -625,7 +627,7 @@ def make_room(self, bytes_needed: int) -> None:
if not cache_entry.is_locked:
ram_bytes_freed += cache_entry.cached_model.total_bytes()
self._logger.debug(
- f"Dropping {model_key} from RAM cache to free {(cache_entry.cached_model.total_bytes()/MB):.2f}MB."
+ f"Dropping {model_key} from RAM cache to free {(cache_entry.cached_model.total_bytes() / MB):.2f}MB."
)
self._delete_cache_entry(cache_entry)
del cache_entry
@@ -650,7 +652,7 @@ def make_room(self, bytes_needed: int) -> None:
gc.collect()

TorchDevice.empty_cache()
- self._logger.debug(f"Dropped {models_cleared} models to free {ram_bytes_freed/MB:.2f}MB of RAM.")
+ self._logger.debug(f"Dropped {models_cleared} models to free {ram_bytes_freed / MB:.2f}MB of RAM.")
self._log_cache_state(title="After dropping models:")

def _delete_cache_entry(self, cache_entry: CacheRecord) -> None:
18 changes: 9 additions & 9 deletions invokeai/backend/model_manager/merge.py
@@ -115,19 +115,19 @@ def merge_diffusion_models_and_save(
base_models: Set[BaseModelType] = set()
variant = None if self._installer.app_config.precision == "float32" else "fp16"

- assert (
- len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference
- ), "When merging three models, only the 'add_difference' merge method is supported"
+ assert len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference, (
+ "When merging three models, only the 'add_difference' merge method is supported"
+ )

for key in model_keys:
info = store.get_model(key)
model_names.append(info.name)
- assert isinstance(
- info, MainDiffusersConfig
- ), f"{info.name} ({info.key}) is not a diffusers model. It must be optimized before merging"
- assert info.variant == ModelVariantType(
- "normal"
- ), f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged"
+ assert isinstance(info, MainDiffusersConfig), (
+ f"{info.name} ({info.key}) is not a diffusers model. It must be optimized before merging"
+ )
+ assert info.variant == ModelVariantType("normal"), (
+ f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged"
+ )

# tally base models used
base_models.add(info.base)
14 changes: 8 additions & 6 deletions invokeai/backend/model_manager/util/libc_util.py
@@ -37,19 +37,21 @@ class Struct_mallinfo2(ctypes.Structure):

def __str__(self) -> str:
s = ""
- s += f"{'arena': <10}= {(self.arena/2**30):15.5f} # Non-mmapped space allocated (GB) (uordblks + fordblks)\n"
+ s += (
+ f"{'arena': <10}= {(self.arena / 2**30):15.5f} # Non-mmapped space allocated (GB) (uordblks + fordblks)\n"
+ )
s += f"{'ordblks': <10}= {(self.ordblks): >15} # Number of free chunks\n"
s += f"{'smblks': <10}= {(self.smblks): >15} # Number of free fastbin blocks \n"
s += f"{'hblks': <10}= {(self.hblks): >15} # Number of mmapped regions \n"
- s += f"{'hblkhd': <10}= {(self.hblkhd/2**30):15.5f} # Space allocated in mmapped regions (GB)\n"
+ s += f"{'hblkhd': <10}= {(self.hblkhd / 2**30):15.5f} # Space allocated in mmapped regions (GB)\n"
s += f"{'usmblks': <10}= {(self.usmblks): >15} # Unused\n"
- s += f"{'fsmblks': <10}= {(self.fsmblks/2**30):15.5f} # Space in freed fastbin blocks (GB)\n"
+ s += f"{'fsmblks': <10}= {(self.fsmblks / 2**30):15.5f} # Space in freed fastbin blocks (GB)\n"
s += (
- f"{'uordblks': <10}= {(self.uordblks/2**30):15.5f} # Space used by in-use allocations (non-mmapped)"
+ f"{'uordblks': <10}= {(self.uordblks / 2**30):15.5f} # Space used by in-use allocations (non-mmapped)"
" (GB)\n"
)
- s += f"{'fordblks': <10}= {(self.fordblks/2**30):15.5f} # Space in free blocks (non-mmapped) (GB)\n"
- s += f"{'keepcost': <10}= {(self.keepcost/2**30):15.5f} # Top-most, releasable space (GB)\n"
+ s += f"{'fordblks': <10}= {(self.fordblks / 2**30):15.5f} # Space in free blocks (non-mmapped) (GB)\n"
+ s += f"{'keepcost': <10}= {(self.keepcost / 2**30):15.5f} # Top-most, releasable space (GB)\n"
return s


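Note that in this hunk `/` gains spaces while `2**30` stays hugged; that appears to follow the Black-derived rule of keeping `**` tight when both operands are simple. A tiny sketch with a placeholder byte count:

```python
arena = 7 * 2**30  # placeholder byte count, for illustration only

# "/" is spaced out by the formatter, but simple "**" operands stay hugged.
print(f"{(arena / 2**30):15.5f}")  # '        7.00000'
```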
@@ -73,36 +73,36 @@ def _make_sdxl_unet_conversion_map() -> List[Tuple[str, str]]:
for j in range(2):
# loop over resnets/attentions for downblocks
hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
- sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
+ sd_down_res_prefix = f"input_blocks.{3 * i + j + 1}.0."
unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))

if i < 3:
# no attention layers in down_blocks.3
hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
- sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
+ sd_down_atn_prefix = f"input_blocks.{3 * i + j + 1}.1."
unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))

for j in range(3):
# loop over resnets/attentions for upblocks
hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
- sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
+ sd_up_res_prefix = f"output_blocks.{3 * i + j}.0."
unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))

# if i > 0: commentout for sdxl
# no attention layers in up_blocks.0
hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
- sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
+ sd_up_atn_prefix = f"output_blocks.{3 * i + j}.1."
unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))

if i < 3:
# no downsample in down_blocks.3
hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
- sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
+ sd_downsample_prefix = f"input_blocks.{3 * (i + 1)}.0.op."
unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))

# no upsample in up_blocks.3
hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
- sd_upsample_prefix = f"output_blocks.{3*i + 2}.{2}." # change for sdxl
+ sd_upsample_prefix = f"output_blocks.{3 * i + 2}.{2}." # change for sdxl
unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))

hf_mid_atn_prefix = "mid_block.attentions.0."
@@ -111,7 +111,7 @@ def _make_sdxl_unet_conversion_map() -> List[Tuple[str, str]]:

for j in range(2):
hf_mid_res_prefix = f"mid_block.resnets.{j}."
- sd_mid_res_prefix = f"middle_block.{2*j}."
+ sd_mid_res_prefix = f"middle_block.{2 * j}."
unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))

unet_conversion_map_resnet = [
@@ -133,13 +133,13 @@ def _make_sdxl_unet_conversion_map() -> List[Tuple[str, str]]:
unet_conversion_map.append((sd, hf))

for j in range(2):
- hf_time_embed_prefix = f"time_embedding.linear_{j+1}."
- sd_time_embed_prefix = f"time_embed.{j*2}."
+ hf_time_embed_prefix = f"time_embedding.linear_{j + 1}."
+ sd_time_embed_prefix = f"time_embed.{j * 2}."
unet_conversion_map.append((sd_time_embed_prefix, hf_time_embed_prefix))

for j in range(2):
- hf_label_embed_prefix = f"add_embedding.linear_{j+1}."
- sd_label_embed_prefix = f"label_emb.0.{j*2}."
+ hf_label_embed_prefix = f"add_embedding.linear_{j + 1}."
+ sd_label_embed_prefix = f"label_emb.0.{j * 2}."
unet_conversion_map.append((sd_label_embed_prefix, hf_label_embed_prefix))

unet_conversion_map.append(("input_blocks.0.0.", "conv_in."))
4 changes: 2 additions & 2 deletions pyproject.toml
@@ -115,8 +115,8 @@ dependencies = [
]
"dev" = ["jurigged", "pudb", "snakeviz", "gprof2dot"]
"test" = [
- "ruff>=0.3.3",
- "ruff-lsp>=0.0.53",
+ "ruff~=0.9.9",
+ "ruff-lsp~=0.0.62",
"mypy",
"pre-commit",
"pytest>6.0.0",
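The test extra now pins ruff with the compatible-release operator rather than a plain lower bound, so a future 0.10.x (which could format code differently) is not picked up implicitly. A short sketch of what `~=0.9.9` accepts, using the `packaging` library:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

spec = SpecifierSet("~=0.9.9")  # equivalent to: >=0.9.9, ==0.9.*

print(Version("0.9.9") in spec)   # True
print(Version("0.9.10") in spec)  # True  -- newer patch releases are allowed
print(Version("0.10.0") in spec)  # False -- a new minor series is excluded
```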
12 changes: 6 additions & 6 deletions tests/app/services/download/test_download_queue.py
@@ -212,12 +212,12 @@ def event_handler(job: DownloadJob | MultiFileDownloadJob, excp: Optional[Except
assert job.bytes > 0, "expected download bytes to be positive"
assert job.bytes == job.total_bytes, "expected download bytes to equal total bytes"
assert job.download_path == tmp_path / "sdxl-turbo"
- assert Path(
- tmp_path, "sdxl-turbo/model_index.json"
- ).exists(), f"expected {tmp_path}/sdxl-turbo/model_inded.json to exist"
- assert Path(
- tmp_path, "sdxl-turbo/text_encoder/config.json"
- ).exists(), f"expected {tmp_path}/sdxl-turbo/text_encoder/config.json to exist"
+ assert Path(tmp_path, "sdxl-turbo/model_index.json").exists(), (
+ f"expected {tmp_path}/sdxl-turbo/model_inded.json to exist"
+ )
+ assert Path(tmp_path, "sdxl-turbo/text_encoder/config.json").exists(), (
+ f"expected {tmp_path}/sdxl-turbo/text_encoder/config.json to exist"
+ )

assert events == {DownloadJobStatus.RUNNING, DownloadJobStatus.COMPLETED}
queue.stop()