diff --git a/.github/workflows/python-checks.yml b/.github/workflows/python-checks.yml
index 40d028826b3..7ccac8e16fb 100644
--- a/.github/workflows/python-checks.yml
+++ b/.github/workflows/python-checks.yml
@@ -62,7 +62,7 @@ jobs:
 
       - name: install ruff
         if: ${{ steps.changed-files.outputs.python_any_changed == 'true' || inputs.always_run == true }}
-        run: pip install ruff==0.6.0
+        run: pip install ruff==0.9.9
         shell: bash
 
       - name: ruff check
diff --git a/invokeai/app/invocations/baseinvocation.py b/invokeai/app/invocations/baseinvocation.py
index 046b0875280..31ac02ce8ec 100644
--- a/invokeai/app/invocations/baseinvocation.py
+++ b/invokeai/app/invocations/baseinvocation.py
@@ -417,7 +417,7 @@ def validate_fields(model_fields: dict[str, FieldInfo], model_type: str) -> None
 
         ui_type = field.json_schema_extra.get("ui_type", None)
         if isinstance(ui_type, str) and ui_type.startswith("DEPRECATED_"):
-            logger.warn(f"\"UIType.{ui_type.split('_')[-1]}\" is deprecated, ignoring")
+            logger.warn(f'"UIType.{ui_type.split("_")[-1]}" is deprecated, ignoring')
             field.json_schema_extra.pop("ui_type")
 
     return None
diff --git a/invokeai/app/invocations/compel.py b/invokeai/app/invocations/compel.py
index d73709d8e86..1310b5a38b6 100644
--- a/invokeai/app/invocations/compel.py
+++ b/invokeai/app/invocations/compel.py
@@ -513,7 +513,7 @@ def log_tokenization_for_text(
             usedTokens += 1
 
     if usedTokens > 0:
-        print(f'\n>> [TOKENLOG] Tokens {display_label or ""} ({usedTokens}):')
+        print(f"\n>> [TOKENLOG] Tokens {display_label or ''} ({usedTokens}):")
         print(f"{tokenized}\x1b[0m")
 
     if discarded != "":
diff --git a/invokeai/app/invocations/segment_anything.py b/invokeai/app/invocations/segment_anything.py
index 91517cb8e99..9b0000a247f 100644
--- a/invokeai/app/invocations/segment_anything.py
+++ b/invokeai/app/invocations/segment_anything.py
@@ -185,9 +185,9 @@ def _filter_masks(
             # Find the largest mask.
             return [max(masks, key=lambda x: float(x.sum()))]
         elif self.mask_filter == "highest_box_score":
-            assert (
-                bounding_boxes is not None
-            ), "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
+            assert bounding_boxes is not None, (
+                "Bounding boxes must be provided to use the 'highest_box_score' mask filter."
+            )
             assert len(masks) == len(bounding_boxes)
             # Find the index of the bounding box with the highest score.
             # Note that we fallback to -1.0 if the score is None. This is mainly to satisfy the type checker.
diff --git a/invokeai/app/services/config/config_default.py b/invokeai/app/services/config/config_default.py
index a758e7e1a70..712dd218e5c 100644
--- a/invokeai/app/services/config/config_default.py
+++ b/invokeai/app/services/config/config_default.py
@@ -476,9 +476,9 @@ def load_and_migrate_config(config_path: Path) -> InvokeAIAppConfig:
     try:
         # Meta is not included in the model fields, so we need to validate it separately
         config = InvokeAIAppConfig.model_validate(loaded_config_dict)
-        assert (
-            config.schema_version == CONFIG_SCHEMA_VERSION
-        ), f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
+        assert config.schema_version == CONFIG_SCHEMA_VERSION, (
+            f"Invalid schema version, expected {CONFIG_SCHEMA_VERSION}: {config.schema_version}"
+        )
         return config
     except Exception as e:
         raise RuntimeError(f"Failed to load config file {config_path}: {e}") from e
diff --git a/invokeai/backend/image_util/pngwriter.py b/invokeai/backend/image_util/pngwriter.py
index f537b4681c9..1f4b42fe217 100644
--- a/invokeai/backend/image_util/pngwriter.py
+++ b/invokeai/backend/image_util/pngwriter.py
@@ -91,10 +91,10 @@ def normalize_prompt(self):
 
         switches = []
         switches.append(f'"{opt.prompt}"')
-        switches.append(f"-s{opt.steps        or t2i.steps}")
-        switches.append(f"-W{opt.width        or t2i.width}")
-        switches.append(f"-H{opt.height       or t2i.height}")
-        switches.append(f"-C{opt.cfg_scale    or t2i.cfg_scale}")
+        switches.append(f"-s{opt.steps or t2i.steps}")
+        switches.append(f"-W{opt.width or t2i.width}")
+        switches.append(f"-H{opt.height or t2i.height}")
+        switches.append(f"-C{opt.cfg_scale or t2i.cfg_scale}")
         switches.append(f"-A{opt.sampler_name or t2i.sampler_name}")
         # to do: put model name into the t2i object
         # switches.append(f'--model{t2i.model_name}')
@@ -109,7 +109,7 @@ def normalize_prompt(self):
         if opt.gfpgan_strength:
             switches.append(f"-G{opt.gfpgan_strength}")
         if opt.upscale:
-            switches.append(f'-U {" ".join([str(u) for u in opt.upscale])}')
+            switches.append(f"-U {' '.join([str(u) for u in opt.upscale])}")
         if opt.variation_amount > 0:
             switches.append(f"-v{opt.variation_amount}")
         if opt.with_variations:
diff --git a/invokeai/backend/model_manager/load/memory_snapshot.py b/invokeai/backend/model_manager/load/memory_snapshot.py
index 66dd0709632..7b693bf8318 100644
--- a/invokeai/backend/model_manager/load/memory_snapshot.py
+++ b/invokeai/backend/model_manager/load/memory_snapshot.py
@@ -70,7 +70,7 @@ def get_pretty_snapshot_diff(snapshot_1: Optional[MemorySnapshot], snapshot_2: O
 
     def get_msg_line(prefix: str, val1: int, val2: int) -> str:
         diff = val2 - val1
-        return f"{prefix: <30} ({(diff/GB):+5.3f}): {(val1/GB):5.3f}GB -> {(val2/GB):5.3f}GB\n"
+        return f"{prefix: <30} ({(diff / GB):+5.3f}): {(val1 / GB):5.3f}GB -> {(val2 / GB):5.3f}GB\n"
 
     msg = ""
 
diff --git a/invokeai/backend/model_manager/load/model_cache/model_cache.py b/invokeai/backend/model_manager/load/model_cache/model_cache.py
index ea375996b62..466f3c399cd 100644
--- a/invokeai/backend/model_manager/load/model_cache/model_cache.py
+++ b/invokeai/backend/model_manager/load/model_cache/model_cache.py
@@ -192,7 +192,7 @@ def put(self, key: str, model: AnyModel) -> None:
         self._cached_models[key] = cache_record
         self._cache_stack.append(key)
         self._logger.debug(
-            f"Added model {key} (Type: {model.__class__.__name__}, Wrap mode: {wrapped_model.__class__.__name__}, Model size: {size/MB:.2f}MB)"
+            f"Added model {key} (Type: {model.__class__.__name__}, Wrap mode: {wrapped_model.__class__.__name__}, Model size: {size / MB:.2f}MB)"
         )
 
     @synchronized
@@ -303,7 +303,7 @@ def _load_locked_model(self, cache_entry: CacheRecord, working_mem_bytes: Option
         # 2. If the model can't fit fully into VRAM, then unload all other models and load as much of the model as
         #    possible.
         vram_bytes_freed = self._offload_unlocked_models(model_vram_needed, working_mem_bytes)
-        self._logger.debug(f"Unloaded models (if necessary): vram_bytes_freed={(vram_bytes_freed/MB):.2f}MB")
+        self._logger.debug(f"Unloaded models (if necessary): vram_bytes_freed={(vram_bytes_freed / MB):.2f}MB")
 
         # Check the updated vram_available after offloading.
         vram_available = self._get_vram_available(working_mem_bytes)
@@ -317,7 +317,7 @@ def _load_locked_model(self, cache_entry: CacheRecord, working_mem_bytes: Option
             vram_bytes_freed_from_own_model = self._move_model_to_ram(cache_entry, -vram_available)
             vram_available = self._get_vram_available(working_mem_bytes)
             self._logger.debug(
-                f"Unloaded {vram_bytes_freed_from_own_model/MB:.2f}MB from the model being locked ({cache_entry.key})."
+                f"Unloaded {vram_bytes_freed_from_own_model / MB:.2f}MB from the model being locked ({cache_entry.key})."
             )
 
         # Move as much of the model as possible into VRAM.
@@ -333,10 +333,12 @@ def _load_locked_model(self, cache_entry: CacheRecord, working_mem_bytes: Option
         self._logger.info(
             f"Loaded model '{cache_entry.key}' ({cache_entry.cached_model.model.__class__.__name__}) onto "
             f"{self._execution_device.type} device in {(time.time() - start_time):.2f}s. "
-            f"Total model size: {model_total_bytes/MB:.2f}MB, "
-            f"VRAM: {model_cur_vram_bytes/MB:.2f}MB ({loaded_percent:.1%})"
+            f"Total model size: {model_total_bytes / MB:.2f}MB, "
+            f"VRAM: {model_cur_vram_bytes / MB:.2f}MB ({loaded_percent:.1%})"
+        )
+        self._logger.debug(
+            f"Loaded model onto execution device: model_bytes_loaded={(model_bytes_loaded / MB):.2f}MB, "
         )
-        self._logger.debug(f"Loaded model onto execution device: model_bytes_loaded={(model_bytes_loaded/MB):.2f}MB, ")
         self._logger.debug(
             f"After loading: {self._get_vram_state_str(model_cur_vram_bytes, model_total_bytes, vram_available)}"
         )
@@ -495,10 +497,10 @@ def _get_vram_state_str(self, model_cur_vram_bytes: int, model_total_bytes: int,
         """Helper function for preparing a VRAM state log string."""
         model_cur_vram_bytes_percent = model_cur_vram_bytes / model_total_bytes if model_total_bytes > 0 else 0
         return (
-            f"model_total={model_total_bytes/MB:.0f} MB, "
-            + f"model_vram={model_cur_vram_bytes/MB:.0f} MB ({model_cur_vram_bytes_percent:.1%} %), "
+            f"model_total={model_total_bytes / MB:.0f} MB, "
+            + f"model_vram={model_cur_vram_bytes / MB:.0f} MB ({model_cur_vram_bytes_percent:.1%} %), "
             # + f"vram_total={int(self._max_vram_cache_size * GB)/MB:.0f} MB, "
-            + f"vram_available={(vram_available/MB):.0f} MB, "
+            + f"vram_available={(vram_available / MB):.0f} MB, "
         )
 
     def _offload_unlocked_models(self, vram_bytes_required: int, working_mem_bytes: Optional[int] = None) -> int:
@@ -509,7 +511,7 @@ def _offload_unlocked_models(self, vram_bytes_required: int, working_mem_bytes:
             int: The number of bytes freed based on believed model sizes. The actual change in VRAM may be different.
         """
         self._logger.debug(
-            f"Offloading unlocked models with goal of making room for {vram_bytes_required/MB:.2f}MB of VRAM."
+            f"Offloading unlocked models with goal of making room for {vram_bytes_required / MB:.2f}MB of VRAM."
         )
         vram_bytes_freed = 0
         # TODO(ryand): Give more thought to the offloading policy used here.
@@ -527,7 +529,7 @@ def _offload_unlocked_models(self, vram_bytes_required: int, working_mem_bytes:
             cache_entry_bytes_freed = self._move_model_to_ram(cache_entry, vram_bytes_to_free)
             if cache_entry_bytes_freed > 0:
                 self._logger.debug(
-                    f"Unloaded {cache_entry.key} from VRAM to free {(cache_entry_bytes_freed/MB):.0f} MB."
+                    f"Unloaded {cache_entry.key} from VRAM to free {(cache_entry_bytes_freed / MB):.0f} MB."
                 )
             vram_bytes_freed += cache_entry_bytes_freed
 
@@ -609,7 +611,7 @@ def make_room(self, bytes_needed: int) -> None:
         external references to the model, there's nothing that the cache can do about it, and those models will not
         be garbage-collected.
         """
-        self._logger.debug(f"Making room for {bytes_needed/MB:.2f}MB of RAM.")
+        self._logger.debug(f"Making room for {bytes_needed / MB:.2f}MB of RAM.")
         self._log_cache_state(title="Before dropping models:")
 
         ram_bytes_available = self._get_ram_available()
@@ -625,7 +627,7 @@ def make_room(self, bytes_needed: int) -> None:
             if not cache_entry.is_locked:
                 ram_bytes_freed += cache_entry.cached_model.total_bytes()
                 self._logger.debug(
-                    f"Dropping {model_key} from RAM cache to free {(cache_entry.cached_model.total_bytes()/MB):.2f}MB."
+                    f"Dropping {model_key} from RAM cache to free {(cache_entry.cached_model.total_bytes() / MB):.2f}MB."
                 )
                 self._delete_cache_entry(cache_entry)
                 del cache_entry
@@ -650,7 +652,7 @@ def make_room(self, bytes_needed: int) -> None:
             gc.collect()
 
         TorchDevice.empty_cache()
-        self._logger.debug(f"Dropped {models_cleared} models to free {ram_bytes_freed/MB:.2f}MB of RAM.")
+        self._logger.debug(f"Dropped {models_cleared} models to free {ram_bytes_freed / MB:.2f}MB of RAM.")
         self._log_cache_state(title="After dropping models:")
 
     def _delete_cache_entry(self, cache_entry: CacheRecord) -> None:
diff --git a/invokeai/backend/model_manager/merge.py b/invokeai/backend/model_manager/merge.py
index b00bc99f3e2..03056b10f59 100644
--- a/invokeai/backend/model_manager/merge.py
+++ b/invokeai/backend/model_manager/merge.py
@@ -115,19 +115,19 @@ def merge_diffusion_models_and_save(
         base_models: Set[BaseModelType] = set()
         variant = None if self._installer.app_config.precision == "float32" else "fp16"
 
-        assert (
-            len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference
-        ), "When merging three models, only the 'add_difference' merge method is supported"
+        assert len(model_keys) <= 2 or interp == MergeInterpolationMethod.AddDifference, (
+            "When merging three models, only the 'add_difference' merge method is supported"
+        )
 
         for key in model_keys:
             info = store.get_model(key)
             model_names.append(info.name)
-            assert isinstance(
-                info, MainDiffusersConfig
-            ), f"{info.name} ({info.key}) is not a diffusers model. It must be optimized before merging"
-            assert info.variant == ModelVariantType(
-                "normal"
-            ), f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged"
+            assert isinstance(info, MainDiffusersConfig), (
+                f"{info.name} ({info.key}) is not a diffusers model. It must be optimized before merging"
+            )
+            assert info.variant == ModelVariantType("normal"), (
+                f"{info.name} ({info.key}) is a {info.variant} model, which cannot currently be merged"
+            )
 
             # tally base models used
             base_models.add(info.base)
diff --git a/invokeai/backend/model_manager/util/libc_util.py b/invokeai/backend/model_manager/util/libc_util.py
index ef1ac2f8a4b..8d104093085 100644
--- a/invokeai/backend/model_manager/util/libc_util.py
+++ b/invokeai/backend/model_manager/util/libc_util.py
@@ -37,19 +37,21 @@ class Struct_mallinfo2(ctypes.Structure):
 
     def __str__(self) -> str:
         s = ""
-        s += f"{'arena': <10}= {(self.arena/2**30):15.5f} # Non-mmapped space allocated (GB) (uordblks + fordblks)\n"
+        s += (
+            f"{'arena': <10}= {(self.arena / 2**30):15.5f} # Non-mmapped space allocated (GB) (uordblks + fordblks)\n"
+        )
         s += f"{'ordblks': <10}= {(self.ordblks): >15} # Number of free chunks\n"
         s += f"{'smblks': <10}= {(self.smblks): >15} # Number of free fastbin blocks \n"
         s += f"{'hblks': <10}= {(self.hblks): >15} # Number of mmapped regions \n"
-        s += f"{'hblkhd': <10}= {(self.hblkhd/2**30):15.5f} # Space allocated in mmapped regions (GB)\n"
+        s += f"{'hblkhd': <10}= {(self.hblkhd / 2**30):15.5f} # Space allocated in mmapped regions (GB)\n"
         s += f"{'usmblks': <10}= {(self.usmblks): >15} # Unused\n"
-        s += f"{'fsmblks': <10}= {(self.fsmblks/2**30):15.5f} # Space in freed fastbin blocks (GB)\n"
+        s += f"{'fsmblks': <10}= {(self.fsmblks / 2**30):15.5f} # Space in freed fastbin blocks (GB)\n"
         s += (
-            f"{'uordblks': <10}= {(self.uordblks/2**30):15.5f} # Space used by in-use allocations (non-mmapped)"
+            f"{'uordblks': <10}= {(self.uordblks / 2**30):15.5f} # Space used by in-use allocations (non-mmapped)"
             " (GB)\n"
         )
-        s += f"{'fordblks': <10}= {(self.fordblks/2**30):15.5f} # Space in free blocks (non-mmapped) (GB)\n"
-        s += f"{'keepcost': <10}= {(self.keepcost/2**30):15.5f} # Top-most, releasable space (GB)\n"
+        s += f"{'fordblks': <10}= {(self.fordblks / 2**30):15.5f} # Space in free blocks (non-mmapped) (GB)\n"
+        s += f"{'keepcost': <10}= {(self.keepcost / 2**30):15.5f} # Top-most, releasable space (GB)\n"
 
         return s
 
diff --git a/invokeai/backend/patches/lora_conversions/sdxl_lora_conversion_utils.py b/invokeai/backend/patches/lora_conversions/sdxl_lora_conversion_utils.py
index e3780a7e8a4..f96ad5df7cd 100644
--- a/invokeai/backend/patches/lora_conversions/sdxl_lora_conversion_utils.py
+++ b/invokeai/backend/patches/lora_conversions/sdxl_lora_conversion_utils.py
@@ -73,36 +73,36 @@ def _make_sdxl_unet_conversion_map() -> List[Tuple[str, str]]:
         for j in range(2):
             # loop over resnets/attentions for downblocks
             hf_down_res_prefix = f"down_blocks.{i}.resnets.{j}."
-            sd_down_res_prefix = f"input_blocks.{3*i + j + 1}.0."
+            sd_down_res_prefix = f"input_blocks.{3 * i + j + 1}.0."
             unet_conversion_map_layer.append((sd_down_res_prefix, hf_down_res_prefix))
 
             if i < 3:
                 # no attention layers in down_blocks.3
                 hf_down_atn_prefix = f"down_blocks.{i}.attentions.{j}."
-                sd_down_atn_prefix = f"input_blocks.{3*i + j + 1}.1."
+                sd_down_atn_prefix = f"input_blocks.{3 * i + j + 1}.1."
                 unet_conversion_map_layer.append((sd_down_atn_prefix, hf_down_atn_prefix))
 
         for j in range(3):
             # loop over resnets/attentions for upblocks
             hf_up_res_prefix = f"up_blocks.{i}.resnets.{j}."
-            sd_up_res_prefix = f"output_blocks.{3*i + j}.0."
+            sd_up_res_prefix = f"output_blocks.{3 * i + j}.0."
             unet_conversion_map_layer.append((sd_up_res_prefix, hf_up_res_prefix))
 
             # if i > 0: commentout for sdxl
             # no attention layers in up_blocks.0
             hf_up_atn_prefix = f"up_blocks.{i}.attentions.{j}."
-            sd_up_atn_prefix = f"output_blocks.{3*i + j}.1."
+            sd_up_atn_prefix = f"output_blocks.{3 * i + j}.1."
             unet_conversion_map_layer.append((sd_up_atn_prefix, hf_up_atn_prefix))
 
         if i < 3:
             # no downsample in down_blocks.3
             hf_downsample_prefix = f"down_blocks.{i}.downsamplers.0.conv."
-            sd_downsample_prefix = f"input_blocks.{3*(i+1)}.0.op."
+            sd_downsample_prefix = f"input_blocks.{3 * (i + 1)}.0.op."
             unet_conversion_map_layer.append((sd_downsample_prefix, hf_downsample_prefix))
 
             # no upsample in up_blocks.3
             hf_upsample_prefix = f"up_blocks.{i}.upsamplers.0."
-            sd_upsample_prefix = f"output_blocks.{3*i + 2}.{2}."  # change for sdxl
+            sd_upsample_prefix = f"output_blocks.{3 * i + 2}.{2}."  # change for sdxl
             unet_conversion_map_layer.append((sd_upsample_prefix, hf_upsample_prefix))
 
     hf_mid_atn_prefix = "mid_block.attentions.0."
@@ -111,7 +111,7 @@ def _make_sdxl_unet_conversion_map() -> List[Tuple[str, str]]:
 
     for j in range(2):
         hf_mid_res_prefix = f"mid_block.resnets.{j}."
-        sd_mid_res_prefix = f"middle_block.{2*j}."
+        sd_mid_res_prefix = f"middle_block.{2 * j}."
         unet_conversion_map_layer.append((sd_mid_res_prefix, hf_mid_res_prefix))
 
     unet_conversion_map_resnet = [
@@ -133,13 +133,13 @@ def _make_sdxl_unet_conversion_map() -> List[Tuple[str, str]]:
         unet_conversion_map.append((sd, hf))
 
     for j in range(2):
-        hf_time_embed_prefix = f"time_embedding.linear_{j+1}."
-        sd_time_embed_prefix = f"time_embed.{j*2}."
+        hf_time_embed_prefix = f"time_embedding.linear_{j + 1}."
+        sd_time_embed_prefix = f"time_embed.{j * 2}."
         unet_conversion_map.append((sd_time_embed_prefix, hf_time_embed_prefix))
 
     for j in range(2):
-        hf_label_embed_prefix = f"add_embedding.linear_{j+1}."
-        sd_label_embed_prefix = f"label_emb.0.{j*2}."
+        hf_label_embed_prefix = f"add_embedding.linear_{j + 1}."
+        sd_label_embed_prefix = f"label_emb.0.{j * 2}."
         unet_conversion_map.append((sd_label_embed_prefix, hf_label_embed_prefix))
 
     unet_conversion_map.append(("input_blocks.0.0.", "conv_in."))
diff --git a/pyproject.toml b/pyproject.toml
index cc86a7e5ff7..96a9d1eb561 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -115,8 +115,8 @@ dependencies = [
 ]
 "dev" = ["jurigged", "pudb", "snakeviz", "gprof2dot"]
 "test" = [
-  "ruff>=0.3.3",
-  "ruff-lsp>=0.0.53",
+  "ruff~=0.9.9",
+  "ruff-lsp~=0.0.62",
   "mypy",
   "pre-commit",
   "pytest>6.0.0",
diff --git a/tests/app/services/download/test_download_queue.py b/tests/app/services/download/test_download_queue.py
index fd2e2a65ae5..1c5c8ec6af4 100644
--- a/tests/app/services/download/test_download_queue.py
+++ b/tests/app/services/download/test_download_queue.py
@@ -212,12 +212,12 @@ def event_handler(job: DownloadJob | MultiFileDownloadJob, excp: Optional[Except
     assert job.bytes > 0, "expected download bytes to be positive"
     assert job.bytes == job.total_bytes, "expected download bytes to equal total bytes"
     assert job.download_path == tmp_path / "sdxl-turbo"
-    assert Path(
-        tmp_path, "sdxl-turbo/model_index.json"
-    ).exists(), f"expected {tmp_path}/sdxl-turbo/model_inded.json to exist"
-    assert Path(
-        tmp_path, "sdxl-turbo/text_encoder/config.json"
-    ).exists(), f"expected {tmp_path}/sdxl-turbo/text_encoder/config.json to exist"
+    assert Path(tmp_path, "sdxl-turbo/model_index.json").exists(), (
+        f"expected {tmp_path}/sdxl-turbo/model_inded.json to exist"
+    )
+    assert Path(tmp_path, "sdxl-turbo/text_encoder/config.json").exists(), (
+        f"expected {tmp_path}/sdxl-turbo/text_encoder/config.json to exist"
+    )
     assert events == {DownloadJobStatus.RUNNING, DownloadJobStatus.COMPLETED}
     queue.stop()