Skip to content

Commit

Permalink
fix lora on model change
Browse files Browse the repository at this point in the history
Signed-off-by: Vladimir Mandic <[email protected]>
  • Loading branch information
vladmandic committed Dec 31, 2024
1 parent 3a959a4 commit 73de099
Show file tree
Hide file tree
Showing 5 changed files with 13 additions and 5 deletions.
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -36,6 +36,7 @@ NYE refresh release with quite a few optimizations and bug fixes...
- **Fixes**:
- flux pipeline switches: txt/img/inpaint
- flux custom unet loader for bnb
- flux do not requantize already quantized model
- interrogate caption with T5
- on-the-fly quantization using TorchAO
- remove concurrent preview requests
Expand Down
2 changes: 1 addition & 1 deletion installer.py
Original file line number Diff line number Diff line change
Expand Up @@ -255,7 +255,7 @@ def uninstall(package, quiet = False):


@lru_cache()
def pip(arg: str, ignore: bool = False, quiet: bool = False, uv = True):
def pip(arg: str, ignore: bool = False, quiet: bool = True, uv = True):
originalArg = arg
arg = arg.replace('>=', '==')
package = arg.replace("install", "").replace("--upgrade", "").replace("--no-deps", "").replace("--force", "").replace(" ", " ").strip()
Expand Down
8 changes: 5 additions & 3 deletions modules/model_flux.py
Original file line number Diff line number Diff line change
Expand Up @@ -143,7 +143,9 @@ def quant_flux_bnb(checkpoint_info, transformer, text_encoder_2):
"""


def load_quants(kwargs, repo_id, cache_dir):
def load_quants(kwargs, repo_id, cache_dir, allow_quant):
if not allow_quant:
return kwargs
quant_args = {}
quant_args = model_quant.create_bnb_config(quant_args)
if quant_args:
Expand Down Expand Up @@ -359,10 +361,10 @@ def load_flux(checkpoint_info, diffusers_load_config): # triggered by opts.sd_ch
except Exception:
pass

allow_quant = 'gguf' not in (sd_unet.loaded_unet or '')
allow_quant = 'gguf' not in (sd_unet.loaded_unet or '') and (quant is None or quant == 'none')
fn = checkpoint_info.path
if (fn is None) or (not os.path.exists(fn) or os.path.isdir(fn)):
kwargs = load_quants(kwargs, repo_id, cache_dir=shared.opts.diffusers_dir)
kwargs = load_quants(kwargs, repo_id, cache_dir=shared.opts.diffusers_dir, allow_quant=allow_quant)
kwargs = model_quant.create_bnb_config(kwargs, allow_quant)
kwargs = model_quant.create_ao_config(kwargs, allow_quant)
if fn.endswith('.safetensors') and os.path.isfile(fn):
Expand Down
5 changes: 5 additions & 0 deletions modules/sd_models.py
Original file line number Diff line number Diff line change
Expand Up @@ -1622,6 +1622,11 @@ def unload_model_weights(op='model'):
model_data.sd_model = None
devices.torch_gc(force=True)
shared.log.debug(f'Unload weights {op}: {memory_stats()}')
if not shared.opts.lora_legacy:
from modules.lora import networks
networks.loaded_networks.clear()
networks.previously_loaded_networks.clear()
networks.lora_cache.clear()
elif op == 'refiner':
if model_data.sd_refiner:
if not shared.native:
Expand Down
2 changes: 1 addition & 1 deletion modules/sd_samplers.py
Original file line number Diff line number Diff line change
Expand Up @@ -61,7 +61,7 @@ def create_sampler(name, model):
model.prior_pipe.scheduler = copy.deepcopy(model.default_scheduler)
model.prior_pipe.scheduler.config.clip_sample = False
config = {k: v for k, v in model.scheduler.config.items() if not k.startswith('_')}
shared.log.debug(f'Sampler: default class={current}: {config}')
shared.log.debug(f'Sampler: "default" class={current}: {config}')
if "flow" in model.scheduler.__class__.__name__.lower():
shared.state.prediction_type = "flow_prediction"
elif hasattr(model.scheduler, "config") and hasattr(model.scheduler.config, "prediction_type"):
Expand Down

0 comments on commit 73de099

Please sign in to comment.