
Commit

global crlf to lf
vladmandic committed Jan 10, 2024
1 parent 8c65942 commit 204853a
Showing 111 changed files with 24,869 additions and 24,869 deletions.
1,326 changes: 663 additions & 663 deletions LICENSE.txt

Large diffs are not rendered by default.

420 changes: 210 additions & 210 deletions README.md

Large diffs are not rendered by default.

170 changes: 85 additions & 85 deletions extensions-builtin/Lora/extra_networks_lora.py
@@ -1,85 +1,85 @@
import time
import networks
import lora_patches
from modules import extra_networks, shared


class ExtraNetworkLora(extra_networks.ExtraNetwork):

    def __init__(self):
        super().__init__('lora')
        self.active = False
        self.errors = {}
        networks.originals = lora_patches.LoraPatches()

        """mapping of network names to the number of errors the network had during operation"""

    def activate(self, p, params_list):
        t0 = time.time()
        additional = shared.opts.sd_lora
        self.errors.clear()
        if additional != "None" and additional in networks.available_networks and not any(x for x in params_list if x.items[0] == additional):
            p.all_prompts = [x + f"<lora:{additional}:{shared.opts.extra_networks_default_multiplier}>" for x in p.all_prompts]
            params_list.append(extra_networks.ExtraNetworkParams(items=[additional, shared.opts.extra_networks_default_multiplier]))
        if len(params_list) > 0:
            self.active = True
            networks.originals.apply() # apply patches
            if networks.debug:
                shared.log.debug("LoRA activate")
        names = []
        te_multipliers = []
        unet_multipliers = []
        dyn_dims = []
        for params in params_list:
            assert params.items
            names.append(params.positional[0])
            te_multiplier = float(params.positional[1]) if len(params.positional) > 1 else 1.0
            te_multiplier = float(params.named.get("te", te_multiplier))
            unet_multiplier = [float(params.positional[2]) if len(params.positional) > 2 else te_multiplier] * 3
            unet_multiplier = [float(params.named.get("unet", unet_multiplier[0]))] * 3
            unet_multiplier[0] = float(params.named.get("in", unet_multiplier[0]))
            unet_multiplier[1] = float(params.named.get("mid", unet_multiplier[1]))
            unet_multiplier[2] = float(params.named.get("out", unet_multiplier[2]))
            dyn_dim = int(params.positional[3]) if len(params.positional) > 3 else None
            dyn_dim = int(params.named["dyn"]) if "dyn" in params.named else dyn_dim
            te_multipliers.append(te_multiplier)
            unet_multipliers.append(unet_multiplier)
            dyn_dims.append(dyn_dim)
        t1 = time.time()
        networks.load_networks(names, te_multipliers, unet_multipliers, dyn_dims)
        t2 = time.time()
        if shared.opts.lora_add_hashes_to_infotext:
            network_hashes = []
            for item in networks.loaded_networks:
                shorthash = item.network_on_disk.shorthash
                if not shorthash:
                    continue
                alias = item.mentioned_name
                if not alias:
                    continue
                alias = alias.replace(":", "").replace(",", "")
                network_hashes.append(f"{alias}: {shorthash}")
            if network_hashes:
                p.extra_generation_params["Lora hashes"] = ", ".join(network_hashes)
        if len(names) > 0:
            shared.log.info(f'LoRA apply: {names} patch={t1-t0:.2f} load={t2-t1:.2f}')
        elif self.active:
            self.active = False

    def deactivate(self, p):
        if shared.backend == shared.Backend.DIFFUSERS and hasattr(shared.sd_model, "unload_lora_weights") and hasattr(shared.sd_model, "text_encoder"):
            if 'CLIP' in shared.sd_model.text_encoder.__class__.__name__ and not (shared.opts.cuda_compile and shared.opts.cuda_compile_backend == "openvino_fx"):
                if shared.opts.lora_fuse_diffusers:
                    shared.sd_model.unfuse_lora()
                shared.sd_model.unload_lora_weights()
        if not self.active and getattr(networks, "originals", None) is not None:
            networks.originals.undo() # remove patches
            if networks.debug:
                shared.log.debug("LoRA deactivate")
        if self.active and networks.debug:
            shared.log.debug(f"LoRA end: load={networks.timer['load']:.2f} apply={networks.timer['apply']:.2f} restore={networks.timer['restore']:.2f}")
        if self.errors:
            p.comment("Networks with errors: " + ", ".join(f"{k} ({v})" for k, v in self.errors.items()))
            for k, v in self.errors.items():
                shared.log.error(f'LoRA errors: file="{k}" errors={v}')
            self.errors.clear()
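For reference, the multiplier parsing in activate() can be replayed outside the webui. The sketch below is an illustration only: it uses a minimal stand-in for extra_networks.ExtraNetworkParams (a plain dataclass with positional and named fields, not the real class) and shows how a tag along the lines of <lora:my_lora:0.8:0.6:dyn=16> (assuming named arguments arrive as key=value pairs) resolves into the values handed to networks.load_networks.

# Minimal sketch, not the real extra_networks module: a stand-in params object
# exposing the same 'positional' and 'named' fields the loop in activate() reads.
from dataclasses import dataclass, field

@dataclass
class FakeParams:  # hypothetical stand-in for extra_networks.ExtraNetworkParams
    positional: list = field(default_factory=list)
    named: dict = field(default_factory=dict)

def parse_lora_params(params):
    # same resolution order as activate(): positional values first, then named overrides
    name = params.positional[0]
    te = float(params.positional[1]) if len(params.positional) > 1 else 1.0
    te = float(params.named.get("te", te))
    unet = [float(params.positional[2]) if len(params.positional) > 2 else te] * 3
    unet = [float(params.named.get("unet", unet[0]))] * 3
    unet[0] = float(params.named.get("in", unet[0]))
    unet[1] = float(params.named.get("mid", unet[1]))
    unet[2] = float(params.named.get("out", unet[2]))
    dyn = int(params.positional[3]) if len(params.positional) > 3 else None
    dyn = int(params.named["dyn"]) if "dyn" in params.named else dyn
    return name, te, unet, dyn

# prints ('my_lora', 0.8, [0.6, 0.6, 0.6], 16)
print(parse_lora_params(FakeParams(["my_lora", "0.8", "0.6"], {"dyn": "16"})))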
16 changes: 8 additions & 8 deletions extensions-builtin/Lora/lora.py
@@ -1,8 +1,8 @@
import networks

list_available_loras = networks.list_available_networks
available_loras = networks.available_networks
available_lora_aliases = networks.available_network_aliases
available_lora_hash_lookup = networks.available_network_hash_lookup
forbidden_lora_aliases = networks.forbidden_network_aliases
loaded_loras = networks.loaded_networks
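lora.py above is a compatibility shim: it re-exports the renamed networks module state under the legacy lora names so extensions written against the old API keep working. A hedged sketch of the kind of legacy call site this preserves (the loop body and the 'filename' attribute are illustrative assumptions):

# Hypothetical legacy extension code; these names resolve to the 'networks'
# objects re-exported by extensions-builtin/Lora/lora.py.
import lora

lora.list_available_loras()  # same callable as networks.list_available_networks
for name, entry in lora.available_loras.items():  # alias of networks.available_networks
    print(name, getattr(entry, "filename", None))  # 'filename' attribute is an assumption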
104 changes: 52 additions & 52 deletions extensions-builtin/Lora/lora_patches.py
@@ -1,52 +1,52 @@
import torch
import networks
from modules import patches, shared


class LoraPatches:
    def __init__(self):
        self.active = False
        self.Linear_forward = None
        self.Linear_load_state_dict = None
        self.Conv2d_forward = None
        self.Conv2d_load_state_dict = None
        self.GroupNorm_forward = None
        self.GroupNorm_load_state_dict = None
        self.LayerNorm_forward = None
        self.LayerNorm_load_state_dict = None
        self.MultiheadAttention_forward = None
        self.MultiheadAttention_load_state_dict = None

    def apply(self):
        if self.active or shared.opts.lora_force_diffusers:
            return
        self.Linear_forward = patches.patch(__name__, torch.nn.Linear, 'forward', networks.network_Linear_forward)
        self.Linear_load_state_dict = patches.patch(__name__, torch.nn.Linear, '_load_from_state_dict', networks.network_Linear_load_state_dict)
        self.Conv2d_forward = patches.patch(__name__, torch.nn.Conv2d, 'forward', networks.network_Conv2d_forward)
        self.Conv2d_load_state_dict = patches.patch(__name__, torch.nn.Conv2d, '_load_from_state_dict', networks.network_Conv2d_load_state_dict)
        self.GroupNorm_forward = patches.patch(__name__, torch.nn.GroupNorm, 'forward', networks.network_GroupNorm_forward)
        self.GroupNorm_load_state_dict = patches.patch(__name__, torch.nn.GroupNorm, '_load_from_state_dict', networks.network_GroupNorm_load_state_dict)
        self.LayerNorm_forward = patches.patch(__name__, torch.nn.LayerNorm, 'forward', networks.network_LayerNorm_forward)
        self.LayerNorm_load_state_dict = patches.patch(__name__, torch.nn.LayerNorm, '_load_from_state_dict', networks.network_LayerNorm_load_state_dict)
        self.MultiheadAttention_forward = patches.patch(__name__, torch.nn.MultiheadAttention, 'forward', networks.network_MultiheadAttention_forward)
        self.MultiheadAttention_load_state_dict = patches.patch(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict', networks.network_MultiheadAttention_load_state_dict)
        networks.timer['load'] = 0
        networks.timer['apply'] = 0
        networks.timer['restore'] = 0
        self.active = True

    def undo(self):
        if not self.active or shared.opts.lora_force_diffusers:
            return
        self.Linear_forward = patches.undo(__name__, torch.nn.Linear, 'forward') # pylint: disable=E1128
        self.Linear_load_state_dict = patches.undo(__name__, torch.nn.Linear, '_load_from_state_dict') # pylint: disable=E1128
        self.Conv2d_forward = patches.undo(__name__, torch.nn.Conv2d, 'forward') # pylint: disable=E1128
        self.Conv2d_load_state_dict = patches.undo(__name__, torch.nn.Conv2d, '_load_from_state_dict') # pylint: disable=E1128
        self.GroupNorm_forward = patches.undo(__name__, torch.nn.GroupNorm, 'forward') # pylint: disable=E1128
        self.GroupNorm_load_state_dict = patches.undo(__name__, torch.nn.GroupNorm, '_load_from_state_dict') # pylint: disable=E1128
        self.LayerNorm_forward = patches.undo(__name__, torch.nn.LayerNorm, 'forward') # pylint: disable=E1128
        self.LayerNorm_load_state_dict = patches.undo(__name__, torch.nn.LayerNorm, '_load_from_state_dict') # pylint: disable=E1128
        self.MultiheadAttention_forward = patches.undo(__name__, torch.nn.MultiheadAttention, 'forward') # pylint: disable=E1128
        self.MultiheadAttention_load_state_dict = patches.undo(__name__, torch.nn.MultiheadAttention, '_load_from_state_dict') # pylint: disable=E1128
        patches.originals.pop(__name__, None)
        self.active = False
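LoraPatches is a thin bookkeeping layer over the repository's modules.patches helper: apply() swaps the forward and _load_from_state_dict methods of the core torch.nn layers for the LoRA-aware replacements defined in networks, and undo() restores the originals (the pylint E1128 suppressions suggest patches.undo returns None). Below is a minimal, self-contained sketch of that patch/undo pattern; it is an illustration of the idea, not the real modules.patches implementation.

# Minimal sketch of the patch/undo pattern used by LoraPatches; not the real
# modules.patches helper, just the same idea in a few lines.
import torch

_originals = {}  # (owner class, attribute name) -> original callable

def patch(owner, attr, replacement):
    key = (owner, attr)
    if key in _originals:
        raise RuntimeError(f"{owner.__name__}.{attr} is already patched")
    _originals[key] = getattr(owner, attr)
    setattr(owner, attr, replacement)
    return _originals[key]  # hand back the original, as the assignments in apply() imply

def undo(owner, attr):
    original = _originals.pop((owner, attr))
    setattr(owner, attr, original)

# usage: wrap Linear.forward, run a layer once, then restore the original
def logged_forward(self, x):
    print("patched Linear.forward", tuple(x.shape))
    return _originals[(torch.nn.Linear, "forward")](self, x)  # delegate to the saved original

patch(torch.nn.Linear, "forward", logged_forward)
torch.nn.Linear(4, 2)(torch.zeros(1, 4))
undo(torch.nn.Linear, "forward")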

0 comments on commit 204853a
