Skip to content

Commit

Permalink
cleanup scripts
Browse files Browse the repository at this point in the history
  • Loading branch information
vladmandic committed Apr 28, 2023
1 parent 07a589b commit 99e3fce
Show file tree
Hide file tree
Showing 9 changed files with 16 additions and 58 deletions.
4 changes: 4 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,10 @@ venv
*.zip
*.rar
*.pyc
/*.bat
/*.sh
!webui.bat
!webui.sh

# all dynamic stuff
/repositories/**/*
Expand Down
4 changes: 0 additions & 4 deletions TODO.md
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,6 @@

Stuff to be fixed...

- ClipSkip not updated on read gen info
- Run VAE with hires at 1280
- Transformers version
- Move Restart Server from WebUI to Launch and reload modules
Expand All @@ -19,9 +18,6 @@ Stuff to be added...
- Create new GitHub hooks/actions for CI/CD
- Redo Extensions tab: see <https://vladmandic.github.io/sd-extension-manager/pages/extensions.html>
- Stream-load models as option for slow storage
- AMD optimizations
- Apple optimizations
- Support multiple models locations

## Investigate

Expand Down
2 changes: 2 additions & 0 deletions modules/processing.py
Original file line number Diff line number Diff line change
Expand Up @@ -1024,6 +1024,8 @@ def __init__(self, init_images: list = None, resize_mode: int = 0, denoising_str
self.image_conditioning = None

def init(self, all_prompts, all_seeds, all_subseeds):
        if self.sampler_name in ['PLMS', 'UniPC']: # PLMS/UniPC do not support img2img, so silently switch to the configured fallback sampler
self.sampler_name = shared.opts.fallback_sampler
self.sampler = sd_samplers.create_sampler(self.sampler_name, self.sd_model)
crop_region = None

Expand Down
52 changes: 5 additions & 47 deletions scripts/img2imgalt.py
Original file line number Diff line number Diff line change
@@ -1,19 +1,15 @@
from collections import namedtuple

import numpy as np
from tqdm import trange

import modules.scripts as scripts
import torch
import k_diffusion as K
import gradio as gr

import modules.scripts as scripts
from modules import processing, shared, sd_samplers, sd_samplers_common

import torch
import k_diffusion as K

def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
x = p.init_latent

s_in = x.new_ones([x.shape[0]])
if shared.sd_model.parameterization == "v":
dnw = K.external.CompVisVDenoiser(shared.sd_model)
Expand All @@ -22,40 +18,29 @@ def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
dnw = K.external.CompVisDenoiser(shared.sd_model)
skip = 0
sigmas = dnw.get_sigmas(steps).flip(0)

shared.state.sampling_steps = steps

for i in trange(1, len(sigmas)):
shared.state.sampling_step += 1

x_in = torch.cat([x] * 2)
sigma_in = torch.cat([sigmas[i] * s_in] * 2)
cond_in = torch.cat([uncond, cond])

image_conditioning = torch.cat([p.image_conditioning] * 2)
cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]}

c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)[skip:]]
t = dnw.sigma_to_t(sigma_in)

eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)

denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale

d = (x - denoised) / sigmas[i]
dt = sigmas[i] - sigmas[i - 1]

x = x + d * dt

sd_samplers_common.store_latent(x)

# This shouldn't be necessary, but solved some VRAM issues
del x_in, sigma_in, cond_in, c_out, c_in, t,
del eps, denoised_uncond, denoised_cond, denoised, d, dt

shared.state.nextjob()

return x / x.std()


Expand All @@ -65,7 +50,6 @@ def find_noise_for_image(p, cond, uncond, cfg_scale, steps):
# Based on changes suggested by briansemrau in https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/736
def find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps):
x = p.init_latent

s_in = x.new_ones([x.shape[0]])
if shared.sd_model.parameterization == "v":
dnw = K.external.CompVisVDenoiser(shared.sd_model)
Expand All @@ -79,42 +63,31 @@ def find_noise_for_image_sigma_adjustment(p, cond, uncond, cfg_scale, steps):

for i in trange(1, len(sigmas)):
shared.state.sampling_step += 1

x_in = torch.cat([x] * 2)
sigma_in = torch.cat([sigmas[i - 1] * s_in] * 2)
cond_in = torch.cat([uncond, cond])

image_conditioning = torch.cat([p.image_conditioning] * 2)
cond_in = {"c_concat": [image_conditioning], "c_crossattn": [cond_in]}

c_out, c_in = [K.utils.append_dims(k, x_in.ndim) for k in dnw.get_scalings(sigma_in)[skip:]]

if i == 1:
t = dnw.sigma_to_t(torch.cat([sigmas[i] * s_in] * 2))
else:
t = dnw.sigma_to_t(sigma_in)

eps = shared.sd_model.apply_model(x_in * c_in, t, cond=cond_in)
denoised_uncond, denoised_cond = (x_in + eps * c_out).chunk(2)

denoised = denoised_uncond + (denoised_cond - denoised_uncond) * cfg_scale

if i == 1:
d = (x - denoised) / (2 * sigmas[i])
else:
d = (x - denoised) / sigmas[i - 1]

dt = sigmas[i] - sigmas[i - 1]
x = x + d * dt

sd_samplers_common.store_latent(x)

# This shouldn't be necessary, but solved some VRAM issues
del x_in, sigma_in, cond_in, c_out, c_in, t,
del eps, denoised_uncond, denoised_cond, denoised, d, dt

shared.state.nextjob()

return x / sigmas[-1]


Expand All @@ -123,7 +96,7 @@ def __init__(self):
self.cache = None

def title(self):
return "img2img alternative test"
return "Alternative"

def show(self, is_img2img):
return is_img2img
Expand All @@ -132,24 +105,19 @@ def ui(self, is_img2img):
info = gr.Markdown('''
* `CFG Scale` should be 2 or lower.
''')

override_sampler = gr.Checkbox(label="Override `Sampling method` to Euler?(this method is built for it)", value=True, elem_id=self.elem_id("override_sampler"))

override_prompt = gr.Checkbox(label="Override `prompt` to the same value as `original prompt`?(and `negative prompt`)", value=True, elem_id=self.elem_id("override_prompt"))
original_prompt = gr.Textbox(label="Original prompt", lines=1, elem_id=self.elem_id("original_prompt"))
original_negative_prompt = gr.Textbox(label="Original negative prompt", lines=1, elem_id=self.elem_id("original_negative_prompt"))

override_steps = gr.Checkbox(label="Override `Sampling Steps` to the same value as `Decode steps`?", value=True, elem_id=self.elem_id("override_steps"))
st = gr.Slider(label="Decode steps", minimum=1, maximum=150, step=1, value=50, elem_id=self.elem_id("st"))

override_strength = gr.Checkbox(label="Override `Denoising strength` to 1?", value=True, elem_id=self.elem_id("override_strength"))

cfg = gr.Slider(label="Decode CFG scale", minimum=0.0, maximum=15.0, step=0.1, value=1.0, elem_id=self.elem_id("cfg"))
randomness = gr.Slider(label="Randomness", minimum=0.0, maximum=1.0, step=0.01, value=0.0, elem_id=self.elem_id("randomness"))
sigma_adjustment = gr.Checkbox(label="Sigma adjustment for finding noise for image", value=False, elem_id=self.elem_id("sigma_adjustment"))

return [
info,
info,
override_sampler,
override_prompt, original_prompt, original_negative_prompt,
override_steps, st,
Expand All @@ -171,13 +139,11 @@ def run(self, p, _, override_sampler, override_prompt, original_prompt, original

def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength, prompts):
lat = (p.init_latent.cpu().numpy() * 10).astype(int)

same_params = self.cache is not None and self.cache.cfg_scale == cfg and self.cache.steps == st \
and self.cache.original_prompt == original_prompt \
and self.cache.original_negative_prompt == original_negative_prompt \
and self.cache.sigma_adjustment == sigma_adjustment
same_everything = same_params and self.cache.latent.shape == lat.shape and np.abs(self.cache.latent-lat).sum() < 100

if same_everything:
rec_noise = self.cache.noise
else:
Expand All @@ -191,28 +157,20 @@ def sample_extra(conditioning, unconditional_conditioning, seeds, subseeds, subs
self.cache = Cached(rec_noise, cfg, st, lat, original_prompt, original_negative_prompt, sigma_adjustment)

rand_noise = processing.create_random_tensors(p.init_latent.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength, seed_resize_from_h=p.seed_resize_from_h, seed_resize_from_w=p.seed_resize_from_w, p=p)

combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5)

sampler = sd_samplers.create_sampler(p.sampler_name, p.sd_model)

sigmas = sampler.model_wrap.get_sigmas(p.steps)

noise_dt = combined_noise - (p.init_latent / sigmas[0])

p.seed = p.seed + 1

return sampler.sample_img2img(p, p.init_latent, noise_dt, conditioning, unconditional_conditioning, image_conditioning=p.image_conditioning)

p.sample = sample_extra

p.extra_generation_params["Decode prompt"] = original_prompt
p.extra_generation_params["Decode negative prompt"] = original_negative_prompt
p.extra_generation_params["Decode CFG scale"] = cfg
p.extra_generation_params["Decode steps"] = st
p.extra_generation_params["Randomness"] = randomness
p.extra_generation_params["Sigma Adjustment"] = sigma_adjustment

processed = processing.process_images(p)

return processed
2 changes: 1 addition & 1 deletion scripts/outpainting_mk_2.py
Original file line number Diff line number Diff line change
Expand Up @@ -120,7 +120,7 @@ def _get_masked_window_rgb(np_mask_grey, hardness=1.):

class Script(scripts.Script):
def title(self):
return "Outpainting mk2"
return "Outpainting"

def show(self, is_img2img):
return is_img2img
Expand Down
2 changes: 1 addition & 1 deletion scripts/poor_mans_outpainting.py
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@

class Script(scripts.Script):
def title(self):
return "Poor man's outpainting"
return "Outpainting alternative"

def show(self, is_img2img):
return is_img2img
Expand Down
2 changes: 1 addition & 1 deletion scripts/prompts_from_file.py
Original file line number Diff line number Diff line change
Expand Up @@ -109,7 +109,7 @@ def load_prompt_file(file):

class Script(scripts.Script):
def title(self):
return "Prompts from file or textbox"
return "Prompts from file"

def ui(self, is_img2img):
checkbox_iterate = gr.Checkbox(label="Iterate seed every line", value=False, elem_id=self.elem_id("checkbox_iterate"))
Expand Down
1 change: 1 addition & 0 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ def setup_logging(clean=False):
rh.set_name(logging.DEBUG if args.debug else logging.INFO)
log.addHandler(rh)


# check if package is installed
def installed(package, friendly: str = None):
import pkg_resources
Expand Down
5 changes: 1 addition & 4 deletions webui.py
Original file line number Diff line number Diff line change
Expand Up @@ -69,7 +69,7 @@
def check_rollback_vae():
if shared.cmd_opts.rollback_vae:
if not torch.cuda.is_available():
print("Rollback VAE functionality requires CUDA support")
print("Rollback VAE functionality requires compatible GPU")
shared.cmd_opts.rollback_vae = False
elif not torch.__version__.startswith('2.1'):
print("Rollback VAE functionality requires Torch 2.1 or higher")
Expand All @@ -95,9 +95,6 @@ def initialize():
gfpgan.setup_model(opts.gfpgan_models_path)
startup_timer.record("gfpgan")

modelloader.list_builtin_upscalers()
startup_timer.record("upscalers")

modules.scripts.load_scripts()
startup_timer.record("scripts")

Expand Down

0 comments on commit 99e3fce

Please sign in to comment.