
refactor of progress monitoring
Signed-off-by: Vladimir Mandic <[email protected]>
vladmandic committed Jan 5, 2025
1 parent 669799b commit 0114b59
Showing 24 changed files with 177 additions and 84 deletions.
1 change: 1 addition & 0 deletions .eslintrc.json
@@ -22,6 +22,7 @@
"default-case":"off",
"no-await-in-loop":"off",
"no-bitwise":"off",
"no-continue":"off",
"no-confusing-arrow":"off",
"no-console":"off",
"no-empty":"off",
2 changes: 2 additions & 0 deletions CHANGELOG.md
@@ -20,6 +20,8 @@
 - add explicit detailer steps setting
 - **SysInfo**:
   - update to collected data and benchmarks
+- **Progress**:
+  - refactored progress monitoring, job updates and live preview
 - **Metadata**:
   - improved metadata save and restore
 - **Fixes**:
37 changes: 22 additions & 15 deletions javascript/progressBar.js
@@ -20,29 +20,36 @@ function checkPaused(state) {

 function setProgress(res) {
   const elements = ['txt2img_generate', 'img2img_generate', 'extras_generate', 'control_generate'];
-  const progress = (res?.progress || 0);
-  let job = res?.job || '';
-  job = job.replace('txt2img', 'Generate').replace('img2img', 'Generate');
-  const perc = res && (progress > 0) ? `${Math.round(100.0 * progress)}%` : '';
-  let sec = res?.eta || 0;
+  const progress = res?.progress || 0;
+  const job = res?.job || '';
+  let perc = '';
   let eta = '';
-  if (res?.paused) eta = 'Paused';
-  else if (res?.completed || (progress > 0.99)) eta = 'Finishing';
-  else if (sec === 0) eta = 'Starting';
+  if (job === 'VAE') perc = 'Decode';
   else {
-    const min = Math.floor(sec / 60);
-    sec %= 60;
-    eta = min > 0 ? `${Math.round(min)}m ${Math.round(sec)}s` : `${Math.round(sec)}s`;
+    perc = res && (progress > 0) && (progress < 1) ? `${Math.round(100.0 * progress)}% ` : '';
+    let sec = res?.eta || 0;
+    if (res?.paused) eta = 'Paused';
+    else if (res?.completed || (progress > 0.99)) eta = 'Finishing';
+    else if (sec === 0) eta = 'Start';
+    else {
+      const min = Math.floor(sec / 60);
+      sec %= 60;
+      eta = min > 0 ? `${Math.round(min)}m ${Math.round(sec)}s` : `${Math.round(sec)}s`;
+    }
   }
   document.title = `SD.Next ${perc}`;
   for (const elId of elements) {
     const el = document.getElementById(elId);
     if (el) {
-      el.innerText = (res ? `${job} ${perc} ${eta}` : 'Generate');
+      const jobLabel = (res ? `${job} ${perc}${eta}` : 'Generate').trim();
+      el.innerText = jobLabel;
       if (!window.waitForUiReady) {
-        el.style.background = res && (progress > 0)
-          ? `linear-gradient(to right, var(--primary-500) 0%, var(--primary-800) ${perc}, var(--neutral-700) ${perc})`
-          : 'var(--button-primary-background-fill)';
+        const gradient = perc !== '' ? perc : '100%';
+        if (jobLabel === 'Generate') el.style.background = 'var(--primary-500)';
+        else if (jobLabel.endsWith('Decode')) continue;
+        else if (jobLabel.endsWith('Start') || jobLabel.endsWith('Finishing')) el.style.background = 'var(--primary-800)';
+        else if (res && progress > 0 && progress < 1) el.style.background = `linear-gradient(to right, var(--primary-500) 0%, var(--primary-800) ${gradient}, var(--neutral-700) ${gradient})`;
+        else el.style.background = 'var(--primary-500)';
       }
     }
   }
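
Note: the rewritten `setProgress` builds the button label from three parts (job name, percentage, ETA) and paints the Generate button itself as a progress bar by moving a `linear-gradient` stop to the current percentage. A rough Python mirror of the new label logic, for illustration only (the function and argument names here are hypothetical, not part of the commit):

```python
# Hypothetical mirror of the new setProgress() label logic; not SD.Next code.
def format_progress(job: str, progress: float, eta_sec: float, paused: bool = False, completed: bool = False) -> str:
    perc, eta = '', ''
    if job == 'VAE':
        perc = 'Decode'  # VAE decode reports no meaningful percentage
    else:
        if 0 < progress < 1:
            perc = f'{round(100 * progress)}% '
        if paused:
            eta = 'Paused'
        elif completed or progress > 0.99:
            eta = 'Finishing'
        elif eta_sec == 0:
            eta = 'Start'
        else:
            minutes, seconds = divmod(eta_sec, 60)
            eta = f'{round(minutes)}m {round(seconds)}s' if minutes > 0 else f'{round(seconds)}s'
    return f'{job} {perc}{eta}'.strip()

print(format_progress('Text', 0.42, 37))  # -> 'Text 42% 37s'
```
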
4 changes: 2 additions & 2 deletions modules/call_queue.py
@@ -15,8 +15,8 @@ def f(*args, **kwargs):
     return f


-def wrap_gradio_gpu_call(func, extra_outputs=None):
-    name = func.__name__
+def wrap_gradio_gpu_call(func, extra_outputs=None, name=None):
+    name = name or func.__name__
     def f(*args, **kwargs):
         # if the first argument is a string that says "task(...)", it is treated as a job id
         if len(args) > 0 and type(args[0]) == str and args[0][0:5] == "task(" and args[0][-1] == ")":
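
Note: `wrap_gradio_gpu_call` previously always reported the wrapped function's `__name__` as the job name; the new optional `name` argument lets callers override it, as the `lora_extract.py` change below does with `name='LoRA'`. A minimal sketch of the pattern (the real wrapper also handles queueing and progress bookkeeping):

```python
# Simplified sketch; the actual wrapper in modules/call_queue.py does much more.
def wrap_gradio_gpu_call(func, extra_outputs=None, name=None):
    name = name or func.__name__  # explicit job name wins over the function name
    def f(*args, **kwargs):
        print(f'job started: {name}')  # stand-in for shared.state job registration
        return func(*args, **kwargs)
    return f

def make_lora():
    return 'done'

wrap_gradio_gpu_call(make_lora, name='LoRA')()  # reports as 'LoRA', not 'make_lora'
```
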
1 change: 1 addition & 0 deletions modules/gr_tempdir.py
@@ -71,6 +71,7 @@ def pil_to_temp_file(self, img: Image, dir: str, format="png") -> str: # pylint:
     img.already_saved_as = name
     size = os.path.getsize(name)
     shared.log.debug(f'Save temp: image="{name}" width={img.width} height={img.height} size={size}')
+    shared.state.image_history += 1
     params = ', '.join([f'{k}: {v}' for k, v in img.info.items()])
     params = params[12:] if params.startswith('parameters: ') else params
     with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
1 change: 1 addition & 0 deletions modules/history.py
@@ -62,6 +62,7 @@ def find(self, name):
         return -1

     def add(self, latent, preview=None, info=None, ops=[]):
+        shared.state.latent_history += 1
         if shared.opts.latent_history == 0:
             return
         if torch.is_tensor(latent):
3 changes: 3 additions & 0 deletions modules/images.py
@@ -29,6 +29,7 @@ def atomically_save_image():
     Image.MAX_IMAGE_PIXELS = None  # disable check in Pillow and rely on check below to allow large custom image sizes
     while True:
         image, filename, extension, params, exifinfo, filename_txt = save_queue.get()
+        shared.state.image_history += 1
         with open(os.path.join(paths.data_path, "params.txt"), "w", encoding="utf8") as file:
             file.write(exifinfo)
         fn = filename + extension
@@ -49,6 +50,7 @@ def atomically_save_image():
                 shared.log.info(f'Save: text="{filename_txt}" len={len(exifinfo)}')
             except Exception as e:
                 shared.log.warning(f'Save failed: description={filename_txt} {e}')
+
         # actual save
         if image_format == 'PNG':
             pnginfo_data = PngImagePlugin.PngInfo()
@@ -79,6 +81,7 @@ def atomically_save_image():
             errors.display(e, 'Image save')
         size = os.path.getsize(fn) if os.path.exists(fn) else 0
         shared.log.info(f'Save: image="{fn}" type={image_format} width={image.width} height={image.height} size={size}')
+
         if shared.opts.save_log_fn != '' and len(exifinfo) > 0:
             fn = os.path.join(paths.data_path, shared.opts.save_log_fn)
             if not fn.endswith('.json'):
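
Note: `atomically_save_image` is a background worker that drains `save_queue`, keeping image encoding and disk writes off the generation thread; the new `image_history` counter is bumped once per dequeued image. A minimal, standalone sketch of the same producer/consumer pattern (assumed structure, not the SD.Next code):

```python
# Standalone sketch of the save-queue worker pattern used here.
import queue
import threading

save_queue = queue.Queue()

def save_worker():
    while True:
        data, filename = save_queue.get()  # blocks until an image is queued
        with open(filename, 'wb') as f:    # slow disk I/O happens off the hot path
            f.write(data)
        save_queue.task_done()

threading.Thread(target=save_worker, daemon=True).start()
save_queue.put((b'\x89PNG\r\n\x1a\n', '/tmp/example.png'))  # producer returns immediately
save_queue.join()  # wait until all queued saves have finished
```
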
2 changes: 1 addition & 1 deletion modules/lora/lora_extract.py
@@ -265,7 +265,7 @@ def gr_show(visible=True):

     auto_rank.change(fn=lambda x: gr_show(x), inputs=[auto_rank], outputs=[rank_ratio])
     extract.click(
-        fn=wrap_gradio_gpu_call(make_lora, extra_outputs=[]),
+        fn=wrap_gradio_gpu_call(make_lora, extra_outputs=[], name='LoRA'),
         inputs=[filename, rank, auto_rank, rank_ratio, modules, overwrite],
         outputs=[status]
     )
7 changes: 5 additions & 2 deletions modules/processing.py
@@ -280,19 +280,22 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
     output_images = []

     process_init(p)
-    if os.path.exists(shared.opts.embeddings_dir) and not p.do_not_reload_embeddings and not shared.native:
+    if not shared.native and os.path.exists(shared.opts.embeddings_dir) and not p.do_not_reload_embeddings:
         modules.sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=False)
     if p.scripts is not None and isinstance(p.scripts, scripts.ScriptRunner):
         p.scripts.process(p)

     ema_scope_context = p.sd_model.ema_scope if not shared.native else nullcontext
-    shared.state.job_count = p.n_iter
+    if not shared.native:
+        shared.state.job_count = p.n_iter
     with devices.inference_context(), ema_scope_context():
         t0 = time.time()
         if not hasattr(p, 'skip_init'):
             p.init(p.all_prompts, p.all_seeds, p.all_subseeds)
         debug(f'Processing inner: args={vars(p)}')
         for n in range(p.n_iter):
+            # if hasattr(p, 'skip_processing'):
+            #     continue
             pag.apply(p)
             debug(f'Processing inner: iteration={n+1}/{p.n_iter}')
             p.iteration = n
3 changes: 2 additions & 1 deletion modules/processing_args.py
@@ -15,6 +15,7 @@

 debug_enabled = os.environ.get('SD_DIFFUSERS_DEBUG', None)
 debug_log = shared.log.trace if os.environ.get('SD_DIFFUSERS_DEBUG', None) is not None else lambda *args, **kwargs: None
+disable_pbar = os.environ.get('SD_DISABLE_PBAR', None) is not None


 def task_specific_kwargs(p, model):
@@ -107,7 +108,7 @@ def set_pipeline_args(p, model, prompts:list, negative_prompts:list, prompts_2:t
         shared.sd_model = sd_models.apply_balanced_offload(shared.sd_model)
     apply_circular(p.tiling, model)
     if hasattr(model, "set_progress_bar_config"):
-        model.set_progress_bar_config(bar_format='Progress {rate_fmt}{postfix} {bar} {percentage:3.0f}% {n_fmt}/{total_fmt} {elapsed} {remaining} ' + '\x1b[38;5;71m' + desc, ncols=80, colour='#327fba')
+        model.set_progress_bar_config(bar_format='Progress {rate_fmt}{postfix} {bar} {percentage:3.0f}% {n_fmt}/{total_fmt} {elapsed} {remaining} ' + '\x1b[38;5;71m' + desc, ncols=80, colour='#327fba', disable=disable_pbar)
     args = {}
     has_vae = hasattr(model, 'vae') or (hasattr(model, 'pipe') and hasattr(model.pipe, 'vae'))
     if hasattr(model, 'pipe') and not hasattr(model, 'no_recurse'):  # recurse
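
Note: the new `SD_DISABLE_PBAR` environment variable suppresses the diffusers console progress bar. `set_progress_bar_config` forwards its keyword arguments to `tqdm`, whose standard `disable` flag skips rendering entirely. A small illustration of the same flag, using plain `tqdm` rather than a diffusers pipeline:

```python
import os
from tqdm import tqdm

disable_pbar = os.environ.get('SD_DISABLE_PBAR', None) is not None  # e.g. SD_DISABLE_PBAR=1

for _ in tqdm(range(10), desc='Progress', ncols=80, disable=disable_pbar):
    pass  # with the variable set, nothing is printed
```
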
5 changes: 3 additions & 2 deletions modules/processing_callbacks.py
@@ -56,8 +56,9 @@ def diffusers_callback(pipe, step: int = 0, timestep: int = 0, kwargs: dict = {}
     latents = kwargs.get('latents', None)
     if debug:
         debug_callback(f'Callback: step={step} timestep={timestep} latents={latents.shape if latents is not None else None} kwargs={list(kwargs)}')
-    order = getattr(pipe.scheduler, "order", 1) if hasattr(pipe, 'scheduler') else 1
-    shared.state.sampling_step = step // order
+    shared.state.step()
+    # order = getattr(pipe.scheduler, "order", 1) if hasattr(pipe, 'scheduler') else 1
+    # shared.state.sampling_step = step // order
     if shared.state.interrupted or shared.state.skipped:
         raise AssertionError('Interrupted...')
     if shared.state.paused:
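
Note: the callback used to reconstruct the sampling step by dividing the raw step index by the scheduler order; that bookkeeping now lives behind `shared.state.step()`. A hypothetical sketch of what such a method centralizes (the real `State` class in SD.Next may differ):

```python
# Hypothetical State sketch; illustrates the refactor, not the actual implementation.
class State:
    def __init__(self, sampling_steps: int = 0):
        self.sampling_step = 0
        self.sampling_steps = sampling_steps

    def step(self, n: int = 1):
        # one increment per denoising callback, regardless of scheduler order
        self.sampling_step += n

state = State(sampling_steps=20)
for _ in range(20):
    state.step()
assert state.sampling_step == state.sampling_steps
```
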
2 changes: 1 addition & 1 deletion modules/processing_class.py
@@ -581,7 +581,7 @@ def init_hr(self, scale = None, upscaler = None, force = False):
         else:
             self.hr_upscale_to_x, self.hr_upscale_to_y = self.hr_resize_x, self.hr_resize_y
         # hypertile_set(self, hr=True)
-        shared.state.job_count = 2 * self.n_iter
+        # shared.state.job_count = 2 * self.n_iter
         shared.log.debug(f'Control hires: upscaler="{self.hr_upscaler}" scale={scale} fixed={not use_scale} size={self.hr_upscale_to_x}x{self.hr_upscale_to_y}')


22 changes: 11 additions & 11 deletions modules/processing_diffusers.py
@@ -6,7 +6,7 @@
 import torchvision.transforms.functional as TF
 from PIL import Image
 from modules import shared, devices, processing, sd_models, errors, sd_hijack_hypertile, processing_vae, sd_models_compile, hidiffusion, timer, modelstats, extra_networks
-from modules.processing_helpers import resize_hires, calculate_base_steps, calculate_hires_steps, calculate_refiner_steps, save_intermediate, update_sampler, is_txt2img, is_refiner_enabled
+from modules.processing_helpers import resize_hires, calculate_base_steps, calculate_hires_steps, calculate_refiner_steps, save_intermediate, update_sampler, is_txt2img, is_refiner_enabled, get_job_name
 from modules.processing_args import set_pipeline_args
 from modules.onnx_impl import preprocess_pipeline as preprocess_onnx_pipeline, check_parameters_changed as olive_check_parameters_changed
 from modules.lora import networks
@@ -53,8 +53,9 @@ def restore_state(p: processing.StableDiffusionProcessing):


 def process_base(p: processing.StableDiffusionProcessing):
-    use_refiner_start = is_txt2img() and is_refiner_enabled(p) and not p.is_hr_pass and p.refiner_start > 0 and p.refiner_start < 1
-    use_denoise_start = not is_txt2img() and p.refiner_start > 0 and p.refiner_start < 1
+    txt2img = is_txt2img()
+    use_refiner_start = txt2img and is_refiner_enabled(p) and not p.is_hr_pass and p.refiner_start > 0 and p.refiner_start < 1
+    use_denoise_start = not txt2img and p.refiner_start > 0 and p.refiner_start < 1

     shared.sd_model = update_pipeline(shared.sd_model, p)
     update_sampler(p, shared.sd_model)
@@ -76,7 +77,8 @@ def process_base(p: processing.StableDiffusionProcessing):
         clip_skip=p.clip_skip,
         desc='Base',
     )
-    shared.state.sampling_steps = base_args.get('prior_num_inference_steps', None) or p.steps or base_args.get('num_inference_steps', None)
+    base_steps = base_args.get('prior_num_inference_steps', None) or p.steps or base_args.get('num_inference_steps', None)
+    shared.state.update(get_job_name(p, shared.sd_model), base_steps, 1)
     if shared.opts.scheduler_eta is not None and shared.opts.scheduler_eta > 0 and shared.opts.scheduler_eta < 1:
         p.extra_generation_params["Sampler Eta"] = shared.opts.scheduler_eta
     output = None
@@ -172,7 +174,7 @@ def process_hires(p: processing.StableDiffusionProcessing, output):
         p.ops.append('upscale')
         if shared.opts.samples_save and not p.do_not_save_samples and shared.opts.save_images_before_highres_fix and hasattr(shared.sd_model, 'vae'):
             save_intermediate(p, latents=output.images, suffix="-before-hires")
-        shared.state.job = 'Upscale'
+        shared.state.update('Upscale', 0, 1)
         output.images = resize_hires(p, latents=output.images)
         sd_hijack_hypertile.hypertile_set(p, hr=True)

@@ -190,7 +192,6 @@ def process_hires(p: processing.StableDiffusionProcessing, output):
             shared.log.warning('HiRes skip: denoising=0')
             p.hr_force = False
         if p.hr_force:
-            shared.state.job_count = 2 * p.n_iter
             shared.sd_model = sd_models.set_diffuser_pipe(shared.sd_model, sd_models.DiffusersTaskType.IMAGE_2_IMAGE)
             if 'Upscale' in shared.sd_model.__class__.__name__ or 'Flux' in shared.sd_model.__class__.__name__ or 'Kandinsky' in shared.sd_model.__class__.__name__:
                 output.images = processing_vae.vae_decode(latents=output.images, model=shared.sd_model, full_quality=p.full_quality, output_type='pil', width=p.width, height=p.height)
@@ -217,8 +218,8 @@ def process_hires(p: processing.StableDiffusionProcessing, output):
             strength=strength,
             desc='Hires',
         )
-        shared.state.job = 'HiRes'
-        shared.state.sampling_steps = hires_args.get('prior_num_inference_steps', None) or p.steps or hires_args.get('num_inference_steps', None)
+        hires_steps = hires_args.get('prior_num_inference_steps', None) or p.hr_second_pass_steps or hires_args.get('num_inference_steps', None)
+        shared.state.update(get_job_name(p, shared.sd_model), hires_steps, 1)
         try:
             shared.sd_model = sd_models.apply_balanced_offload(shared.sd_model)
             sd_models.move_model(shared.sd_model, devices.device)
@@ -255,8 +256,6 @@ def process_refine(p: processing.StableDiffusionProcessing, output):
     # optional refiner pass or decode
     if is_refiner_enabled(p):
         prev_job = shared.state.job
-        shared.state.job = 'Refine'
-        shared.state.job_count +=1
         if shared.opts.samples_save and not p.do_not_save_samples and shared.opts.save_images_before_refiner and hasattr(shared.sd_model, 'vae'):
             save_intermediate(p, latents=output.images, suffix="-before-refiner")
         if shared.opts.diffusers_move_base:
@@ -306,7 +305,8 @@ def process_refine(p: processing.StableDiffusionProcessing, output):
             prompt_attention='fixed',
             desc='Refiner',
         )
-        shared.state.sampling_steps = refiner_args.get('prior_num_inference_steps', None) or p.steps or refiner_args.get('num_inference_steps', None)
+        refiner_steps = refiner_args.get('prior_num_inference_steps', None) or p.steps or refiner_args.get('num_inference_steps', None)
+        shared.state.update(get_job_name(p, shared.sd_refiner), refiner_steps, 1)
         try:
             if 'requires_aesthetics_score' in shared.sd_refiner.config:  # sdxl-model needs false and sdxl-refiner needs true
                 shared.sd_refiner.register_to_config(requires_aesthetics_score = getattr(shared.sd_refiner, 'tokenizer', None) is None)
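
Note: each pass (base, hires, refiner) now derives its step count from the pipeline arguments and registers itself through a single `shared.state.update(job_name, steps, jobs)` call, replacing the scattered assignments to `state.job`, `state.sampling_steps` and `state.job_count`. A runnable paraphrase of the repeated pattern; the `update` signature is inferred from these call sites, and `DummyState` stands in for `shared.state`:

```python
# Paraphrase of the per-pass registration pattern, under assumed signatures.
def register_pass(state, job_name: str, args: dict, default_steps: int = 0):
    # prior pipelines use prior_num_inference_steps; everything else num_inference_steps
    steps = args.get('prior_num_inference_steps') or default_steps or args.get('num_inference_steps')
    state.update(job_name, steps, 1)

class DummyState:
    def update(self, job, steps, jobs):
        print(f'job={job} steps={steps} jobs={jobs}')

register_pass(DummyState(), 'Hires', {'num_inference_steps': 20})  # -> job=Hires steps=20 jobs=1
```
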
23 changes: 23 additions & 0 deletions modules/processing_helpers.py
@@ -584,3 +584,26 @@ def update_sampler(p, sd_model, second_pass=False):
             sampler_options.append('low order')
     if len(sampler_options) > 0:
         p.extra_generation_params['Sampler options'] = '/'.join(sampler_options)
+
+
+def get_job_name(p, model):
+    if hasattr(model, 'pipe'):
+        model = model.pipe
+    if hasattr(p, 'xyz'):
+        return 'Ignore'  # xyz grid handles its own jobs
+    if sd_models.get_diffusers_task(model) == sd_models.DiffusersTaskType.TEXT_2_IMAGE:
+        return 'Text'
+    elif sd_models.get_diffusers_task(model) == sd_models.DiffusersTaskType.IMAGE_2_IMAGE:
+        if p.is_refiner_pass:
+            return 'Refiner'
+        elif p.is_hr_pass:
+            return 'Hires'
+        else:
+            return 'Image'
+    elif sd_models.get_diffusers_task(model) == sd_models.DiffusersTaskType.INPAINTING:
+        if p.detailer:
+            return 'Detailer'
+        else:
+            return 'Inpaint'
+    else:
+        return 'Unknown'
15 changes: 4 additions & 11 deletions modules/progress.py
@@ -64,23 +64,16 @@ def progressapi(req: ProgressRequest):
     queued = req.id_task in pending_tasks
     completed = req.id_task in finished_tasks
     paused = shared.state.paused
-    shared.state.job_count = max(shared.state.frame_count, shared.state.job_count, shared.state.job_no)
-    batch_x = max(shared.state.job_no, 0)
-    batch_y = max(shared.state.job_count, 1)
-    step_x = max(shared.state.sampling_step, 0)
-    step_y = max(shared.state.sampling_steps, 1)
-    current = step_y * batch_x + step_x
-    total = step_y * batch_y
-    while total < current:
-        total += step_y
-    progress = min(1, abs(current / total) if total > 0 else 0)
+    step = max(shared.state.sampling_step, 0)
+    steps = max(shared.state.sampling_steps, 1)
+    progress = round(min(1, abs(step / steps) if steps > 0 else 0), 2)
     elapsed = time.time() - shared.state.time_start if shared.state.time_start is not None else 0
     predicted = elapsed / progress if progress > 0 else None
     eta = predicted - elapsed if predicted is not None else None
     id_live_preview = req.id_live_preview
     live_preview = None
     updated = shared.state.set_current_image()
-    debug_log(f'Preview: job={shared.state.job} active={active} progress={current}/{total} step={shared.state.current_image_sampling_step}/{step_x}/{step_y} request={id_live_preview} last={shared.state.id_live_preview} enabled={shared.opts.live_previews_enable} job={shared.state.preview_job} updated={updated} image={shared.state.current_image} elapsed={elapsed:.3f}')
+    debug_log(f'Preview: job={shared.state.job} active={active} progress={step}/{steps}/{progress} image={shared.state.current_image_sampling_step} request={id_live_preview} last={shared.state.id_live_preview} enabled={shared.opts.live_previews_enable} job={shared.state.preview_job} updated={updated} image={shared.state.current_image} elapsed={elapsed:.3f}')
     if not active:
         return InternalProgressResponse(job=shared.state.job, active=active, queued=queued, paused=paused, completed=completed, id_live_preview=-1, debug=debug, textinfo="Queued..." if queued else "Waiting...")
     if shared.opts.live_previews_enable and (shared.state.id_live_preview != id_live_preview) and (shared.state.current_image is not None):
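
Note: progress is now simply step/steps within the current job, and the ETA is extrapolated from elapsed wall time, replacing the old batch-weighted computation. A worked example of the arithmetic, mirroring the lines above:

```python
import time

def progress_eta(step: int, steps: int, time_start: float):
    step, steps = max(step, 0), max(steps, 1)
    progress = round(min(1, step / steps), 2)
    elapsed = time.time() - time_start
    predicted = elapsed / progress if progress > 0 else None  # extrapolate total from pace so far
    eta = predicted - elapsed if predicted is not None else None
    return progress, eta

# 5 of 20 steps, 4s elapsed -> progress=0.25, predicted total 16s, eta 12s
print(progress_eta(5, 20, time.time() - 4))
```
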
[remaining file diffs not loaded]