From 9e737cbadcdc89c23b119701815275e7c209ff00 Mon Sep 17 00:00:00 2001
From: Alexandre Simard
Date: Mon, 26 Sep 2022 17:18:57 -0400
Subject: [PATCH 001/460] Solve issue #962
Fix by @MrAcademy
---
.gitignore | 3 ++-
javascript/ui.js | 5 ++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/.gitignore b/.gitignore
index 9d78853af..fa1ab43e7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -19,4 +19,5 @@ __pycache__
/webui-user.sh
/interrogate
/user.css
-/.idea
\ No newline at end of file
+/.idea
+/SwinIR
diff --git a/javascript/ui.js b/javascript/ui.js
index 076e9436c..7db4db48d 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -1,9 +1,8 @@
// various functions for interaction with ui.py not large enough to warrant putting them in separate files
function selected_gallery_index(){
- var gr = gradioApp()
- var buttons = gradioApp().querySelectorAll(".gallery-item")
- var button = gr.querySelector(".gallery-item.\\!ring-2")
+ var buttons = gradioApp().querySelectorAll('[style="display: block;"].tabitem .gallery-item')
+ var button = gradioApp().querySelector('[style="display: block;"].tabitem .gallery-item.\\!ring-2')
var result = -1
buttons.forEach(function(v, i){ if(v==button) { result = i } })
From 03ee67bfd34b9e872b33eb05fef5db83410b16f3 Mon Sep 17 00:00:00 2001
From: WDevelopsWebApps <97454358+WDevelopsWebApps@users.noreply.github.com>
Date: Wed, 28 Sep 2022 10:53:40 +0200
Subject: [PATCH 002/460] add advanced saving for save button
---
modules/images.py | 5 ++++-
modules/ui.py | 35 ++++++++++++++++++++++++++++-------
2 files changed, 32 insertions(+), 8 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index 9458bf8d4..923f81dfb 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -290,7 +290,10 @@ def apply_filename_pattern(x, p, seed, prompt):
x = x.replace("[cfg]", str(p.cfg_scale))
x = x.replace("[width]", str(p.width))
x = x.replace("[height]", str(p.height))
- x = x.replace("[styles]", sanitize_filename_part(", ".join(p.styles), replace_spaces=False))
+ # currently disabled when using the save button; will work otherwise
+ # if enabled it would cause a bug, because styles is not included in the save_files data dictionary
+ if hasattr(p, "styles"):
+ x = x.replace("[styles]", sanitize_filename_part(", ".join(p.styles), replace_spaces=False))
x = x.replace("[sampler]", sanitize_filename_part(sd_samplers.samplers[p.sampler_index].name, replace_spaces=False))
x = x.replace("[model_hash]", shared.sd_model.sd_model_hash)
diff --git a/modules/ui.py b/modules/ui.py
index 7db8edbd8..87a86a45d 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -28,6 +28,7 @@ import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
+from modules.images import apply_filename_pattern, get_next_sequence_number
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
@@ -90,13 +91,26 @@ def send_gradio_gallery_to_image(x):
def save_files(js_data, images, index):
- import csv
-
- os.makedirs(opts.outdir_save, exist_ok=True)
-
+ import csv
filenames = []
+ # quick dictionary-to-class-object conversion; it's necessary because apply_filename_pattern expects an object with attributes
+ class MyObject:
+ def __init__(self, d=None):
+ if d is not None:
+ for key, value in d.items():
+ setattr(self, key, value)
+
data = json.loads(js_data)
+ p = MyObject(data)
+ path = opts.outdir_save
+ save_to_dirs = opts.save_to_dirs
+
+ if save_to_dirs:
+ dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, p.seed, p.prompt)
+ path = os.path.join(opts.outdir_save, dirname)
+
+ os.makedirs(path, exist_ok=True)
if index > -1 and opts.save_selected_only and (index > 0 or not opts.return_grid): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
@@ -107,11 +121,18 @@ def save_files(js_data, images, index):
writer = csv.writer(file)
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
+ file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
+ if file_decoration != "":
+ file_decoration = "-" + file_decoration.lower()
+ file_decoration = apply_filename_pattern(file_decoration, p, p.seed, p.prompt)
+ truncated = (file_decoration[:240] + '..') if len(file_decoration) > 240 else file_decoration
+ filename_base = truncated
- filename_base = str(int(time.time() * 1000))
+ basecount = get_next_sequence_number(path, "")
for i, filedata in enumerate(images):
- filename = filename_base + ("" if len(images) == 1 else "-" + str(i + 1)) + ".png"
- filepath = os.path.join(opts.outdir_save, filename)
+ file_number = f"{basecount+i:05}"
+ filename = file_number + filename_base + ".png"
+ filepath = os.path.join(path, filename)
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
From c938679de7b87b4f14894d9f57fe0f40dd6e3c06 Mon Sep 17 00:00:00 2001
From: Jairo Correa
Date: Wed, 28 Sep 2022 22:14:13 -0300
Subject: [PATCH 003/460] Fix memory leak and reduce memory usage
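The core of the fix is the torch_gc() helper in modules/devices.py, which this patch extends to run Python garbage collection before releasing cached CUDA memory. A minimal standalone sketch of that pattern:

import gc

import torch


def torch_gc():
    # Drop unreachable Python references first so their CUDA tensors
    # become collectable, then return cached GPU memory to the driver.
    gc.collect()
    if torch.cuda.is_available():
        torch.cuda.empty_cache()
        torch.cuda.ipc_collect()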
---
modules/codeformer_model.py | 6 ++++--
modules/devices.py | 3 ++-
modules/extras.py | 2 ++
modules/gfpgan_model.py | 11 +++++------
modules/processing.py | 33 ++++++++++++++++++++++++++-------
webui.py | 3 +++
6 files changed, 42 insertions(+), 16 deletions(-)
diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py
index 8fbdea249..2177291a7 100644
--- a/modules/codeformer_model.py
+++ b/modules/codeformer_model.py
@@ -89,7 +89,7 @@ def setup_codeformer():
output = self.net(cropped_face_t, w=w if w is not None else shared.opts.code_former_weight, adain=True)[0]
restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1))
del output
- torch.cuda.empty_cache()
+ devices.torch_gc()
except Exception as error:
print(f'\tFailed inference for CodeFormer: {error}', file=sys.stderr)
restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1))
@@ -106,7 +106,9 @@ def setup_codeformer():
restored_img = cv2.resize(restored_img, (0, 0), fx=original_resolution[1]/restored_img.shape[1], fy=original_resolution[0]/restored_img.shape[0], interpolation=cv2.INTER_LINEAR)
if shared.opts.face_restoration_unload:
- self.net.to(devices.cpu)
+ self.net = None
+ self.face_helper = None
+ devices.torch_gc()
return restored_img
diff --git a/modules/devices.py b/modules/devices.py
index 07bb23397..df63dd88e 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -1,4 +1,5 @@
import torch
+import gc
# has_mps is only available in nightly pytorch (for now), `getattr` for compatibility
from modules import errors
@@ -17,8 +18,8 @@ def get_optimal_device():
return cpu
-
def torch_gc():
+ gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
diff --git a/modules/extras.py b/modules/extras.py
index 9a825530f..38b861675 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -98,6 +98,8 @@ def run_extras(extras_mode, image, image_folder, gfpgan_visibility, codeformer_v
outputs.append(image)
+ devices.torch_gc()
+
return outputs, plaintext_to_html(info), ''
diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py
index 44c5dc6ca..b1288f0ca 100644
--- a/modules/gfpgan_model.py
+++ b/modules/gfpgan_model.py
@@ -49,6 +49,7 @@ def gfpgan():
def gfpgan_fix_faces(np_image):
+ global loaded_gfpgan_model
model = gfpgan()
np_image_bgr = np_image[:, :, ::-1]
@@ -56,7 +57,9 @@ def gfpgan_fix_faces(np_image):
np_image = gfpgan_output_bgr[:, :, ::-1]
if shared.opts.face_restoration_unload:
- model.gfpgan.to(devices.cpu)
+ del model
+ loaded_gfpgan_model = None
+ devices.torch_gc()
return np_image
@@ -83,11 +86,7 @@ def setup_gfpgan():
return "GFPGAN"
def restore(self, np_image):
- np_image_bgr = np_image[:, :, ::-1]
- cropped_faces, restored_faces, gfpgan_output_bgr = gfpgan().enhance(np_image_bgr, has_aligned=False, only_center_face=False, paste_back=True)
- np_image = gfpgan_output_bgr[:, :, ::-1]
-
- return np_image
+ return gfpgan_fix_faces(np_image)
shared.face_restorers.append(FaceRestorerGFPGAN())
except Exception:
diff --git a/modules/processing.py b/modules/processing.py
index 4ecdfcd2d..de5cda793 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -12,7 +12,7 @@ import cv2
from skimage import exposure
import modules.sd_hijack
-from modules import devices, prompt_parser, masking
+from modules import devices, prompt_parser, masking, lowvram
from modules.sd_hijack import model_hijack
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.shared import opts, cmd_opts, state
@@ -335,7 +335,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if state.job_count == -1:
state.job_count = p.n_iter
- for n in range(p.n_iter):
+ for n in range(p.n_iter):
+ with torch.no_grad(), precision_scope("cuda"), ema_scope():
if state.interrupted:
break
@@ -368,22 +369,32 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
x_samples_ddim = p.sd_model.decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
+ del samples_ddim
+
+ if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
+ lowvram.send_everything_to_cpu()
+
+ devices.torch_gc()
+
if opts.filter_nsfw:
import modules.safety as safety
x_samples_ddim = modules.safety.censor_batch(x_samples_ddim)
- for i, x_sample in enumerate(x_samples_ddim):
+ for i, x_sample in enumerate(x_samples_ddim):
+ with torch.no_grad(), precision_scope("cuda"), ema_scope():
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
- if p.restore_faces:
+ if p.restore_faces:
+ with torch.no_grad(), precision_scope("cuda"), ema_scope():
if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")
- devices.torch_gc()
-
x_sample = modules.face_restoration.restore_faces(x_sample)
+ devices.torch_gc()
+
+ with torch.no_grad(), precision_scope("cuda"), ema_scope():
image = Image.fromarray(x_sample)
if p.color_corrections is not None and i < len(p.color_corrections):
@@ -411,8 +422,13 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
infotexts.append(infotext(n, i))
output_images.append(image)
- state.nextjob()
+ del x_samples_ddim
+ devices.torch_gc()
+
+ state.nextjob()
+
+ with torch.no_grad(), precision_scope("cuda"), ema_scope():
p.color_corrections = None
index_of_first_image = 0
@@ -648,4 +664,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
if self.mask is not None:
samples = samples * self.nmask + self.init_latent * self.mask
+ del x
+ devices.torch_gc()
+
return samples
diff --git a/webui.py b/webui.py
index c70a11c7c..b61a318db 100644
--- a/webui.py
+++ b/webui.py
@@ -22,7 +22,10 @@ import modules.txt2img
import modules.img2img
import modules.swinir as swinir
import modules.sd_models
+from torch.nn.functional import silu
+import ldm
+ldm.modules.diffusionmodules.model.nonlinearity = silu
modules.codeformer_model.setup_codeformer()
modules.gfpgan_model.setup_gfpgan()
From c2d5b29040132c171bc4d77f1f63da972306f22c Mon Sep 17 00:00:00 2001
From: Jairo Correa
Date: Thu, 29 Sep 2022 01:14:54 -0300
Subject: [PATCH 004/460] Move silu to sd_hijack
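Background for this move: the hand-rolled nonlinearity_hijack() removed below computes swish, x * sigmoid(x), and torch.nn.functional.silu is the same function in fused form, so the hijack can simply assign silu. A quick equivalence check:

import torch
from torch.nn.functional import silu

x = torch.randn(4)
swish = x * torch.sigmoid(x)           # what nonlinearity_hijack computed
assert torch.allclose(silu(x), swish)  # SiLU is the fused equivalent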
---
modules/sd_hijack.py | 12 +++---------
webui.py | 3 ---
2 files changed, 3 insertions(+), 12 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index bfbd07f9a..4bc58fa2b 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -12,6 +12,7 @@ from ldm.util import default
from einops import rearrange
import ldm.modules.attention
import ldm.modules.diffusionmodules.model
+from torch.nn.functional import silu
# see https://github.com/basujindal/stable-diffusion/pull/117 for discussion
@@ -100,14 +101,6 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
return self.to_out(r2)
-def nonlinearity_hijack(x):
- # swish
- t = torch.sigmoid(x)
- x *= t
- del t
-
- return x
-
def cross_attention_attnblock_forward(self, x):
h_ = x
h_ = self.norm(h_)
@@ -245,11 +238,12 @@ class StableDiffusionModelHijack:
m.cond_stage_model = FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self)
self.clip = m.cond_stage_model
+ ldm.modules.diffusionmodules.model.nonlinearity = silu
+
if cmd_opts.opt_split_attention_v1:
ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward_v1
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward
- ldm.modules.diffusionmodules.model.nonlinearity = nonlinearity_hijack
ldm.modules.diffusionmodules.model.AttnBlock.forward = cross_attention_attnblock_forward
def flatten(el):
diff --git a/webui.py b/webui.py
index b61a318db..c70a11c7c 100644
--- a/webui.py
+++ b/webui.py
@@ -22,10 +22,7 @@ import modules.txt2img
import modules.img2img
import modules.swinir as swinir
import modules.sd_models
-from torch.nn.functional import silu
-import ldm
-ldm.modules.diffusionmodules.model.nonlinearity = silu
modules.codeformer_model.setup_codeformer()
modules.gfpgan_model.setup_gfpgan()
From e82ea202997cbcd2ab72891cd075d9ba270eb67d Mon Sep 17 00:00:00 2001
From: d8ahazard
Date: Fri, 30 Sep 2022 15:26:18 -0500
Subject: [PATCH 005/460] Optimize model loader
Child classes only get populated into __subclasses__ when they are imported. We don't actually need to import any of them into webui anymore, so clean up the webui imports and make sure the loader imports its children.
Also, fix command line paths not actually being passed to the scalers.
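For readers unfamiliar with the mechanism: a base class only sees subclasses whose defining modules have actually been executed, which is why load_upscalers() below imports every modules/*_model.py before walking __subclasses__. A small illustration (class names hypothetical):

class Upscaler:
    pass

print(Upscaler.__subclasses__())  # [] -- no subclass defined/imported yet

# Executing the code that defines a subclass (e.g. importing its module)
# registers it with the base class automatically:
class UpscalerESRGAN(Upscaler):
    pass

print(Upscaler.__subclasses__())  # [<class '__main__.UpscalerESRGAN'>]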
---
modules/modelloader.py | 19 ++++++++++++++++---
webui.py | 13 +++----------
2 files changed, 19 insertions(+), 13 deletions(-)
diff --git a/modules/modelloader.py b/modules/modelloader.py
index 1106aeb7f..b1721671b 100644
--- a/modules/modelloader.py
+++ b/modules/modelloader.py
@@ -4,7 +4,6 @@ import importlib
from urllib.parse import urlparse
from basicsr.utils.download_util import load_file_from_url
-
from modules import shared
from modules.upscaler import Upscaler
from modules.paths import script_path, models_path
@@ -120,16 +119,30 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None):
def load_upscalers():
+ sd = shared.script_path
+ # We can only use this 'magic' __subclasses__ method to dynamically load upscalers if their modules are imported,
+ # so try to import any *_model.py files before looking at __subclasses__
+ modules_dir = os.path.join(sd, "modules")
+ for file in os.listdir(modules_dir):
+ if "_model.py" in file:
+ model_name = file.replace("_model.py", "")
+ full_model = f"modules.{model_name}_model"
+ try:
+ importlib.import_module(full_model)
+ except:
+ pass
datas = []
+ c_o = vars(shared.cmd_opts)
for cls in Upscaler.__subclasses__():
name = cls.__name__
module_name = cls.__module__
module = importlib.import_module(module_name)
class_ = getattr(module, name)
- cmd_name = f"{name.lower().replace('upscaler', '')}-models-path"
+ cmd_name = f"{name.lower().replace('upscaler', '')}_models_path"
opt_string = None
try:
- opt_string = shared.opts.__getattr__(cmd_name)
+ if cmd_name in c_o:
+ opt_string = c_o[cmd_name]
except:
pass
scaler = class_(opt_string)
diff --git a/webui.py b/webui.py
index b8cccd546..ebe39a170 100644
--- a/webui.py
+++ b/webui.py
@@ -1,28 +1,21 @@
import os
-import threading
-
-from modules import devices
-from modules.paths import script_path
import signal
import threading
-import modules.paths
+
import modules.codeformer_model as codeformer
-import modules.esrgan_model as esrgan
-import modules.bsrgan_model as bsrgan
import modules.extras
import modules.face_restoration
import modules.gfpgan_model as gfpgan
import modules.img2img
-import modules.ldsr_model as ldsr
import modules.lowvram
-import modules.realesrgan_model as realesrgan
+import modules.paths
import modules.scripts
import modules.sd_hijack
import modules.sd_models
import modules.shared as shared
-import modules.swinir_model as swinir
import modules.txt2img
import modules.ui
+from modules import devices
from modules import modelloader
from modules.paths import script_path
from modules.shared import cmd_opts
From 8deae077004f0332ca607fc3a5d568b1a4705bec Mon Sep 17 00:00:00 2001
From: d8ahazard
Date: Fri, 30 Sep 2022 15:28:37 -0500
Subject: [PATCH 006/460] Add ScuNET DeNoiser/Upscaler
Quick-and-dirty implementation of ScuNET, thanks to our handy model loader. :P
https://github.com/cszn/SCUNet
---
modules/scunet_model.py | 90 ++++++++++++
modules/scunet_model_arch.py | 265 +++++++++++++++++++++++++++++++++++
modules/shared.py | 1 +
3 files changed, 356 insertions(+)
create mode 100644 modules/scunet_model.py
create mode 100644 modules/scunet_model_arch.py
diff --git a/modules/scunet_model.py b/modules/scunet_model.py
new file mode 100644
index 000000000..7987ac145
--- /dev/null
+++ b/modules/scunet_model.py
@@ -0,0 +1,90 @@
+import os.path
+import sys
+import traceback
+
+import PIL.Image
+import numpy as np
+import torch
+from basicsr.utils.download_util import load_file_from_url
+
+import modules.upscaler
+from modules import shared, modelloader
+from modules.paths import models_path
+from modules.scunet_model_arch import SCUNet as net
+
+
+class UpscalerScuNET(modules.upscaler.Upscaler):
+ def __init__(self, dirname):
+ self.name = "ScuNET"
+ self.model_path = os.path.join(models_path, self.name)
+ self.model_name = "ScuNET GAN"
+ self.model_name2 = "ScuNET PSNR"
+ self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_gan.pth"
+ self.model_url2 = "https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_psnr.pth"
+ self.user_path = dirname
+ super().__init__()
+ model_paths = self.find_models(ext_filter=[".pth"])
+ scalers = []
+ add_model2 = True
+ for file in model_paths:
+ if "http" in file:
+ name = self.model_name
+ else:
+ name = modelloader.friendly_name(file)
+ if name == self.model_name2 or file == self.model_url2:
+ add_model2 = False
+ try:
+ scaler_data = modules.upscaler.UpscalerData(name, file, self, 4)
+ scalers.append(scaler_data)
+ except Exception:
+ print(f"Error loading ScuNET model: {file}", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+ if add_model2:
+ scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self)
+ scalers.append(scaler_data2)
+ self.scalers = scalers
+
+ def do_upscale(self, img: PIL.Image, selected_file):
+ torch.cuda.empty_cache()
+
+ model = self.load_model(selected_file)
+ if model is None:
+ return img
+
+ device = shared.device
+ img = np.array(img)
+ img = img[:, :, ::-1]
+ img = np.moveaxis(img, 2, 0) / 255
+ img = torch.from_numpy(img).float()
+ img = img.unsqueeze(0).to(shared.device)
+
+ img = img.to(device)
+ with torch.no_grad():
+ output = model(img)
+ output = output.squeeze().float().cpu().clamp_(0, 1).numpy()
+ output = 255. * np.moveaxis(output, 0, 2)
+ output = output.astype(np.uint8)
+ output = output[:, :, ::-1]
+ torch.cuda.empty_cache()
+ return PIL.Image.fromarray(output, 'RGB')
+
+ def load_model(self, path: str):
+ device = shared.device
+ if "http" in path:
+ filename = load_file_from_url(url=self.model_url, model_dir=self.model_path, file_name="%s.pth" % self.name,
+ progress=True)
+ else:
+ filename = path
+ if not os.path.exists(os.path.join(self.model_path, filename)) or filename is None:
+ print(f"ScuNET: Unable to load model from {filename}", file=sys.stderr)
+ return None
+
+ model = net(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64)
+ model.load_state_dict(torch.load(filename), strict=True)
+ model.eval()
+ for k, v in model.named_parameters():
+ v.requires_grad = False
+ model = model.to(device)
+
+ return model
+
diff --git a/modules/scunet_model_arch.py b/modules/scunet_model_arch.py
new file mode 100644
index 000000000..972a2639a
--- /dev/null
+++ b/modules/scunet_model_arch.py
@@ -0,0 +1,265 @@
+# -*- coding: utf-8 -*-
+import numpy as np
+import torch
+import torch.nn as nn
+from einops import rearrange
+from einops.layers.torch import Rearrange
+from timm.models.layers import trunc_normal_, DropPath
+
+
+class WMSA(nn.Module):
+ """ Self-attention module in Swin Transformer
+ """
+
+ def __init__(self, input_dim, output_dim, head_dim, window_size, type):
+ super(WMSA, self).__init__()
+ self.input_dim = input_dim
+ self.output_dim = output_dim
+ self.head_dim = head_dim
+ self.scale = self.head_dim ** -0.5
+ self.n_heads = input_dim // head_dim
+ self.window_size = window_size
+ self.type = type
+ self.embedding_layer = nn.Linear(self.input_dim, 3 * self.input_dim, bias=True)
+
+ self.relative_position_params = nn.Parameter(
+ torch.zeros((2 * window_size - 1) * (2 * window_size - 1), self.n_heads))
+
+ self.linear = nn.Linear(self.input_dim, self.output_dim)
+
+ trunc_normal_(self.relative_position_params, std=.02)
+ self.relative_position_params = torch.nn.Parameter(
+ self.relative_position_params.view(2 * window_size - 1, 2 * window_size - 1, self.n_heads).transpose(1,
+ 2).transpose(
+ 0, 1))
+
+ def generate_mask(self, h, w, p, shift):
+ """ generating the mask of SW-MSA
+ Args:
+ shift: shift parameters in CyclicShift.
+ Returns:
+ attn_mask: should be (1 1 w p p),
+ """
+ # supporting square.
+ attn_mask = torch.zeros(h, w, p, p, p, p, dtype=torch.bool, device=self.relative_position_params.device)
+ if self.type == 'W':
+ return attn_mask
+
+ s = p - shift
+ attn_mask[-1, :, :s, :, s:, :] = True
+ attn_mask[-1, :, s:, :, :s, :] = True
+ attn_mask[:, -1, :, :s, :, s:] = True
+ attn_mask[:, -1, :, s:, :, :s] = True
+ attn_mask = rearrange(attn_mask, 'w1 w2 p1 p2 p3 p4 -> 1 1 (w1 w2) (p1 p2) (p3 p4)')
+ return attn_mask
+
+ def forward(self, x):
+ """ Forward pass of Window Multi-head Self-attention module.
+ Args:
+ x: input tensor with shape of [b h w c];
+ attn_mask: attention mask, fill -inf where the value is True;
+ Returns:
+ output: tensor shape [b h w c]
+ """
+ if self.type != 'W': x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2))
+ x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size)
+ h_windows = x.size(1)
+ w_windows = x.size(2)
+ # square validation
+ # assert h_windows == w_windows
+
+ x = rearrange(x, 'b w1 w2 p1 p2 c -> b (w1 w2) (p1 p2) c', p1=self.window_size, p2=self.window_size)
+ qkv = self.embedding_layer(x)
+ q, k, v = rearrange(qkv, 'b nw np (threeh c) -> threeh b nw np c', c=self.head_dim).chunk(3, dim=0)
+ sim = torch.einsum('hbwpc,hbwqc->hbwpq', q, k) * self.scale
+ # Adding learnable relative embedding
+ sim = sim + rearrange(self.relative_embedding(), 'h p q -> h 1 1 p q')
+ # Using Attn Mask to distinguish different subwindows.
+ if self.type != 'W':
+ attn_mask = self.generate_mask(h_windows, w_windows, self.window_size, shift=self.window_size // 2)
+ sim = sim.masked_fill_(attn_mask, float("-inf"))
+
+ probs = nn.functional.softmax(sim, dim=-1)
+ output = torch.einsum('hbwij,hbwjc->hbwic', probs, v)
+ output = rearrange(output, 'h b w p c -> b w p (h c)')
+ output = self.linear(output)
+ output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size)
+
+ if self.type != 'W': output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2),
+ dims=(1, 2))
+ return output
+
+ def relative_embedding(self):
+ cord = torch.tensor(np.array([[i, j] for i in range(self.window_size) for j in range(self.window_size)]))
+ relation = cord[:, None, :] - cord[None, :, :] + self.window_size - 1
+ # negative is allowed
+ return self.relative_position_params[:, relation[:, :, 0].long(), relation[:, :, 1].long()]
+
+
+class Block(nn.Module):
+ def __init__(self, input_dim, output_dim, head_dim, window_size, drop_path, type='W', input_resolution=None):
+ """ SwinTransformer Block
+ """
+ super(Block, self).__init__()
+ self.input_dim = input_dim
+ self.output_dim = output_dim
+ assert type in ['W', 'SW']
+ self.type = type
+ if input_resolution <= window_size:
+ self.type = 'W'
+
+ self.ln1 = nn.LayerNorm(input_dim)
+ self.msa = WMSA(input_dim, input_dim, head_dim, window_size, self.type)
+ self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
+ self.ln2 = nn.LayerNorm(input_dim)
+ self.mlp = nn.Sequential(
+ nn.Linear(input_dim, 4 * input_dim),
+ nn.GELU(),
+ nn.Linear(4 * input_dim, output_dim),
+ )
+
+ def forward(self, x):
+ x = x + self.drop_path(self.msa(self.ln1(x)))
+ x = x + self.drop_path(self.mlp(self.ln2(x)))
+ return x
+
+
+class ConvTransBlock(nn.Module):
+ def __init__(self, conv_dim, trans_dim, head_dim, window_size, drop_path, type='W', input_resolution=None):
+ """ SwinTransformer and Conv Block
+ """
+ super(ConvTransBlock, self).__init__()
+ self.conv_dim = conv_dim
+ self.trans_dim = trans_dim
+ self.head_dim = head_dim
+ self.window_size = window_size
+ self.drop_path = drop_path
+ self.type = type
+ self.input_resolution = input_resolution
+
+ assert self.type in ['W', 'SW']
+ if self.input_resolution <= self.window_size:
+ self.type = 'W'
+
+ self.trans_block = Block(self.trans_dim, self.trans_dim, self.head_dim, self.window_size, self.drop_path,
+ self.type, self.input_resolution)
+ self.conv1_1 = nn.Conv2d(self.conv_dim + self.trans_dim, self.conv_dim + self.trans_dim, 1, 1, 0, bias=True)
+ self.conv1_2 = nn.Conv2d(self.conv_dim + self.trans_dim, self.conv_dim + self.trans_dim, 1, 1, 0, bias=True)
+
+ self.conv_block = nn.Sequential(
+ nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False),
+ nn.ReLU(True),
+ nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False)
+ )
+
+ def forward(self, x):
+ conv_x, trans_x = torch.split(self.conv1_1(x), (self.conv_dim, self.trans_dim), dim=1)
+ conv_x = self.conv_block(conv_x) + conv_x
+ trans_x = Rearrange('b c h w -> b h w c')(trans_x)
+ trans_x = self.trans_block(trans_x)
+ trans_x = Rearrange('b h w c -> b c h w')(trans_x)
+ res = self.conv1_2(torch.cat((conv_x, trans_x), dim=1))
+ x = x + res
+
+ return x
+
+
+class SCUNet(nn.Module):
+ # def __init__(self, in_nc=3, config=[2, 2, 2, 2, 2, 2, 2], dim=64, drop_path_rate=0.0, input_resolution=256):
+ def __init__(self, in_nc=3, config=None, dim=64, drop_path_rate=0.0, input_resolution=256):
+ super(SCUNet, self).__init__()
+ if config is None:
+ config = [2, 2, 2, 2, 2, 2, 2]
+ self.config = config
+ self.dim = dim
+ self.head_dim = 32
+ self.window_size = 8
+
+ # drop path rate for each layer
+ dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(config))]
+
+ self.m_head = [nn.Conv2d(in_nc, dim, 3, 1, 1, bias=False)]
+
+ begin = 0
+ self.m_down1 = [ConvTransBlock(dim // 2, dim // 2, self.head_dim, self.window_size, dpr[i + begin],
+ 'W' if not i % 2 else 'SW', input_resolution)
+ for i in range(config[0])] + \
+ [nn.Conv2d(dim, 2 * dim, 2, 2, 0, bias=False)]
+
+ begin += config[0]
+ self.m_down2 = [ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i + begin],
+ 'W' if not i % 2 else 'SW', input_resolution // 2)
+ for i in range(config[1])] + \
+ [nn.Conv2d(2 * dim, 4 * dim, 2, 2, 0, bias=False)]
+
+ begin += config[1]
+ self.m_down3 = [ConvTransBlock(2 * dim, 2 * dim, self.head_dim, self.window_size, dpr[i + begin],
+ 'W' if not i % 2 else 'SW', input_resolution // 4)
+ for i in range(config[2])] + \
+ [nn.Conv2d(4 * dim, 8 * dim, 2, 2, 0, bias=False)]
+
+ begin += config[2]
+ self.m_body = [ConvTransBlock(4 * dim, 4 * dim, self.head_dim, self.window_size, dpr[i + begin],
+ 'W' if not i % 2 else 'SW', input_resolution // 8)
+ for i in range(config[3])]
+
+ begin += config[3]
+ self.m_up3 = [nn.ConvTranspose2d(8 * dim, 4 * dim, 2, 2, 0, bias=False), ] + \
+ [ConvTransBlock(2 * dim, 2 * dim, self.head_dim, self.window_size, dpr[i + begin],
+ 'W' if not i % 2 else 'SW', input_resolution // 4)
+ for i in range(config[4])]
+
+ begin += config[4]
+ self.m_up2 = [nn.ConvTranspose2d(4 * dim, 2 * dim, 2, 2, 0, bias=False), ] + \
+ [ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i + begin],
+ 'W' if not i % 2 else 'SW', input_resolution // 2)
+ for i in range(config[5])]
+
+ begin += config[5]
+ self.m_up1 = [nn.ConvTranspose2d(2 * dim, dim, 2, 2, 0, bias=False), ] + \
+ [ConvTransBlock(dim // 2, dim // 2, self.head_dim, self.window_size, dpr[i + begin],
+ 'W' if not i % 2 else 'SW', input_resolution)
+ for i in range(config[6])]
+
+ self.m_tail = [nn.Conv2d(dim, in_nc, 3, 1, 1, bias=False)]
+
+ self.m_head = nn.Sequential(*self.m_head)
+ self.m_down1 = nn.Sequential(*self.m_down1)
+ self.m_down2 = nn.Sequential(*self.m_down2)
+ self.m_down3 = nn.Sequential(*self.m_down3)
+ self.m_body = nn.Sequential(*self.m_body)
+ self.m_up3 = nn.Sequential(*self.m_up3)
+ self.m_up2 = nn.Sequential(*self.m_up2)
+ self.m_up1 = nn.Sequential(*self.m_up1)
+ self.m_tail = nn.Sequential(*self.m_tail)
+ # self.apply(self._init_weights)
+
+ def forward(self, x0):
+
+ h, w = x0.size()[-2:]
+ paddingBottom = int(np.ceil(h / 64) * 64 - h)
+ paddingRight = int(np.ceil(w / 64) * 64 - w)
+ x0 = nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(x0)
+
+ x1 = self.m_head(x0)
+ x2 = self.m_down1(x1)
+ x3 = self.m_down2(x2)
+ x4 = self.m_down3(x3)
+ x = self.m_body(x4)
+ x = self.m_up3(x + x4)
+ x = self.m_up2(x + x3)
+ x = self.m_up1(x + x2)
+ x = self.m_tail(x + x1)
+
+ x = x[..., :h, :w]
+
+ return x
+
+ def _init_weights(self, m):
+ if isinstance(m, nn.Linear):
+ trunc_normal_(m.weight, std=.02)
+ if m.bias is not None:
+ nn.init.constant_(m.bias, 0)
+ elif isinstance(m, nn.LayerNorm):
+ nn.init.constant_(m.bias, 0)
+ nn.init.constant_(m.weight, 1.0)
\ No newline at end of file
diff --git a/modules/shared.py b/modules/shared.py
index 8428c7a38..a48b995ad 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -40,6 +40,7 @@ parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory wi
parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(model_path, 'ESRGAN'))
parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(model_path, 'BSRGAN'))
parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(model_path, 'RealESRGAN'))
+parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(model_path, 'ScuNET'))
parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(model_path, 'SwinIR'))
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(model_path, 'LDSR'))
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
From abdbf1de646f007b6d76cfb3f416fdfaadb57903 Mon Sep 17 00:00:00 2001
From: Liam
Date: Thu, 29 Sep 2022 14:40:47 -0400
Subject: [PATCH 007/460] token counters now update when roll artist and style
buttons are pressed
https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/1194#issuecomment-1261203893
---
javascript/ui.js | 28 ++++++++++++++++++++++------
modules/ui.py | 6 +++++-
2 files changed, 27 insertions(+), 7 deletions(-)
diff --git a/javascript/ui.js b/javascript/ui.js
index bfe024108..88fd45ae9 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -199,12 +199,21 @@ let txt2img_textarea, img2img_textarea = undefined;
let wait_time = 800
let token_timeout;
-function submit_prompt(event, generate_button_id) {
- if (event.altKey && event.keyCode === 13) {
- event.preventDefault();
- gradioApp().getElementById(generate_button_id).click();
- return;
- }
+function roll_artist_txt2img(prompt_text) {
+ update_token_counter("txt2img_token_button")
+ return prompt_text;
+}
+function roll_artist_img2img(prompt_text) {
+ update_token_counter("img2img_token_button")
+ return prompt_text;
+}
+function update_style_txt2img(prompt_text, negative_prompt, style1, style2) {
+ update_token_counter("txt2img_token_button")
+ return [prompt_text, negative_prompt, style1, style2]
+}
+function update_style_img2img(prompt_text, negative_prompt, style1, style2) {
+ update_token_counter("img2img_token_button")
+ return [prompt_text, negative_prompt, style1, style2]
}
function update_token_counter(button_id) {
@@ -212,3 +221,10 @@ function update_token_counter(button_id) {
clearTimeout(token_timeout);
token_timeout = setTimeout(() => gradioApp().getElementById(button_id)?.click(), wait_time);
}
+function submit_prompt(event, generate_button_id) {
+ if (event.altKey && event.keyCode === 13) {
+ event.preventDefault();
+ gradioApp().getElementById(generate_button_id).click();
+ return;
+ }
+}
\ No newline at end of file
diff --git a/modules/ui.py b/modules/ui.py
index 15572bb0a..5eea18606 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -539,6 +539,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
roll.click(
fn=roll_artist,
+ _js="roll_artist_txt2img",
inputs=[
txt2img_prompt,
],
@@ -743,6 +744,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
roll.click(
fn=roll_artist,
+ _js="roll_artist_img2img",
inputs=[
img2img_prompt,
],
@@ -753,6 +755,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
+ style_js_funcs = ["update_style_txt2img", "update_style_img2img"]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
@@ -764,9 +767,10 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2],
)
- for button, (prompt, negative_prompt), (style1, style2) in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns):
+ for button, (prompt, negative_prompt), (style1, style2), js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs):
button.click(
fn=apply_styles,
+ _js=js_func,
inputs=[prompt, negative_prompt, style1, style2],
outputs=[prompt, negative_prompt, style1, style2],
)
From ff8dc1908af088d0ed43fb85baad662733c5ca9c Mon Sep 17 00:00:00 2001
From: Liam
Date: Thu, 29 Sep 2022 15:47:06 -0400
Subject: [PATCH 008/460] fixed token counter for prompt editing
---
modules/ui.py | 20 +++++++++++++-------
1 file changed, 13 insertions(+), 7 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index 5eea18606..6bf28562c 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -11,6 +11,7 @@ import time
import traceback
import platform
import subprocess as sp
+from functools import reduce
import numpy as np
import torch
@@ -32,6 +33,7 @@ import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
+from modules.prompt_parser import get_learned_conditioning_prompt_schedules
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
@@ -345,8 +347,11 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info:
outputs=[seed, dummy_component]
)
-def update_token_counter(text):
- tokens, token_count, max_length = model_hijack.tokenize(text)
+def update_token_counter(text, steps):
+ prompt_schedules = get_learned_conditioning_prompt_schedules([text], steps)
+ flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules)
+ prompts = [prompt_text for step,prompt_text in flat_prompts]
+ tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
style_class = ' class="red"' if (token_count > max_length) else ""
return f"{token_count}/{max_length}"
@@ -364,8 +369,7 @@ def create_toprow(is_img2img):
roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0)
paste = gr.Button(value=paste_symbol, elem_id="paste")
token_counter = gr.HTML(value="", elem_id=f"{id_part}_token_counter")
- hidden_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
- hidden_button.click(fn=update_token_counter, inputs=[prompt], outputs=[token_counter])
+ token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button")
with gr.Column(scale=10, elem_id="style_pos_col"):
prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1)
@@ -396,7 +400,7 @@ def create_toprow(is_img2img):
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
- return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste
+ return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste, token_counter, token_button
def setup_progressbar(progressbar, preview, id_part):
@@ -419,7 +423,7 @@ def setup_progressbar(progressbar, preview, id_part):
def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
- txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste = create_toprow(is_img2img=False)
+ txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
@@ -568,9 +572,10 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
(hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)),
]
modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt)
+ token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as img2img_interface:
- img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste = create_toprow(is_img2img=True)
+ img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
with gr.Column(scale=1):
@@ -793,6 +798,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
(denoising_strength, "Denoising strength"),
]
modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt)
+ token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as extras_interface:
with gr.Row().style(equal_height=False):
From 3c6a049fc3c6b54ada3736710a7e86663ea7f3d9 Mon Sep 17 00:00:00 2001
From: Liam
Date: Fri, 30 Sep 2022 12:12:44 -0400
Subject: [PATCH 009/460] consolidated token counter functions
---
javascript/ui.js | 21 +++++++++------------
modules/ui.py | 6 +++---
2 files changed, 12 insertions(+), 15 deletions(-)
diff --git a/javascript/ui.js b/javascript/ui.js
index 88fd45ae9..f94ed081d 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -199,21 +199,18 @@ let txt2img_textarea, img2img_textarea = undefined;
let wait_time = 800
let token_timeout;
-function roll_artist_txt2img(prompt_text) {
+function update_txt2img_tokens(...args) {
update_token_counter("txt2img_token_button")
- return prompt_text;
+ if (args.length == 2)
+ return args[0]
+ return args;
}
-function roll_artist_img2img(prompt_text) {
+
+function update_img2img_tokens(...args) {
update_token_counter("img2img_token_button")
- return prompt_text;
-}
-function update_style_txt2img(prompt_text, negative_prompt, style1, style2) {
- update_token_counter("txt2img_token_button")
- return [prompt_text, negative_prompt, style1, style2]
-}
-function update_style_img2img(prompt_text, negative_prompt, style1, style2) {
- update_token_counter("img2img_token_button")
- return [prompt_text, negative_prompt, style1, style2]
+ if (args.length == 2)
+ return args[0]
+ return args;
}
function update_token_counter(button_id) {
diff --git a/modules/ui.py b/modules/ui.py
index 6bf28562c..40c089841 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -543,7 +543,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
roll.click(
fn=roll_artist,
- _js="roll_artist_txt2img",
+ _js="update_txt2img_tokens",
inputs=[
txt2img_prompt,
],
@@ -749,7 +749,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
roll.click(
fn=roll_artist,
- _js="roll_artist_img2img",
+ _js="update_img2img_tokens",
inputs=[
img2img_prompt,
],
@@ -760,7 +760,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)]
style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)]
- style_js_funcs = ["update_style_txt2img", "update_style_img2img"]
+ style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"]
for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts):
button.click(
From bdaa36c84470adbdce3e98c01a69af5e95adfb02 Mon Sep 17 00:00:00 2001
From: brkirch
Date: Fri, 30 Sep 2022 23:53:25 -0400
Subject: [PATCH 010/460] When device is MPS, use CPU for GFPGAN instead
GFPGAN will not work if the device is MPS, so default to CPU instead.
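A condensed sketch of the resulting selection logic (the MPS probe is an assumption; detection APIs varied across torch versions at the time):

import torch


def get_optimal_device():
    if torch.cuda.is_available():
        return torch.device("cuda")
    if getattr(torch, "has_mps", False):  # nightly-only attribute back then
        return torch.device("mps")
    return torch.device("cpu")


device = get_optimal_device()
# GFPGAN (and CodeFormer) misbehave on MPS, so both fall back to CPU:
device_gfpgan = device_codeformer = torch.device("cpu") if device.type == "mps" else device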
---
modules/devices.py | 2 +-
modules/gfpgan_model.py | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/modules/devices.py b/modules/devices.py
index 07bb23397..08bb26d6f 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -34,7 +34,7 @@ errors.run(enable_tf32, "Enabling TF32")
device = get_optimal_device()
-device_codeformer = cpu if has_mps else device
+device_gfpgan = device_codeformer = cpu if device.type == 'mps' else device
def randn(seed, shape):
diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py
index bb30d7330..fcd8544a5 100644
--- a/modules/gfpgan_model.py
+++ b/modules/gfpgan_model.py
@@ -21,7 +21,7 @@ def gfpgann():
global loaded_gfpgan_model
global model_path
if loaded_gfpgan_model is not None:
- loaded_gfpgan_model.gfpgan.to(shared.device)
+ loaded_gfpgan_model.gfpgan.to(devices.device_gfpgan)
return loaded_gfpgan_model
if gfpgan_constructor is None:
@@ -36,8 +36,8 @@ def gfpgann():
else:
print("Unable to load gfpgan model!")
return None
- model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None)
- model.gfpgan.to(shared.device)
+ model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=devices.device_gfpgan)
+ model.gfpgan.to(devices.device_gfpgan)
loaded_gfpgan_model = model
return model
From 4c2478a68a4f11959fe4887d38e0436eac19f97e Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 1 Oct 2022 18:30:53 +0100
Subject: [PATCH 011/460] add script reload method
---
modules/scripts.py | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/modules/scripts.py b/modules/scripts.py
index 7c3bd5e74..3c14b9e32 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -165,3 +165,12 @@ class ScriptRunner:
scripts_txt2img = ScriptRunner()
scripts_img2img = ScriptRunner()
+
+def reload_scripts(basedir):
+ global scripts_txt2img,scripts_img2img
+
+ scripts_data.clear()
+ load_scripts(basedir)
+
+ scripts_txt2img = ScriptRunner()
+ scripts_img2img = ScriptRunner()
From 95f35d04ab1636e08f69ca9c0ae2446714870e80 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 1 Oct 2022 18:31:58 +0100
Subject: [PATCH 012/460] Host busy thread, check for reload
---
webui.py | 46 +++++++++++++++++++++++++++++++---------------
1 file changed, 31 insertions(+), 15 deletions(-)
diff --git a/webui.py b/webui.py
index b8cccd546..4948c394f 100644
--- a/webui.py
+++ b/webui.py
@@ -86,22 +86,38 @@ def webui():
signal.signal(signal.SIGINT, sigint_handler)
- demo = modules.ui.create_ui(
- txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img),
- img2img=wrap_gradio_gpu_call(modules.img2img.img2img),
- run_extras=wrap_gradio_gpu_call(modules.extras.run_extras),
- run_pnginfo=modules.extras.run_pnginfo,
- run_modelmerger=modules.extras.run_modelmerger
- )
+ while 1:
- demo.launch(
- share=cmd_opts.share,
- server_name="0.0.0.0" if cmd_opts.listen else None,
- server_port=cmd_opts.port,
- debug=cmd_opts.gradio_debug,
- auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
- inbrowser=cmd_opts.autolaunch,
- )
+ demo = modules.ui.create_ui(
+ txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img),
+ img2img=wrap_gradio_gpu_call(modules.img2img.img2img),
+ run_extras=wrap_gradio_gpu_call(modules.extras.run_extras),
+ run_pnginfo=modules.extras.run_pnginfo,
+ run_modelmerger=modules.extras.run_modelmerger
+ )
+
+
+ demo.launch(
+ share=cmd_opts.share,
+ server_name="0.0.0.0" if cmd_opts.listen else None,
+ server_port=cmd_opts.port,
+ debug=cmd_opts.gradio_debug,
+ auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
+ inbrowser=cmd_opts.autolaunch,
+ prevent_thread_lock=True
+ )
+
+ while 1:
+ time.sleep(0.5)
+ if getattr(demo,'do_restart',False):
+ time.sleep(0.5)
+ demo.close()
+ time.sleep(0.5)
+ break
+
+ print('Reloading Scripts')
+ modules.scripts.reload_scripts(os.path.join(script_path, "scripts"))
+ print('Restarting Gradio')
if __name__ == "__main__":
From 4f8490cd5630823ac44de8b5c5e4325bdbbea7fa Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 1 Oct 2022 18:33:31 +0100
Subject: [PATCH 013/460] add restart button
---
modules/ui.py | 15 ++++++++++++++-
1 file changed, 14 insertions(+), 1 deletion(-)
diff --git a/modules/ui.py b/modules/ui.py
index 15572bb0a..ec6aaa288 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1002,6 +1002,17 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
_js='function(){}'
)
+ def request_restart():
+ settings_interface.gradio_ref.do_restart = True
+
+ restart_gradio = gr.Button(value='Restart Gradio and Refresh Scripts')
+ restart_gradio.click(
+ fn=request_restart,
+ inputs=[],
+ outputs=[],
+ _js='function(){document.body.innerHTML=\'Reloading\';setTimeout(function(){location.reload()},2000)}'
+ )
+
if column is not None:
column.__exit__()
@@ -1026,7 +1037,9 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
css += css_hide_progressbar
with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo:
-
+
+ settings_interface.gradio_ref = demo
+
with gr.Tabs() as tabs:
for interface, label, ifid in interfaces:
with gr.TabItem(label, id=ifid):
From 121ed7d36febe94995774973b5edc1ba2ba84aad Mon Sep 17 00:00:00 2001
From: Alexandre Simard
Date: Sat, 1 Oct 2022 14:04:20 -0400
Subject: [PATCH 014/460] Add progress bar for SwinIR in cmd
I do not know how to add them to the UI...
---
modules/swinir_model.py | 25 ++++++++++++++-----------
webui-user.bat | 2 +-
2 files changed, 15 insertions(+), 12 deletions(-)
diff --git a/modules/swinir_model.py b/modules/swinir_model.py
index 41fda5a7c..9bd454c69 100644
--- a/modules/swinir_model.py
+++ b/modules/swinir_model.py
@@ -5,6 +5,7 @@ import numpy as np
import torch
from PIL import Image
from basicsr.utils.download_util import load_file_from_url
+from tqdm import tqdm
from modules import modelloader
from modules.paths import models_path
@@ -122,18 +123,20 @@ def inference(img, model, tile, tile_overlap, window_size, scale):
E = torch.zeros(b, c, h * sf, w * sf, dtype=torch.half, device=device).type_as(img)
W = torch.zeros_like(E, dtype=torch.half, device=device)
- for h_idx in h_idx_list:
- for w_idx in w_idx_list:
- in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
- out_patch = model(in_patch)
- out_patch_mask = torch.ones_like(out_patch)
+ with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="SwinIR tiles") as pbar:
+ for h_idx in h_idx_list:
+ for w_idx in w_idx_list:
+ in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile]
+ out_patch = model(in_patch)
+ out_patch_mask = torch.ones_like(out_patch)
- E[
- ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
- ].add_(out_patch)
- W[
- ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
- ].add_(out_patch_mask)
+ E[
+ ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
+ ].add_(out_patch)
+ W[
+ ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf
+ ].add_(out_patch_mask)
+ pbar.update(1)
output = E.div_(W)
return output
diff --git a/webui-user.bat b/webui-user.bat
index e5a257bef..5c7789535 100644
--- a/webui-user.bat
+++ b/webui-user.bat
@@ -3,6 +3,6 @@
set PYTHON=
set GIT=
set VENV_DIR=
-set COMMANDLINE_ARGS=
+set COMMANDLINE_ARGS=--autolaunch
call webui.bat
From b8a2b0453b62e4e99d0e5c049313402bc79056b5 Mon Sep 17 00:00:00 2001
From: Alexandre Simard
Date: Sat, 1 Oct 2022 14:07:20 -0400
Subject: [PATCH 015/460] Set launch options to default
---
webui-user.bat | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/webui-user.bat b/webui-user.bat
index 5c7789535..e5a257bef 100644
--- a/webui-user.bat
+++ b/webui-user.bat
@@ -3,6 +3,6 @@
set PYTHON=
set GIT=
set VENV_DIR=
-set COMMANDLINE_ARGS=--autolaunch
+set COMMANDLINE_ARGS=
call webui.bat
From a9044475c06204deb886d2a69467d0d3a9f5c9be Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 1 Oct 2022 21:47:42 +0100
Subject: [PATCH 016/460] add time import
---
webui.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/webui.py b/webui.py
index 4948c394f..e2c4c2baa 100644
--- a/webui.py
+++ b/webui.py
@@ -1,5 +1,6 @@
import os
import threading
+import time
from modules import devices
from modules.paths import script_path
From afaa03c5fd05f48ed9c9f15558ea6f0bc4f61628 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 1 Oct 2022 22:43:45 +0100
Subject: [PATCH 017/460] add redefinition guard to
gradio_routes_templates_response
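The guard pattern in isolation: save the original attribute into globals() exactly once, so re-executing the module (via the new reload loop) doesn't wrap the previous wrapper. A generic sketch against a hypothetical target:

import json  # hypothetical patch target; any module attribute works the same way

if "original_dumps" not in globals():
    original_dumps = json.dumps  # captured exactly once, even across re-execution

    def patched_dumps(*args, **kwargs):
        # Always delegate to the saved original, never to an earlier wrapper.
        return original_dumps(*args, **kwargs)

    json.dumps = patched_dumps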
---
modules/ui.py | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index ec6aaa288..fd057916e 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1219,12 +1219,13 @@ for filename in sorted(os.listdir(jsdir)):
javascript += f"\n<script>{jsfile.read()}</script>"
-def template_response(*args, **kwargs):
- res = gradio_routes_templates_response(*args, **kwargs)
- res.body = res.body.replace(b'</head>', f'<script>{javascript}</script></head>'.encode("utf8"))
- res.init_headers()
- return res
+if 'gradio_routes_templates_response' not in globals():
+ def template_response(*args, **kwargs):
+ res = gradio_routes_templates_response(*args, **kwargs)
+ res.body = res.body.replace(b'</head>', f'<script>{javascript}</script></head>'.encode("utf8"))
+ res.init_headers()
+ return res
+ gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
+ gradio.routes.templates.TemplateResponse = template_response
-gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
-gradio.routes.templates.TemplateResponse = template_response
From 30f2e3565840544dd66470c6ef216ec664db6432 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 1 Oct 2022 22:50:03 +0100
Subject: [PATCH 018/460] add importlib.reload
---
webui.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/webui.py b/webui.py
index e2c4c2baa..ab200045a 100644
--- a/webui.py
+++ b/webui.py
@@ -1,7 +1,7 @@
import os
import threading
import time
-
+import importlib
from modules import devices
from modules.paths import script_path
import signal
@@ -116,8 +116,10 @@ def webui():
time.sleep(0.5)
break
- print('Reloading Scripts')
+ print('Reloading Custom Scripts')
modules.scripts.reload_scripts(os.path.join(script_path, "scripts"))
+ print('Reloading modules: modules.ui')
+ importlib.reload(modules.ui)
print('Restarting Gradio')
From 6048002dade91b82b1ce9fea3c6ff5b5c1f8c990 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 1 Oct 2022 23:10:07 +0100
Subject: [PATCH 019/460] Add scope warning to refresh button
---
modules/ui.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/ui.py b/modules/ui.py
index fd057916e..72846a122 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1005,7 +1005,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
def request_restart():
settings_interface.gradio_ref.do_restart = True
- restart_gradio = gr.Button(value='Restart Gradio and Refresh Scripts')
+ restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary')
restart_gradio.click(
fn=request_restart,
inputs=[],
From 027c5aae5546ff3650347cb3c2b87df4415ab900 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 1 Oct 2022 23:29:26 +0100
Subject: [PATCH 020/460] update reloading message style
---
modules/ui.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/ui.py b/modules/ui.py
index 72846a122..7b2359c20 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1010,7 +1010,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
fn=request_restart,
inputs=[],
outputs=[],
- _js='function(){document.body.innerHTML=\'Reloading\';setTimeout(function(){location.reload()},2000)}'
+ _js='function(){document.body.innerHTML=\'Reloading...\';setTimeout(function(){location.reload()},2000)}'
)
if column is not None:
From 55b046312c51bb7b2329d3b5b7f1c05956f821bf Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sun, 2 Oct 2022 00:12:49 +0100
Subject: [PATCH 021/460] move JavaScript into ui.js
---
javascript/ui.js | 5 +++++
1 file changed, 5 insertions(+)
diff --git a/javascript/ui.js b/javascript/ui.js
index bfe024108..e8f289b44 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -212,3 +212,8 @@ function update_token_counter(button_id) {
clearTimeout(token_timeout);
token_timeout = setTimeout(() => gradioApp().getElementById(button_id)?.click(), wait_time);
}
+
+function restart_reload(){
+ document.body.innerHTML='Reloading...';
+ setTimeout(function(){location.reload()},2000)
+}
From 0aa354bd5e811e2b41b17a3052cf5d4c8190d533 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sun, 2 Oct 2022 00:13:47 +0100
Subject: [PATCH 022/460] remove styling from python side
---
modules/ui.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/ui.py b/modules/ui.py
index 7b2359c20..cb859ac45 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1010,7 +1010,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
fn=request_restart,
inputs=[],
outputs=[],
- _js='function(){document.body.innerHTML=\'Reloading...\';setTimeout(function(){location.reload()},2000)}'
+ _js='function(){restart_reload()}'
)
if column is not None:
From cf33268d686986a24f2e04eb615f01ed53bfe308 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sun, 2 Oct 2022 01:18:42 +0100
Subject: [PATCH 023/460] add script body only refresh
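The trick used by reload_sources() below: compile a script's current on-disk source and exec it into a fresh ModuleType, bypassing sys.modules so no import caching gets in the way. In isolation (function name hypothetical):

from types import ModuleType


def exec_source_as_module(path):
    # Re-run a file's current source inside a brand-new module namespace;
    # the import system never sees it, so nothing is cached or reused.
    with open(path, "r", encoding="utf8") as f:
        source = f.read()
    module = ModuleType(path)
    exec(compile(source, path, "exec"), module.__dict__)
    return module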
---
modules/scripts.py | 23 +++++++++++++++++++++++
1 file changed, 23 insertions(+)
diff --git a/modules/scripts.py b/modules/scripts.py
index 3c14b9e32..788397f53 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -162,10 +162,33 @@ class ScriptRunner:
return processed
+ def reload_sources(self):
+ for si,script in list(enumerate(self.scripts)):
+ with open(script.filename, "r", encoding="utf8") as file:
+ args_from = script.args_from
+ args_to = script.args_to
+ filename = script.filename
+ text = file.read()
+
+ from types import ModuleType
+ compiled = compile(text, filename, 'exec')
+ module = ModuleType(script.filename)
+ exec(compiled, module.__dict__)
+
+ for key, script_class in module.__dict__.items():
+ if type(script_class) == type and issubclass(script_class, Script):
+ self.scripts[si] = script_class()
+ self.scripts[si].filename = filename
+ self.scripts[si].args_from = args_from
+ self.scripts[si].args_to = args_to
scripts_txt2img = ScriptRunner()
scripts_img2img = ScriptRunner()
+def reload_script_body_only():
+ scripts_txt2img.reload_sources()
+ scripts_img2img.reload_sources()
+
def reload_scripts(basedir):
global scripts_txt2img,scripts_img2img
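Note on the reload technique: reload_sources above hot-reloads each custom script by re-reading its source, compiling it, and executing it inside a fresh module namespace, then swapping the old Script instances for newly constructed ones while preserving their argument ranges. A rough standalone sketch of the same idea (the Script stub and the helper names are illustrative, not part of the patch):

    from types import ModuleType


    class Script:  # stub standing in for modules.scripts.Script
        pass


    def reload_module_from_source(filename):
        # Re-execute a file's current source in a fresh module namespace,
        # mirroring what ScriptRunner.reload_sources does per script.
        with open(filename, "r", encoding="utf8") as file:
            text = file.read()

        compiled = compile(text, filename, "exec")
        module = ModuleType(filename)
        exec(compiled, module.__dict__)
        return module


    def find_script_classes(module):
        # Pick out Script subclasses defined by the re-executed module,
        # as reload_sources does when rebuilding self.scripts in place.
        return [obj for obj in module.__dict__.values()
                if isinstance(obj, type) and issubclass(obj, Script) and obj is not Script]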
From 07e40ad7f23472fc1c781fe1cc6c1ee403413918 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sun, 2 Oct 2022 01:19:55 +0100
Subject: [PATCH 024/460] add custom script body only refresh option
---
modules/ui.py | 11 +++++++++++
1 file changed, 11 insertions(+)
diff --git a/modules/ui.py b/modules/ui.py
index cb859ac45..eb7c05852 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1012,6 +1012,17 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
outputs=[],
_js='function(){restart_reload()}'
)
+
+ def reload_scripts():
+ modules.scripts.reload_script_body_only()
+
+ reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='primary')
+ reload_script_bodies.click(
+ fn=reload_scripts,
+ inputs=[],
+ outputs=[],
+ _js='function(){}'
+ )
if column is not None:
column.__exit__()
From 2deea867814272f1f089b60e9ba8d587c16b2fb1 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sun, 2 Oct 2022 01:36:30 +0100
Subject: [PATCH 025/460] Put reload buttons in row and add secondary style
---
modules/ui.py | 23 +++++++++++++----------
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index eb7c05852..963a2c611 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1002,27 +1002,30 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
_js='function(){}'
)
- def request_restart():
- settings_interface.gradio_ref.do_restart = True
+ with gr.Row():
+ reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary')
+ restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary')
- restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary')
- restart_gradio.click(
- fn=request_restart,
- inputs=[],
- outputs=[],
- _js='function(){restart_reload()}'
- )
def reload_scripts():
modules.scripts.reload_script_body_only()
- reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='primary')
reload_script_bodies.click(
fn=reload_scripts,
inputs=[],
outputs=[],
_js='function(){}'
)
+
+ def request_restart():
+ settings_interface.gradio_ref.do_restart = True
+
+ restart_gradio.click(
+ fn=request_restart,
+ inputs=[],
+ outputs=[],
+ _js='function(){restart_reload()}'
+ )
if column is not None:
column.__exit__()
From 3cf1a96006daffedb8ecd0ae142eca4c4da06105 Mon Sep 17 00:00:00 2001
From: RnDMonkey
Date: Sat, 1 Oct 2022 21:11:03 -0700
Subject: [PATCH 026/460] added safety for blank directory naming patterns
---
modules/images.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index f1aed5d6b..e7894b4cd 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -311,7 +311,7 @@ def apply_filename_pattern(x, p, seed, prompt):
x = x.replace("[cfg]", str(p.cfg_scale))
x = x.replace("[width]", str(p.width))
x = x.replace("[height]", str(p.height))
- x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"]), replace_spaces=False))
+ x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"]) or "No styles", replace_spaces=False))
x = x.replace("[sampler]", sanitize_filename_part(sd_samplers.samplers[p.sampler_index].name, replace_spaces=False))
x = x.replace("[model_hash]", shared.sd_model.sd_model_hash)
@@ -374,7 +374,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
if save_to_dirs:
- dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt)
+ dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt).strip('\\ ')
path = os.path.join(path, dirname)
os.makedirs(path, exist_ok=True)
From 70f526704721a303ae045f6406439dcceee4302e Mon Sep 17 00:00:00 2001
From: RnDMonkey
Date: Sat, 1 Oct 2022 21:18:15 -0700
Subject: [PATCH 027/460] use os.path.normpath for better safety checking
---
modules/images.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index e7894b4cd..5ef7eb926 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -374,8 +374,8 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
if save_to_dirs:
- dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt).strip('\\ ')
- path = os.path.join(path, dirname)
+ dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt)
+ path = os.path.normpath(os.path.join(path, dirname))
os.makedirs(path, exist_ok=True)
From 32edf1732f27a1fad5133667c22b948adda1b070 Mon Sep 17 00:00:00 2001
From: RnDMonkey
Date: Sat, 1 Oct 2022 21:37:14 -0700
Subject: [PATCH 028/460] os.path.normpath wasn't working, reverting to manual
strip
---
modules/images.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index 5ef7eb926..4998e92cf 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -374,8 +374,8 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
if save_to_dirs:
- dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt)
- path = os.path.normpath(os.path.join(path, dirname))
+ dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt).strip('\\ /')
+ path = os.path.join(path, dirname)
os.makedirs(path, exist_ok=True)
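For context on why the normpath attempt was reverted: if the directory pattern expands to something with a leading separator, os.path.join treats it as an absolute path and discards the output directory entirely, and os.path.normpath neither restores the base path nor removes trailing spaces. Stripping '\ /' keeps the generated dirname relative. A quick POSIX illustration (the paths are made up):

    import os

    dirname = "/prompt words "  # a pattern result with a leading slash

    # join() discards the base path when the second part looks absolute:
    print(os.path.join("outputs", dirname))                    # /prompt words
    # normpath() does not bring the base path back (or trim the space):
    print(os.path.normpath(os.path.join("outputs", dirname)))  # /prompt words
    # the manual strip keeps the dirname relative to the output directory:
    print(os.path.join("outputs", dirname.strip("\\ /")))      # outputs/prompt words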
From 820f1dc96b1979d7e92170c161db281ee8bd988b Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 15:03:39 +0300
Subject: [PATCH 029/460] initial support for training textual inversion
---
.gitignore | 1 +
javascript/progressbar.js | 1 +
javascript/textualInversion.js | 8 +
modules/devices.py | 3 +-
modules/processing.py | 13 +-
modules/sd_hijack.py | 324 +++---------------
modules/sd_hijack_optimizations.py | 164 +++++++++
modules/sd_models.py | 4 +-
modules/shared.py | 3 +-
modules/textual_inversion/dataset.py | 76 ++++
.../textual_inversion/textual_inversion.py | 258 ++++++++++++++
modules/textual_inversion/ui.py | 32 ++
modules/ui.py | 139 +++++++-
style.css | 10 +-
textual_inversion_templates/style.txt | 19 +
.../style_filewords.txt | 19 +
textual_inversion_templates/subject.txt | 27 ++
.../subject_filewords.txt | 27 ++
webui.py | 15 +-
19 files changed, 828 insertions(+), 315 deletions(-)
create mode 100644 javascript/textualInversion.js
create mode 100644 modules/sd_hijack_optimizations.py
create mode 100644 modules/textual_inversion/dataset.py
create mode 100644 modules/textual_inversion/textual_inversion.py
create mode 100644 modules/textual_inversion/ui.py
create mode 100644 textual_inversion_templates/style.txt
create mode 100644 textual_inversion_templates/style_filewords.txt
create mode 100644 textual_inversion_templates/subject.txt
create mode 100644 textual_inversion_templates/subject_filewords.txt
diff --git a/.gitignore b/.gitignore
index 3532dab37..7afc93953 100644
--- a/.gitignore
+++ b/.gitignore
@@ -25,3 +25,4 @@ __pycache__
/.idea
notification.mp3
/SwinIR
+/textual_inversion
diff --git a/javascript/progressbar.js b/javascript/progressbar.js
index 21f25b38d..1e297abbe 100644
--- a/javascript/progressbar.js
+++ b/javascript/progressbar.js
@@ -30,6 +30,7 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_inte
onUiUpdate(function(){
check_progressbar('txt2img', 'txt2img_progressbar', 'txt2img_progress_span', 'txt2img_interrupt', 'txt2img_preview', 'txt2img_gallery')
check_progressbar('img2img', 'img2img_progressbar', 'img2img_progress_span', 'img2img_interrupt', 'img2img_preview', 'img2img_gallery')
+ check_progressbar('ti', 'ti_progressbar', 'ti_progress_span', 'ti_interrupt', 'ti_preview', 'ti_gallery')
})
function requestMoreProgress(id_part, id_progressbar_span, id_interrupt){
diff --git a/javascript/textualInversion.js b/javascript/textualInversion.js
new file mode 100644
index 000000000..8061be089
--- /dev/null
+++ b/javascript/textualInversion.js
@@ -0,0 +1,8 @@
+
+
+function start_training_textual_inversion(){
+ requestProgress('ti')
+ gradioApp().querySelector('#ti_error').innerHTML=''
+
+ return args_to_array(arguments)
+}
diff --git a/modules/devices.py b/modules/devices.py
index 07bb23397..ff82f2f64 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -32,10 +32,9 @@ def enable_tf32():
errors.run(enable_tf32, "Enabling TF32")
-
device = get_optimal_device()
device_codeformer = cpu if has_mps else device
-
+dtype = torch.float16
def randn(seed, shape):
# Pytorch currently doesn't handle setting randomness correctly when the metal backend is used.
diff --git a/modules/processing.py b/modules/processing.py
index 7eeb5191c..8223423ab 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -56,7 +56,7 @@ class StableDiffusionProcessing:
self.prompt: str = prompt
self.prompt_for_display: str = None
self.negative_prompt: str = (negative_prompt or "")
- self.styles: str = styles
+ self.styles: list = styles or []
self.seed: int = seed
self.subseed: int = subseed
self.subseed_strength: float = subseed_strength
@@ -271,7 +271,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
"Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
"Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None),
- "Eta": (None if p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
+ "Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
}
generation_params.update(p.extra_generation_params)
@@ -295,8 +295,11 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
fix_seed(p)
- os.makedirs(p.outpath_samples, exist_ok=True)
- os.makedirs(p.outpath_grids, exist_ok=True)
+ if p.outpath_samples is not None:
+ os.makedirs(p.outpath_samples, exist_ok=True)
+
+ if p.outpath_grids is not None:
+ os.makedirs(p.outpath_grids, exist_ok=True)
modules.sd_hijack.model_hijack.apply_circular(p.tiling)
@@ -323,7 +326,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
return create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration, position_in_batch)
if os.path.exists(cmd_opts.embeddings_dir):
- model_hijack.load_textual_inversion_embeddings(cmd_opts.embeddings_dir, p.sd_model)
+ model_hijack.embedding_db.load_textual_inversion_embeddings()
infotexts = []
output_images = []
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index fa7eaeb89..fd57e5c54 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -6,244 +6,41 @@ import torch
import numpy as np
from torch import einsum
-from modules import prompt_parser
+import modules.textual_inversion.textual_inversion
+from modules import prompt_parser, devices, sd_hijack_optimizations, shared
from modules.shared import opts, device, cmd_opts
-from ldm.util import default
-from einops import rearrange
import ldm.modules.attention
import ldm.modules.diffusionmodules.model
-
-# see https://github.com/basujindal/stable-diffusion/pull/117 for discussion
-def split_cross_attention_forward_v1(self, x, context=None, mask=None):
- h = self.heads
-
- q = self.to_q(x)
- context = default(context, x)
- k = self.to_k(context)
- v = self.to_v(context)
- del context, x
-
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
-
- r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device)
- for i in range(0, q.shape[0], 2):
- end = i + 2
- s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end])
- s1 *= self.scale
-
- s2 = s1.softmax(dim=-1)
- del s1
-
- r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end])
- del s2
-
- r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h)
- del r1
-
- return self.to_out(r2)
+attention_CrossAttention_forward = ldm.modules.attention.CrossAttention.forward
+diffusionmodules_model_nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity
+diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward
-# taken from https://github.com/Doggettx/stable-diffusion
-def split_cross_attention_forward(self, x, context=None, mask=None):
- h = self.heads
+def apply_optimizations():
+ if cmd_opts.opt_split_attention_v1:
+ ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
+ elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
+ ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
+ ldm.modules.diffusionmodules.model.nonlinearity = sd_hijack_optimizations.nonlinearity_hijack
+ ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
- q_in = self.to_q(x)
- context = default(context, x)
- k_in = self.to_k(context) * self.scale
- v_in = self.to_v(context)
- del context, x
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
- del q_in, k_in, v_in
+def undo_optimizations():
+ ldm.modules.attention.CrossAttention.forward = attention_CrossAttention_forward
+ ldm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity
+ ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward
- r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
-
- stats = torch.cuda.memory_stats(q.device)
- mem_active = stats['active_bytes.all.current']
- mem_reserved = stats['reserved_bytes.all.current']
- mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device())
- mem_free_torch = mem_reserved - mem_active
- mem_free_total = mem_free_cuda + mem_free_torch
-
- gb = 1024 ** 3
- tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size()
- modifier = 3 if q.element_size() == 2 else 2.5
- mem_required = tensor_size * modifier
- steps = 1
-
- if mem_required > mem_free_total:
- steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2)))
- # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
- # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")
-
- if steps > 64:
- max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
- raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
- f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free')
-
- slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
- for i in range(0, q.shape[1], slice_size):
- end = i + slice_size
- s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k)
-
- s2 = s1.softmax(dim=-1, dtype=q.dtype)
- del s1
-
- r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
- del s2
-
- del q, k, v
-
- r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h)
- del r1
-
- return self.to_out(r2)
-
-def nonlinearity_hijack(x):
- # swish
- t = torch.sigmoid(x)
- x *= t
- del t
-
- return x
-
-def cross_attention_attnblock_forward(self, x):
- h_ = x
- h_ = self.norm(h_)
- q1 = self.q(h_)
- k1 = self.k(h_)
- v = self.v(h_)
-
- # compute attention
- b, c, h, w = q1.shape
-
- q2 = q1.reshape(b, c, h*w)
- del q1
-
- q = q2.permute(0, 2, 1) # b,hw,c
- del q2
-
- k = k1.reshape(b, c, h*w) # b,c,hw
- del k1
-
- h_ = torch.zeros_like(k, device=q.device)
-
- stats = torch.cuda.memory_stats(q.device)
- mem_active = stats['active_bytes.all.current']
- mem_reserved = stats['reserved_bytes.all.current']
- mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device())
- mem_free_torch = mem_reserved - mem_active
- mem_free_total = mem_free_cuda + mem_free_torch
-
- tensor_size = q.shape[0] * q.shape[1] * k.shape[2] * q.element_size()
- mem_required = tensor_size * 2.5
- steps = 1
-
- if mem_required > mem_free_total:
- steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2)))
-
- slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
- for i in range(0, q.shape[1], slice_size):
- end = i + slice_size
-
- w1 = torch.bmm(q[:, i:end], k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
- w2 = w1 * (int(c)**(-0.5))
- del w1
- w3 = torch.nn.functional.softmax(w2, dim=2, dtype=q.dtype)
- del w2
-
- # attend to values
- v1 = v.reshape(b, c, h*w)
- w4 = w3.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q)
- del w3
-
- h_[:, :, i:end] = torch.bmm(v1, w4) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
- del v1, w4
-
- h2 = h_.reshape(b, c, h, w)
- del h_
-
- h3 = self.proj_out(h2)
- del h2
-
- h3 += x
-
- return h3
class StableDiffusionModelHijack:
- ids_lookup = {}
- word_embeddings = {}
- word_embeddings_checksums = {}
fixes = None
comments = []
- dir_mtime = None
layers = None
circular_enabled = False
clip = None
- def load_textual_inversion_embeddings(self, dirname, model):
- mt = os.path.getmtime(dirname)
- if self.dir_mtime is not None and mt <= self.dir_mtime:
- return
-
- self.dir_mtime = mt
- self.ids_lookup.clear()
- self.word_embeddings.clear()
-
- tokenizer = model.cond_stage_model.tokenizer
-
- def const_hash(a):
- r = 0
- for v in a:
- r = (r * 281 ^ int(v) * 997) & 0xFFFFFFFF
- return r
-
- def process_file(path, filename):
- name = os.path.splitext(filename)[0]
-
- data = torch.load(path, map_location="cpu")
-
- # textual inversion embeddings
- if 'string_to_param' in data:
- param_dict = data['string_to_param']
- if hasattr(param_dict, '_parameters'):
- param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11
- assert len(param_dict) == 1, 'embedding file has multiple terms in it'
- emb = next(iter(param_dict.items()))[1]
- # diffuser concepts
- elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor:
- assert len(data.keys()) == 1, 'embedding file has multiple terms in it'
-
- emb = next(iter(data.values()))
- if len(emb.shape) == 1:
- emb = emb.unsqueeze(0)
-
- self.word_embeddings[name] = emb.detach().to(device)
- self.word_embeddings_checksums[name] = f'{const_hash(emb.reshape(-1)*100)&0xffff:04x}'
-
- ids = tokenizer([name], add_special_tokens=False)['input_ids'][0]
-
- first_id = ids[0]
- if first_id not in self.ids_lookup:
- self.ids_lookup[first_id] = []
- self.ids_lookup[first_id].append((ids, name))
-
- for fn in os.listdir(dirname):
- try:
- fullfn = os.path.join(dirname, fn)
-
- if os.stat(fullfn).st_size == 0:
- continue
-
- process_file(fullfn, fn)
- except Exception:
- print(f"Error loading emedding {fn}:", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
- continue
-
- print(f"Loaded a total of {len(self.word_embeddings)} textual inversion embeddings.")
+ embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase(cmd_opts.embeddings_dir)
def hijack(self, m):
model_embeddings = m.cond_stage_model.transformer.text_model.embeddings
@@ -253,12 +50,7 @@ class StableDiffusionModelHijack:
self.clip = m.cond_stage_model
- if cmd_opts.opt_split_attention_v1:
- ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward_v1
- elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
- ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward
- ldm.modules.diffusionmodules.model.nonlinearity = nonlinearity_hijack
- ldm.modules.diffusionmodules.model.AttnBlock.forward = cross_attention_attnblock_forward
+ apply_optimizations()
def flatten(el):
flattened = [flatten(children) for children in el.children()]
@@ -296,7 +88,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
def __init__(self, wrapped, hijack):
super().__init__()
self.wrapped = wrapped
- self.hijack = hijack
+ self.hijack: StableDiffusionModelHijack = hijack
self.tokenizer = wrapped.tokenizer
self.max_length = wrapped.max_length
self.token_mults = {}
@@ -317,7 +109,6 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
if mult != 1.0:
self.token_mults[ident] = mult
-
def tokenize_line(self, line, used_custom_terms, hijack_comments):
id_start = self.wrapped.tokenizer.bos_token_id
id_end = self.wrapped.tokenizer.eos_token_id
@@ -339,28 +130,19 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
while i < len(tokens):
token = tokens[i]
- possible_matches = self.hijack.ids_lookup.get(token, None)
+ embedding = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
- if possible_matches is None:
+ if embedding is None:
remade_tokens.append(token)
multipliers.append(weight)
+ i += 1
else:
- found = False
- for ids, word in possible_matches:
- if tokens[i:i + len(ids)] == ids:
- emb_len = int(self.hijack.word_embeddings[word].shape[0])
- fixes.append((len(remade_tokens), word))
- remade_tokens += [0] * emb_len
- multipliers += [weight] * emb_len
- i += len(ids) - 1
- found = True
- used_custom_terms.append((word, self.hijack.word_embeddings_checksums[word]))
- break
-
- if not found:
- remade_tokens.append(token)
- multipliers.append(weight)
- i += 1
+ emb_len = int(embedding.vec.shape[0])
+ fixes.append((len(remade_tokens), embedding))
+ remade_tokens += [0] * emb_len
+ multipliers += [weight] * emb_len
+ used_custom_terms.append((embedding.name, embedding.checksum()))
+ i += emb_len
if len(remade_tokens) > maxlen - 2:
vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
@@ -431,32 +213,23 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
while i < len(tokens):
token = tokens[i]
- possible_matches = self.hijack.ids_lookup.get(token, None)
+ embedding = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
mult_change = self.token_mults.get(token) if opts.enable_emphasis else None
if mult_change is not None:
mult *= mult_change
- elif possible_matches is None:
+ i += 1
+ elif embedding is None:
remade_tokens.append(token)
multipliers.append(mult)
+ i += 1
else:
- found = False
- for ids, word in possible_matches:
- if tokens[i:i+len(ids)] == ids:
- emb_len = int(self.hijack.word_embeddings[word].shape[0])
- fixes.append((len(remade_tokens), word))
- remade_tokens += [0] * emb_len
- multipliers += [mult] * emb_len
- i += len(ids) - 1
- found = True
- used_custom_terms.append((word, self.hijack.word_embeddings_checksums[word]))
- break
-
- if not found:
- remade_tokens.append(token)
- multipliers.append(mult)
-
- i += 1
+ emb_len = int(embedding.vec.shape[0])
+ fixes.append((len(remade_tokens), embedding))
+ remade_tokens += [0] * emb_len
+ multipliers += [mult] * emb_len
+ used_custom_terms.append((embedding.name, embedding.checksum()))
+ i += emb_len
if len(remade_tokens) > maxlen - 2:
vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
@@ -464,6 +237,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
overflowing_words = [vocab.get(int(x), "") for x in ovf]
overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words))
hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
+
token_count = len(remade_tokens)
remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
remade_tokens = [id_start] + remade_tokens[0:maxlen-2] + [id_end]
@@ -484,7 +258,6 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
else:
batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text)
-
self.hijack.fixes = hijack_fixes
self.hijack.comments = hijack_comments
@@ -517,14 +290,19 @@ class EmbeddingsWithFixes(torch.nn.Module):
inputs_embeds = self.wrapped(input_ids)
- if batch_fixes is not None:
- for fixes, tensor in zip(batch_fixes, inputs_embeds):
- for offset, word in fixes:
- emb = self.embeddings.word_embeddings[word]
- emb_len = min(tensor.shape[0]-offset-1, emb.shape[0])
- tensor[offset+1:offset+1+emb_len] = self.embeddings.word_embeddings[word][0:emb_len]
+ if batch_fixes is None or len(batch_fixes) == 0 or max([len(x) for x in batch_fixes]) == 0:
+ return inputs_embeds
- return inputs_embeds
+ vecs = []
+ for fixes, tensor in zip(batch_fixes, inputs_embeds):
+ for offset, embedding in fixes:
+ emb = embedding.vec
+ emb_len = min(tensor.shape[0]-offset-1, emb.shape[0])
+ tensor = torch.cat([tensor[0:offset+1], emb[0:emb_len], tensor[offset+1+emb_len:]])
+
+ vecs.append(tensor)
+
+ return torch.stack(vecs)
def add_circular_option_to_conv_2d():
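The rewritten EmbeddingsWithFixes.forward above splices each learned vector into the prompt's token embeddings with torch.cat rather than writing into the tensor in place. A minimal sketch of that splice (the shapes and offset are invented for illustration):

    import torch

    tensor = torch.zeros(77, 768)   # per-prompt token embeddings (CLIP-sized)
    emb = torch.ones(3, 768)        # a 3-vector textual inversion embedding
    offset = 4                      # position recorded in hijack.fixes

    # Keep everything up to and including the trigger token, drop in the
    # embedding vectors, then resume with the remaining token embeddings.
    emb_len = min(tensor.shape[0] - offset - 1, emb.shape[0])
    tensor = torch.cat([tensor[0:offset + 1], emb[0:emb_len], tensor[offset + 1 + emb_len:]])
    print(tensor.shape)             # torch.Size([77, 768])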
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
new file mode 100644
index 000000000..9c079e578
--- /dev/null
+++ b/modules/sd_hijack_optimizations.py
@@ -0,0 +1,164 @@
+import math
+import torch
+from torch import einsum
+
+from ldm.util import default
+from einops import rearrange
+
+
+# see https://github.com/basujindal/stable-diffusion/pull/117 for discussion
+def split_cross_attention_forward_v1(self, x, context=None, mask=None):
+ h = self.heads
+
+ q = self.to_q(x)
+ context = default(context, x)
+ k = self.to_k(context)
+ v = self.to_v(context)
+ del context, x
+
+ q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+
+ r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device)
+ for i in range(0, q.shape[0], 2):
+ end = i + 2
+ s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end])
+ s1 *= self.scale
+
+ s2 = s1.softmax(dim=-1)
+ del s1
+
+ r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end])
+ del s2
+
+ r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h)
+ del r1
+
+ return self.to_out(r2)
+
+
+# taken from https://github.com/Doggettx/stable-diffusion
+def split_cross_attention_forward(self, x, context=None, mask=None):
+ h = self.heads
+
+ q_in = self.to_q(x)
+ context = default(context, x)
+ k_in = self.to_k(context) * self.scale
+ v_in = self.to_v(context)
+ del context, x
+
+ q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
+ del q_in, k_in, v_in
+
+ r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype)
+
+ stats = torch.cuda.memory_stats(q.device)
+ mem_active = stats['active_bytes.all.current']
+ mem_reserved = stats['reserved_bytes.all.current']
+ mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device())
+ mem_free_torch = mem_reserved - mem_active
+ mem_free_total = mem_free_cuda + mem_free_torch
+
+ gb = 1024 ** 3
+ tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size()
+ modifier = 3 if q.element_size() == 2 else 2.5
+ mem_required = tensor_size * modifier
+ steps = 1
+
+ if mem_required > mem_free_total:
+ steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2)))
+ # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB "
+ # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}")
+
+ if steps > 64:
+ max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64
+ raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). '
+ f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free')
+
+ slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
+ for i in range(0, q.shape[1], slice_size):
+ end = i + slice_size
+ s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k)
+
+ s2 = s1.softmax(dim=-1, dtype=q.dtype)
+ del s1
+
+ r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v)
+ del s2
+
+ del q, k, v
+
+ r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h)
+ del r1
+
+ return self.to_out(r2)
+
+def nonlinearity_hijack(x):
+ # swish
+ t = torch.sigmoid(x)
+ x *= t
+ del t
+
+ return x
+
+def cross_attention_attnblock_forward(self, x):
+ h_ = x
+ h_ = self.norm(h_)
+ q1 = self.q(h_)
+ k1 = self.k(h_)
+ v = self.v(h_)
+
+ # compute attention
+ b, c, h, w = q1.shape
+
+ q2 = q1.reshape(b, c, h*w)
+ del q1
+
+ q = q2.permute(0, 2, 1) # b,hw,c
+ del q2
+
+ k = k1.reshape(b, c, h*w) # b,c,hw
+ del k1
+
+ h_ = torch.zeros_like(k, device=q.device)
+
+ stats = torch.cuda.memory_stats(q.device)
+ mem_active = stats['active_bytes.all.current']
+ mem_reserved = stats['reserved_bytes.all.current']
+ mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device())
+ mem_free_torch = mem_reserved - mem_active
+ mem_free_total = mem_free_cuda + mem_free_torch
+
+ tensor_size = q.shape[0] * q.shape[1] * k.shape[2] * q.element_size()
+ mem_required = tensor_size * 2.5
+ steps = 1
+
+ if mem_required > mem_free_total:
+ steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2)))
+
+ slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1]
+ for i in range(0, q.shape[1], slice_size):
+ end = i + slice_size
+
+ w1 = torch.bmm(q[:, i:end], k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j]
+ w2 = w1 * (int(c)**(-0.5))
+ del w1
+ w3 = torch.nn.functional.softmax(w2, dim=2, dtype=q.dtype)
+ del w2
+
+ # attend to values
+ v1 = v.reshape(b, c, h*w)
+ w4 = w3.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q)
+ del w3
+
+ h_[:, :, i:end] = torch.bmm(v1, w4) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j]
+ del v1, w4
+
+ h2 = h_.reshape(b, c, h, w)
+ del h_
+
+ h3 = self.proj_out(h2)
+ del h2
+
+ h3 += x
+
+ return h3
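Both split-attention functions above pick their slice count from free VRAM: the full attention matrix needs mem_required bytes, so the loop is divided into the smallest power-of-two number of steps whose per-slice footprint fits. The sizing logic in isolation (the example figures are arbitrary):

    import math

    def slicing_steps(mem_required, mem_free_total):
        # Smallest power of two such that mem_required / steps <= mem_free_total,
        # matching the steps computation in split_cross_attention_forward.
        if mem_required <= mem_free_total:
            return 1
        return 2 ** math.ceil(math.log(mem_required / mem_free_total, 2))

    gb = 1024 ** 3
    steps = slicing_steps(10 * gb, 3 * gb)
    print(steps, (10 * gb / steps) / gb)  # 4 slices of 2.5 GB each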
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 2539f14cd..5b3dbdc79 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -8,7 +8,7 @@ from omegaconf import OmegaConf
from ldm.util import instantiate_from_config
-from modules import shared, modelloader
+from modules import shared, modelloader, devices
from modules.paths import models_path
model_dir = "Stable-diffusion"
@@ -134,6 +134,8 @@ def load_model_weights(model, checkpoint_file, sd_model_hash):
if not shared.cmd_opts.no_half:
model.half()
+ devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
+
model.sd_model_hash = sd_model_hash
model.sd_model_checkpint = checkpoint_file
diff --git a/modules/shared.py b/modules/shared.py
index ac968b2d2..ac0bc480c 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -78,6 +78,7 @@ class State:
current_latent = None
current_image = None
current_image_sampling_step = 0
+ textinfo = None
def interrupt(self):
self.interrupted = True
@@ -88,7 +89,7 @@ class State:
self.current_image_sampling_step = 0
def get_job_timestamp(self):
- return datetime.datetime.now().strftime("%Y%m%d%H%M%S")
+ return datetime.datetime.now().strftime("%Y%m%d%H%M%S") # shouldn't this return job_timestamp?
state = State()
diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py
new file mode 100644
index 000000000..7e134a08f
--- /dev/null
+++ b/modules/textual_inversion/dataset.py
@@ -0,0 +1,76 @@
+import os
+import numpy as np
+import PIL
+import torch
+from PIL import Image
+from torch.utils.data import Dataset
+from torchvision import transforms
+
+import random
+import tqdm
+
+
+class PersonalizedBase(Dataset):
+ def __init__(self, data_root, size=None, repeats=100, flip_p=0.5, placeholder_token="*", width=512, height=512, model=None, device=None, template_file=None):
+
+ self.placeholder_token = placeholder_token
+
+ self.size = size
+ self.width = width
+ self.height = height
+ self.flip = transforms.RandomHorizontalFlip(p=flip_p)
+
+ self.dataset = []
+
+ with open(template_file, "r") as file:
+ lines = [x.strip() for x in file.readlines()]
+
+ self.lines = lines
+
+ assert data_root, 'dataset directory not specified'
+
+ self.image_paths = [os.path.join(data_root, file_path) for file_path in os.listdir(data_root)]
+ print("Preparing dataset...")
+ for path in tqdm.tqdm(self.image_paths):
+ image = Image.open(path)
+ image = image.convert('RGB')
+ image = image.resize((self.width, self.height), PIL.Image.BICUBIC)
+
+ filename = os.path.basename(path)
+ filename_tokens = os.path.splitext(filename)[0].replace('_', '-').replace(' ', '-').split('-')
+ filename_tokens = [token for token in filename_tokens if token.isalpha()]
+
+ npimage = np.array(image).astype(np.uint8)
+ npimage = (npimage / 127.5 - 1.0).astype(np.float32)
+
+ torchdata = torch.from_numpy(npimage).to(device=device, dtype=torch.float32)
+ torchdata = torch.moveaxis(torchdata, 2, 0)
+
+ init_latent = model.get_first_stage_encoding(model.encode_first_stage(torchdata.unsqueeze(dim=0))).squeeze()
+
+ self.dataset.append((init_latent, filename_tokens))
+
+ self.length = len(self.dataset) * repeats
+
+ self.initial_indexes = np.arange(self.length) % len(self.dataset)
+ self.indexes = None
+ self.shuffle()
+
+ def shuffle(self):
+ self.indexes = self.initial_indexes[torch.randperm(self.initial_indexes.shape[0])]
+
+ def __len__(self):
+ return self.length
+
+ def __getitem__(self, i):
+ if i % len(self.dataset) == 0:
+ self.shuffle()
+
+ index = self.indexes[i % len(self.indexes)]
+ x, filename_tokens = self.dataset[index]
+
+ text = random.choice(self.lines)
+ text = text.replace("[name]", self.placeholder_token)
+ text = text.replace("[filewords]", ' '.join(filename_tokens))
+
+ return x, text
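Each dataset item above pairs a cached first-stage latent with a prompt built from the template file: a random line has [name] replaced by the placeholder token and [filewords] by tokens recovered from the image filename. The substitution on its own (the sample values are invented):

    import random

    lines = ["a photo of a [name], [filewords]"]   # one template line
    placeholder_token = "my-embedding"
    filename_tokens = ["portrait", "smiling"]      # parsed from a filename

    text = random.choice(lines)
    text = text.replace("[name]", placeholder_token)
    text = text.replace("[filewords]", " ".join(filename_tokens))
    print(text)  # a photo of a my-embedding, portrait smiling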
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
new file mode 100644
index 000000000..c0baaace2
--- /dev/null
+++ b/modules/textual_inversion/textual_inversion.py
@@ -0,0 +1,258 @@
+import os
+import sys
+import traceback
+
+import torch
+import tqdm
+import html
+import datetime
+
+from modules import shared, devices, sd_hijack, processing
+import modules.textual_inversion.dataset
+
+
+class Embedding:
+ def __init__(self, vec, name, step=None):
+ self.vec = vec
+ self.name = name
+ self.step = step
+ self.cached_checksum = None
+
+ def save(self, filename):
+ embedding_data = {
+ "string_to_token": {"*": 265},
+ "string_to_param": {"*": self.vec},
+ "name": self.name,
+ "step": self.step,
+ }
+
+ torch.save(embedding_data, filename)
+
+ def checksum(self):
+ if self.cached_checksum is not None:
+ return self.cached_checksum
+
+ def const_hash(a):
+ r = 0
+ for v in a:
+ r = (r * 281 ^ int(v) * 997) & 0xFFFFFFFF
+ return r
+
+ self.cached_checksum = f'{const_hash(self.vec.reshape(-1) * 100) & 0xffff:04x}'
+ return self.cached_checksum
+
+class EmbeddingDatabase:
+ def __init__(self, embeddings_dir):
+ self.ids_lookup = {}
+ self.word_embeddings = {}
+ self.dir_mtime = None
+ self.embeddings_dir = embeddings_dir
+
+ def register_embedding(self, embedding, model):
+
+ self.word_embeddings[embedding.name] = embedding
+
+ ids = model.cond_stage_model.tokenizer([embedding.name], add_special_tokens=False)['input_ids'][0]
+
+ first_id = ids[0]
+ if first_id not in self.ids_lookup:
+ self.ids_lookup[first_id] = []
+ self.ids_lookup[first_id].append((ids, embedding))
+
+ return embedding
+
+ def load_textual_inversion_embeddings(self):
+ mt = os.path.getmtime(self.embeddings_dir)
+ if self.dir_mtime is not None and mt <= self.dir_mtime:
+ return
+
+ self.dir_mtime = mt
+ self.ids_lookup.clear()
+ self.word_embeddings.clear()
+
+ def process_file(path, filename):
+ name = os.path.splitext(filename)[0]
+
+ data = torch.load(path, map_location="cpu")
+
+ # textual inversion embeddings
+ if 'string_to_param' in data:
+ param_dict = data['string_to_param']
+ if hasattr(param_dict, '_parameters'):
+ param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11
+ assert len(param_dict) == 1, 'embedding file has multiple terms in it'
+ emb = next(iter(param_dict.items()))[1]
+ # diffuser concepts
+ elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor:
+ assert len(data.keys()) == 1, 'embedding file has multiple terms in it'
+
+ emb = next(iter(data.values()))
+ if len(emb.shape) == 1:
+ emb = emb.unsqueeze(0)
+ else:
+ raise Exception(f"Couldn't identify {filename} as neither textual inversion embedding nor diffuser concept.")
+
+ vec = emb.detach().to(devices.device, dtype=torch.float32)
+ embedding = Embedding(vec, name)
+ embedding.step = data.get('step', None)
+ self.register_embedding(embedding, shared.sd_model)
+
+ for fn in os.listdir(self.embeddings_dir):
+ try:
+ fullfn = os.path.join(self.embeddings_dir, fn)
+
+ if os.stat(fullfn).st_size == 0:
+ continue
+
+ process_file(fullfn, fn)
+ except Exception:
+ print(f"Error loading emedding {fn}:", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+ continue
+
+ print(f"Loaded a total of {len(self.word_embeddings)} textual inversion embeddings.")
+
+ def find_embedding_at_position(self, tokens, offset):
+ token = tokens[offset]
+ possible_matches = self.ids_lookup.get(token, None)
+
+ if possible_matches is None:
+ return None
+
+ for ids, embedding in possible_matches:
+ if tokens[offset:offset + len(ids)] == ids:
+ return embedding
+
+ return None
+
+
+
+def create_embedding(name, num_vectors_per_token):
+ init_text = '*'
+
+ cond_model = shared.sd_model.cond_stage_model
+ embedding_layer = cond_model.wrapped.transformer.text_model.embeddings
+
+ ids = cond_model.tokenizer(init_text, max_length=num_vectors_per_token, return_tensors="pt", add_special_tokens=False)["input_ids"]
+ embedded = embedding_layer(ids.to(devices.device)).squeeze(0)
+ vec = torch.zeros((num_vectors_per_token, embedded.shape[1]), device=devices.device)
+
+ for i in range(num_vectors_per_token):
+ vec[i] = embedded[i * int(embedded.shape[0]) // num_vectors_per_token]
+
+ fn = os.path.join(shared.cmd_opts.embeddings_dir, f"{name}.pt")
+ assert not os.path.exists(fn), f"file {fn} already exists"
+
+ embedding = Embedding(vec, name)
+ embedding.step = 0
+ embedding.save(fn)
+
+ return fn
+
+
+def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps, create_image_every, save_embedding_every, template_file):
+ assert embedding_name, 'embedding not selected'
+
+ shared.state.textinfo = "Initializing textual inversion training..."
+ shared.state.job_count = steps
+
+ filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
+
+ log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name)
+
+ if save_embedding_every > 0:
+ embedding_dir = os.path.join(log_directory, "embeddings")
+ os.makedirs(embedding_dir, exist_ok=True)
+ else:
+ embedding_dir = None
+
+ if create_image_every > 0:
+ images_dir = os.path.join(log_directory, "images")
+ os.makedirs(images_dir, exist_ok=True)
+ else:
+ images_dir = None
+
+ cond_model = shared.sd_model.cond_stage_model
+
+ shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
+ with torch.autocast("cuda"):
+ ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, size=512, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file)
+
+ hijack = sd_hijack.model_hijack
+
+ embedding = hijack.embedding_db.word_embeddings[embedding_name]
+ embedding.vec.requires_grad = True
+
+ optimizer = torch.optim.AdamW([embedding.vec], lr=learn_rate)
+
+ losses = torch.zeros((32,))
+
+ last_saved_file = ""
+ last_saved_image = ""
+
+ initial_step = embedding.step or 0
+ if initial_step > steps:
+ return embedding, filename
+
+ pbar = tqdm.tqdm(enumerate(ds), total=steps-initial_step)
+ for i, (x, text) in pbar:
+ embedding.step = i + initial_step
+
+ if embedding.step > steps:
+ break
+
+ if shared.state.interrupted:
+ break
+
+ with torch.autocast("cuda"):
+ c = cond_model([text])
+ loss = shared.sd_model(x.unsqueeze(0), c)[0]
+
+ losses[embedding.step % losses.shape[0]] = loss.item()
+
+ optimizer.zero_grad()
+ loss.backward()
+ optimizer.step()
+
+ pbar.set_description(f"loss: {losses.mean():.7f}")
+
+ if embedding.step > 0 and embedding_dir is not None and embedding.step % save_embedding_every == 0:
+ last_saved_file = os.path.join(embedding_dir, f'{embedding_name}-{embedding.step}.pt')
+ embedding.save(last_saved_file)
+
+ if embedding.step > 0 and images_dir is not None and embedding.step % create_image_every == 0:
+ last_saved_image = os.path.join(images_dir, f'{embedding_name}-{embedding.step}.png')
+
+ p = processing.StableDiffusionProcessingTxt2Img(
+ sd_model=shared.sd_model,
+ prompt=text,
+ steps=20,
+ do_not_save_grid=True,
+ do_not_save_samples=True,
+ )
+
+ processed = processing.process_images(p)
+ image = processed.images[0]
+
+ shared.state.current_image = image
+ image.save(last_saved_image)
+
+ last_saved_image += f", prompt: {text}"
+
+ shared.state.job_no = embedding.step
+
+ shared.state.textinfo = f"""
+
+Loss: {losses.mean():.7f}
+Step: {embedding.step}
+Last prompt: {html.escape(text)}
+Last saved embedding: {html.escape(last_saved_file)}
+Last saved image: {html.escape(last_saved_image)}
+
+"""
+
+ embedding.cached_checksum = None
+ embedding.save(filename)
+
+ return embedding, filename
+
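Embedding.checksum() above condenses the whole vector into a 16-bit rolling hash so generation parameters can record which embedding version produced an image. Extracted as a standalone sketch (the sample values are arbitrary):

    def const_hash(a):
        # Order-sensitive rolling hash over integer-truncated values,
        # kept within 32 bits; checksum() then masks it to 16 bits.
        r = 0
        for v in a:
            r = (r * 281 ^ int(v) * 997) & 0xFFFFFFFF
        return r

    vec = [0.12, -0.5, 0.33]               # stand-in for embedding.vec values
    print(f"{const_hash(v * 100 for v in vec) & 0xffff:04x}")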
diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py
new file mode 100644
index 000000000..ce3677a98
--- /dev/null
+++ b/modules/textual_inversion/ui.py
@@ -0,0 +1,32 @@
+import html
+
+import gradio as gr
+
+import modules.textual_inversion.textual_inversion as ti
+from modules import sd_hijack, shared
+
+
+def create_embedding(name, nvpt):
+ filename = ti.create_embedding(name, nvpt)
+
+ sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()
+
+ return gr.Dropdown.update(choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())), f"Created: {filename}", ""
+
+
+def train_embedding(*args):
+
+ try:
+ sd_hijack.undo_optimizations()
+
+ embedding, filename = ti.train_embedding(*args)
+
+ res = f"""
+Training {'interrupted' if shared.state.interrupted else 'finished'} after {embedding.step} steps.
+Embedding saved to {html.escape(filename)}
+"""
+ return res, ""
+ except Exception:
+ raise
+ finally:
+ sd_hijack.apply_optimizations()
diff --git a/modules/ui.py b/modules/ui.py
index 15572bb0a..57aef6ff1 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -21,6 +21,7 @@ import gradio as gr
import gradio.utils
import gradio.routes
+from modules import sd_hijack
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
@@ -32,6 +33,7 @@ import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
+import modules.textual_inversion.ui
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the bowser will not show any UI
mimetypes.init()
@@ -142,8 +144,8 @@ def save_files(js_data, images, index):
return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
-def wrap_gradio_call(func):
- def f(*args, **kwargs):
+def wrap_gradio_call(func, extra_outputs=None):
+ def f(*args, extra_outputs_array=extra_outputs, **kwargs):
run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled
if run_memmon:
shared.mem_mon.monitor()
@@ -159,7 +161,10 @@ def wrap_gradio_call(func):
shared.state.job = ""
shared.state.job_count = 0
- res = [None, '', f"{plaintext_to_html(type(e).__name__+': '+str(e))}
"]
+ if extra_outputs_array is None:
+ extra_outputs_array = [None, '']
+
+ res = extra_outputs_array + [f"{plaintext_to_html(type(e).__name__+': '+str(e))}
"]
elapsed = time.perf_counter() - t
@@ -179,6 +184,7 @@ def wrap_gradio_call(func):
res[-1] += f""
shared.state.interrupted = False
+ shared.state.job_count = 0
return tuple(res)
@@ -187,7 +193,7 @@ def wrap_gradio_call(func):
def check_progress_call(id_part):
if shared.state.job_count == 0:
- return "", gr_show(False), gr_show(False)
+ return "", gr_show(False), gr_show(False), gr_show(False)
progress = 0
@@ -219,13 +225,19 @@ def check_progress_call(id_part):
else:
preview_visibility = gr_show(True)
- return f"{time.time()}{progressbar}
", preview_visibility, image
+ if shared.state.textinfo is not None:
+ textinfo_result = gr.HTML.update(value=shared.state.textinfo, visible=True)
+ else:
+ textinfo_result = gr_show(False)
+
+ return f"{time.time()}{progressbar}
", preview_visibility, image, textinfo_result
def check_progress_call_initial(id_part):
shared.state.job_count = -1
shared.state.current_latent = None
shared.state.current_image = None
+ shared.state.textinfo = None
return check_progress_call(id_part)
@@ -399,13 +411,16 @@ def create_toprow(is_img2img):
return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste
-def setup_progressbar(progressbar, preview, id_part):
+def setup_progressbar(progressbar, preview, id_part, textinfo=None):
+ if textinfo is None:
+ textinfo = gr.HTML(visible=False)
+
check_progress = gr.Button('Check progress', elem_id=f"{id_part}_check_progress", visible=False)
check_progress.click(
fn=lambda: check_progress_call(id_part),
show_progress=False,
inputs=[],
- outputs=[progressbar, preview, preview],
+ outputs=[progressbar, preview, preview, textinfo],
)
check_progress_initial = gr.Button('Check progress (first)', elem_id=f"{id_part}_check_progress_initial", visible=False)
@@ -413,11 +428,14 @@ def setup_progressbar(progressbar, preview, id_part):
fn=lambda: check_progress_call_initial(id_part),
show_progress=False,
inputs=[],
- outputs=[progressbar, preview, preview],
+ outputs=[progressbar, preview, preview, textinfo],
)
-def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
+def create_ui(wrap_gradio_gpu_call):
+ import modules.img2img
+ import modules.txt2img
+
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
@@ -483,7 +501,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True)
txt2img_args = dict(
- fn=txt2img,
+ fn=wrap_gradio_gpu_call(modules.txt2img.txt2img),
_js="submit",
inputs=[
txt2img_prompt,
@@ -675,7 +693,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
)
img2img_args = dict(
- fn=img2img,
+ fn=wrap_gradio_gpu_call(modules.img2img.img2img),
_js="submit_img2img",
inputs=[
dummy_component,
@@ -828,7 +846,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
open_extras_folder = gr.Button('Open output directory', elem_id=button_id)
submit.click(
- fn=run_extras,
+ fn=wrap_gradio_gpu_call(modules.extras.run_extras),
_js="get_extras_tab_index",
inputs=[
dummy_component,
@@ -878,7 +896,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
pnginfo_send_to_img2img = gr.Button('Send to img2img')
image.change(
- fn=wrap_gradio_call(run_pnginfo),
+ fn=wrap_gradio_call(modules.extras.run_pnginfo),
inputs=[image],
outputs=[html, generation_info, html2],
)
@@ -887,7 +905,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
with gr.Row().style(equal_height=False):
with gr.Column(variant='panel'):
gr.HTML(value="A merger of the two checkpoints will be generated in your checkpoint directory.
")
-
+
with gr.Row():
primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary Model Name")
secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary Model Name")
@@ -896,10 +914,96 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method")
save_as_half = gr.Checkbox(value=False, label="Safe as float16")
modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
-
+
with gr.Column(variant='panel'):
submit_result = gr.Textbox(elem_id="modelmerger_result", show_label=False)
+ sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()
+
+ with gr.Blocks() as textual_inversion_interface:
+ with gr.Row().style(equal_height=False):
+ with gr.Column():
+ with gr.Group():
+ gr.HTML(value="Create a new embedding
")
+
+ new_embedding_name = gr.Textbox(label="Name")
+ nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1)
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ gr.HTML(value="")
+
+ with gr.Column():
+ create_embedding = gr.Button(value="Create", variant='primary')
+
+ with gr.Group():
+ gr.HTML(value="Train an embedding; must specify a directory with a set of 512x512 images
")
+ train_embedding_name = gr.Dropdown(label='Embedding', choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
+ learn_rate = gr.Number(label='Learning rate', value=5.0e-03)
+ dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images")
+ log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion")
+ template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"))
+ steps = gr.Number(label='Max steps', value=100000, precision=0)
+ create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=1000, precision=0)
+ save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=1000, precision=0)
+
+ with gr.Row():
+ with gr.Column(scale=2):
+ gr.HTML(value="")
+
+ with gr.Column():
+ with gr.Row():
+ interrupt_training = gr.Button(value="Interrupt")
+ train_embedding = gr.Button(value="Train", variant='primary')
+
+ with gr.Column():
+ progressbar = gr.HTML(elem_id="ti_progressbar")
+ ti_output = gr.Text(elem_id="ti_output", value="", show_label=False)
+
+ ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(grid=4)
+ ti_preview = gr.Image(elem_id='ti_preview', visible=False)
+ ti_progress = gr.HTML(elem_id="ti_progress", value="")
+ ti_outcome = gr.HTML(elem_id="ti_error", value="")
+ setup_progressbar(progressbar, ti_preview, 'ti', textinfo=ti_progress)
+
+ create_embedding.click(
+ fn=modules.textual_inversion.ui.create_embedding,
+ inputs=[
+ new_embedding_name,
+ nvpt,
+ ],
+ outputs=[
+ train_embedding_name,
+ ti_output,
+ ti_outcome,
+ ]
+ )
+
+ train_embedding.click(
+ fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]),
+ _js="start_training_textual_inversion",
+ inputs=[
+ train_embedding_name,
+ learn_rate,
+ dataset_directory,
+ log_directory,
+ steps,
+ create_image_every,
+ save_embedding_every,
+ template_file,
+ ],
+ outputs=[
+ ti_output,
+ ti_outcome,
+ ]
+ )
+
+ interrupt_training.click(
+ fn=lambda: shared.state.interrupt(),
+ inputs=[],
+ outputs=[],
+ )
+
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
@@ -1011,6 +1115,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
(extras_interface, "Extras", "extras"),
(pnginfo_interface, "PNG Info", "pnginfo"),
(modelmerger_interface, "Checkpoint Merger", "modelmerger"),
+ (textual_inversion_interface, "Textual inversion", "ti"),
(settings_interface, "Settings", "settings"),
]
@@ -1044,11 +1149,11 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
def modelmerger(*args):
try:
- results = run_modelmerger(*args)
+ results = modules.extras.run_modelmerger(*args)
except Exception as e:
print("Error loading/saving model file:", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
- modules.sd_models.list_models() #To remove the potentially missing models from the list
+ modules.sd_models.list_models() # to remove the potentially missing models from the list
return ["Error loading/saving model file. It doesn't exist or the name contains illegal characters"] + [gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(3)]
return results
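The extra_outputs change to wrap_gradio_call above exists because a failed handler must still return one value per Gradio output; callers like the training tab pass placeholders (gr.update()) so unrelated outputs are left untouched while the error message lands in the last slot. The shape of that pattern, reduced to plain Python (this wrapper is a simplified stand-in, not the patched function):

    def wrap_call(func, extra_outputs=None):
        def f(*args, extra_outputs_array=extra_outputs, **kwargs):
            try:
                return func(*args, **kwargs)
            except Exception as e:
                if extra_outputs_array is None:
                    extra_outputs_array = [None, '']  # legacy default
                # pad the non-error outputs, put the message last
                return tuple(extra_outputs_array + [f"Error: {e}"])
        return f

    handler = wrap_call(lambda: 1 / 0, extra_outputs=["<unchanged>"])
    print(handler())  # ('<unchanged>', 'Error: division by zero')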
diff --git a/style.css b/style.css
index 79d6bb0dc..39586bf18 100644
--- a/style.css
+++ b/style.css
@@ -157,7 +157,7 @@ button{
max-width: 10em;
}
-#txt2img_preview, #img2img_preview{
+#txt2img_preview, #img2img_preview, #ti_preview{
position: absolute;
width: 320px;
left: 0;
@@ -172,18 +172,18 @@ button{
}
@media screen and (min-width: 768px) {
- #txt2img_preview, #img2img_preview {
+ #txt2img_preview, #img2img_preview, #ti_preview {
position: absolute;
}
}
@media screen and (max-width: 767px) {
- #txt2img_preview, #img2img_preview {
+ #txt2img_preview, #img2img_preview, #ti_preview {
position: relative;
}
}
-#txt2img_preview div.left-0.top-0, #img2img_preview div.left-0.top-0{
+#txt2img_preview div.left-0.top-0, #img2img_preview div.left-0.top-0, #ti_preview div.left-0.top-0{
display: none;
}
@@ -247,7 +247,7 @@ input[type="range"]{
#txt2img_negative_prompt, #img2img_negative_prompt{
}
-#txt2img_progressbar, #img2img_progressbar{
+#txt2img_progressbar, #img2img_progressbar, #ti_progressbar{
position: absolute;
z-index: 1000;
right: 0;
diff --git a/textual_inversion_templates/style.txt b/textual_inversion_templates/style.txt
new file mode 100644
index 000000000..15af2d6b8
--- /dev/null
+++ b/textual_inversion_templates/style.txt
@@ -0,0 +1,19 @@
+a painting, art by [name]
+a rendering, art by [name]
+a cropped painting, art by [name]
+the painting, art by [name]
+a clean painting, art by [name]
+a dirty painting, art by [name]
+a dark painting, art by [name]
+a picture, art by [name]
+a cool painting, art by [name]
+a close-up painting, art by [name]
+a bright painting, art by [name]
+a cropped painting, art by [name]
+a good painting, art by [name]
+a close-up painting, art by [name]
+a rendition, art by [name]
+a nice painting, art by [name]
+a small painting, art by [name]
+a weird painting, art by [name]
+a large painting, art by [name]
diff --git a/textual_inversion_templates/style_filewords.txt b/textual_inversion_templates/style_filewords.txt
new file mode 100644
index 000000000..b3a8159a8
--- /dev/null
+++ b/textual_inversion_templates/style_filewords.txt
@@ -0,0 +1,19 @@
+a painting of [filewords], art by [name]
+a rendering of [filewords], art by [name]
+a cropped painting of [filewords], art by [name]
+the painting of [filewords], art by [name]
+a clean painting of [filewords], art by [name]
+a dirty painting of [filewords], art by [name]
+a dark painting of [filewords], art by [name]
+a picture of [filewords], art by [name]
+a cool painting of [filewords], art by [name]
+a close-up painting of [filewords], art by [name]
+a bright painting of [filewords], art by [name]
+a cropped painting of [filewords], art by [name]
+a good painting of [filewords], art by [name]
+a close-up painting of [filewords], art by [name]
+a rendition of [filewords], art by [name]
+a nice painting of [filewords], art by [name]
+a small painting of [filewords], art by [name]
+a weird painting of [filewords], art by [name]
+a large painting of [filewords], art by [name]
diff --git a/textual_inversion_templates/subject.txt b/textual_inversion_templates/subject.txt
new file mode 100644
index 000000000..79f36aa05
--- /dev/null
+++ b/textual_inversion_templates/subject.txt
@@ -0,0 +1,27 @@
+a photo of a [name]
+a rendering of a [name]
+a cropped photo of the [name]
+the photo of a [name]
+a photo of a clean [name]
+a photo of a dirty [name]
+a dark photo of the [name]
+a photo of my [name]
+a photo of the cool [name]
+a close-up photo of a [name]
+a bright photo of the [name]
+a cropped photo of a [name]
+a photo of the [name]
+a good photo of the [name]
+a photo of one [name]
+a close-up photo of the [name]
+a rendition of the [name]
+a photo of the clean [name]
+a rendition of a [name]
+a photo of a nice [name]
+a good photo of a [name]
+a photo of the nice [name]
+a photo of the small [name]
+a photo of the weird [name]
+a photo of the large [name]
+a photo of a cool [name]
+a photo of a small [name]
diff --git a/textual_inversion_templates/subject_filewords.txt b/textual_inversion_templates/subject_filewords.txt
new file mode 100644
index 000000000..008652a6b
--- /dev/null
+++ b/textual_inversion_templates/subject_filewords.txt
@@ -0,0 +1,27 @@
+a photo of a [name], [filewords]
+a rendering of a [name], [filewords]
+a cropped photo of the [name], [filewords]
+the photo of a [name], [filewords]
+a photo of a clean [name], [filewords]
+a photo of a dirty [name], [filewords]
+a dark photo of the [name], [filewords]
+a photo of my [name], [filewords]
+a photo of the cool [name], [filewords]
+a close-up photo of a [name], [filewords]
+a bright photo of the [name], [filewords]
+a cropped photo of a [name], [filewords]
+a photo of the [name], [filewords]
+a good photo of the [name], [filewords]
+a photo of one [name], [filewords]
+a close-up photo of the [name], [filewords]
+a rendition of the [name], [filewords]
+a photo of the clean [name], [filewords]
+a rendition of a [name], [filewords]
+a photo of a nice [name], [filewords]
+a good photo of a [name], [filewords]
+a photo of the nice [name], [filewords]
+a photo of the small [name], [filewords]
+a photo of the weird [name], [filewords]
+a photo of the large [name], [filewords]
+a photo of a cool [name], [filewords]
+a photo of a small [name], [filewords]
diff --git a/webui.py b/webui.py
index b8cccd546..19fdcdd4d 100644
--- a/webui.py
+++ b/webui.py
@@ -12,7 +12,6 @@ import modules.bsrgan_model as bsrgan
import modules.extras
import modules.face_restoration
import modules.gfpgan_model as gfpgan
-import modules.img2img
import modules.ldsr_model as ldsr
import modules.lowvram
import modules.realesrgan_model as realesrgan
@@ -21,7 +20,6 @@ import modules.sd_hijack
import modules.sd_models
import modules.shared as shared
import modules.swinir_model as swinir
-import modules.txt2img
import modules.ui
from modules import modelloader
from modules.paths import script_path
@@ -46,7 +44,7 @@ def wrap_queued_call(func):
return f
-def wrap_gradio_gpu_call(func):
+def wrap_gradio_gpu_call(func, extra_outputs=None):
def f(*args, **kwargs):
devices.torch_gc()
@@ -58,6 +56,7 @@ def wrap_gradio_gpu_call(func):
shared.state.current_image = None
shared.state.current_image_sampling_step = 0
shared.state.interrupted = False
+ shared.state.textinfo = None
with queue_lock:
res = func(*args, **kwargs)
@@ -69,7 +68,7 @@ def wrap_gradio_gpu_call(func):
return res
- return modules.ui.wrap_gradio_call(f)
+ return modules.ui.wrap_gradio_call(f, extra_outputs=extra_outputs)
modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
@@ -86,13 +85,7 @@ def webui():
signal.signal(signal.SIGINT, sigint_handler)
- demo = modules.ui.create_ui(
- txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img),
- img2img=wrap_gradio_gpu_call(modules.img2img.img2img),
- run_extras=wrap_gradio_gpu_call(modules.extras.run_extras),
- run_pnginfo=modules.extras.run_pnginfo,
- run_modelmerger=modules.extras.run_modelmerger
- )
+ demo = modules.ui.create_ui(wrap_gradio_gpu_call=wrap_gradio_gpu_call)
demo.launch(
share=cmd_opts.share,
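Note: the `extra_outputs` parameter threaded through `wrap_gradio_gpu_call` above lets a failed or interrupted call still return a value for every Gradio output component instead of erroring the whole UI. A minimal sketch of the two-layer wrapper, with a simplified error path (the real `modules.ui.wrap_gradio_call` does more):

```python
import threading

queue_lock = threading.Lock()  # stand-in for webui's global queue lock

def wrap_gradio_call(func, extra_outputs=None):
    # pad the return value on failure so every output component gets a value
    def f(*args, **kwargs):
        try:
            res = list(func(*args, **kwargs))
        except Exception as e:
            res = [f"Error: {e}"] + list(extra_outputs or [])
        return res
    return f

def wrap_gradio_gpu_call(func, extra_outputs=None):
    # serialize GPU work, then reuse the ordinary wrapper for error padding
    def f(*args, **kwargs):
        with queue_lock:
            return func(*args, **kwargs)
    return wrap_gradio_call(f, extra_outputs=extra_outputs)
```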
From 0114057ad672a581bd0b598870b58b674b1a3624 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 15:49:42 +0300
Subject: [PATCH 030/460] fix incorrect use of glob in modelloader for #1410
---
modules/modelloader.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/modelloader.py b/modules/modelloader.py
index 8c862b42f..015aeafa3 100644
--- a/modules/modelloader.py
+++ b/modules/modelloader.py
@@ -43,7 +43,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
for place in places:
if os.path.exists(place):
for file in glob.iglob(place + '**/**', recursive=True):
- full_path = os.path.join(place, file)
+ full_path = file
if os.path.isdir(full_path):
continue
if len(ext_filter) != 0:
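Note: the fix works because `glob.iglob` already yields paths rooted at `place`; joining `place` on again duplicated the prefix, so the subsequent `os.path.isdir` and extension checks ran against nonexistent paths. Illustration:

```python
import glob
import os

place = "models/ESRGAN/"
for file in glob.iglob(place + '**/**', recursive=True):
    full_path = file                        # e.g. "models/ESRGAN/4x/foo.pth"
    duplicated = os.path.join(place, file)  # old bug: "models/ESRGAN/models/ESRGAN/4x/foo.pth"
```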
From 4e72a1aab6d1b3a8d8c09fadc81843a07c05cc18 Mon Sep 17 00:00:00 2001
From: ClashSAN <98228077+ClashSAN@users.noreply.github.com>
Date: Sat, 1 Oct 2022 00:15:43 +0000
Subject: [PATCH 031/460] Grammar Fix
---
README.md | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/README.md b/README.md
index 5ded94f98..15e224e8f 100644
--- a/README.md
+++ b/README.md
@@ -11,12 +11,12 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- One click install and run script (but you still must install python and git)
- Outpainting
- Inpainting
-- Prompt
-- Stable Diffusion upscale
+- Prompt Matrix
+- Stable Diffusion Upscale
- Attention, specify parts of text that the model should pay more attention to
- - a man in a ((txuedo)) - will pay more attentinoto tuxedo
- - a man in a (txuedo:1.21) - alternative syntax
-- Loopback, run img2img procvessing multiple times
+ - a man in a ((tuxedo)) - will pay more attention to tuxedo
+ - a man in a (tuxedo:1.21) - alternative syntax
+- Loopback, run img2img processing multiple times
- X/Y plot, a way to draw a 2 dimensional plot of images with different parameters
- Textual Inversion
- have as many embeddings as you want and use any names you like for them
@@ -35,15 +35,15 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- 4GB video card support (also reports of 2GB working)
- Correct seeds for batches
- Prompt length validation
- - get length of prompt in tokensas you type
- - get a warning after geenration if some text was truncated
+ - get length of prompt in tokens as you type
+ - get a warning after generation if some text was truncated
- Generation parameters
- parameters you used to generate images are saved with that image
- in PNG chunks for PNG, in EXIF for JPEG
- can drag the image to PNG info tab to restore generation parameters and automatically copy them into UI
- can be disabled in settings
- Settings page
-- Running arbitrary python code from UI (must run with commandline flag to enable)
+- Running arbitrary python code from UI (must run with --allow-code to enable)
- Mouseover hints for most UI elements
- Possible to change defaults/mix/max/step values for UI elements via text config
- Random artist button
From 0758f6e641b5790ce566a998d43e0ea74a627766 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 17:24:50 +0300
Subject: [PATCH 032/460] fix --ckpt option breaking model selection
---
modules/sd_models.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 5b3dbdc79..9259d69e7 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -69,7 +69,7 @@ def list_models():
h = model_hash(cmd_ckpt)
title, short_model_name = modeltitle(cmd_ckpt, h)
checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name)
- shared.opts.sd_model_checkpoint = title
+ shared.opts.data['sd_model_checkpoint'] = title
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
for filename in model_list:
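Note: writing to `shared.opts.data` instead of assigning the attribute is deliberate. A plausible reading (an assumption, not stated in the patch): attribute assignment on the `Options` object goes through `__setattr__` machinery, while the raw dict write just seeds the in-memory value for the session. Schematically:

```python
class Options:
    # hypothetical, heavily simplified shape of the real Options class
    def __init__(self):
        object.__setattr__(self, 'data', {'sd_model_checkpoint': None})

    def __setattr__(self, key, value):
        # attribute writes funnel through here (hooks/validation omitted)
        self.data[key] = value

opts = Options()
opts.data['sd_model_checkpoint'] = "model.ckpt [7460a6fa]"  # quiet seed
opts.sd_model_checkpoint = "other.ckpt"                     # goes through __setattr__
```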
From 53a3dc601fb734ce433505b1ca68770919106bad Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 18:21:56 +0300
Subject: [PATCH 033/460] move CLIP out of requirements and into launcher to
make it possible to launch the program offline
---
launch.py | 4 ++++
requirements.txt | 2 --
requirements_versions.txt | 1 -
3 files changed, 4 insertions(+), 3 deletions(-)
diff --git a/launch.py b/launch.py
index d2793ed20..57405feab 100644
--- a/launch.py
+++ b/launch.py
@@ -15,6 +15,7 @@ requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
+clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
@@ -111,6 +112,9 @@ if not skip_torch_cuda_test:
if not is_installed("gfpgan"):
run_pip(f"install {gfpgan_package}", "gfpgan")
+if not is_installed("clip"):
+ run_pip(f"install {clip_package}", "clip")
+
os.makedirs(dir_repos, exist_ok=True)
git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
diff --git a/requirements.txt b/requirements.txt
index 7cb9d3293..d4b337fce 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -13,14 +13,12 @@ Pillow
pytorch_lightning
realesrgan
scikit-image>=0.19
-git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379
timm==0.4.12
transformers==4.19.2
torch
einops
jsonmerge
clean-fid
-git+https://github.com/openai/CLIP@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1
resize-right
torchdiffeq
kornia
diff --git a/requirements_versions.txt b/requirements_versions.txt
index 1e8006e05..8a9acf205 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -18,7 +18,6 @@ piexif==1.1.3
einops==0.4.1
jsonmerge==1.8.0
clean-fid==0.1.29
-git+https://github.com/openai/CLIP@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1
resize-right==0.0.2
torchdiffeq==0.2.3
kornia==0.6.7
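Note: with CLIP gone from both requirements files, only the launcher decides whether to fetch it, so a machine that already has the package can start fully offline. The guard pattern, sketched with simplified helpers (the real `launch.py` helpers differ in signature and logging):

```python
import importlib.util
import subprocess
import sys

def is_installed(package: str) -> bool:
    # True when the module is importable from the current environment
    return importlib.util.find_spec(package) is not None

def run_pip(package_spec: str, desc: str) -> None:
    print(f"Installing {desc}")
    subprocess.run([sys.executable, "-m", "pip", "install", package_spec], check=True)

clip_package = "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1"
if not is_installed("clip"):
    run_pip(clip_package, "clip")
```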
From 88ec0cf5571883d84abd09196652b3679e359f2e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 19:40:51 +0300
Subject: [PATCH 034/460] fix for incorrect embedding token length calculation
(will break seeds that use embeddings, you're welcome!); add option to input
initialization text for embeddings
---
modules/sd_hijack.py | 8 ++++----
modules/textual_inversion/textual_inversion.py | 13 +++++--------
modules/textual_inversion/ui.py | 4 ++--
modules/ui.py | 2 ++
4 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index fd57e5c54..3fa062422 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -130,7 +130,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
while i < len(tokens):
token = tokens[i]
- embedding = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
+ embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
if embedding is None:
remade_tokens.append(token)
@@ -142,7 +142,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
remade_tokens += [0] * emb_len
multipliers += [weight] * emb_len
used_custom_terms.append((embedding.name, embedding.checksum()))
- i += emb_len
+ i += embedding_length_in_tokens
if len(remade_tokens) > maxlen - 2:
vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
@@ -213,7 +213,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
while i < len(tokens):
token = tokens[i]
- embedding = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
+ embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i)
mult_change = self.token_mults.get(token) if opts.enable_emphasis else None
if mult_change is not None:
@@ -229,7 +229,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
remade_tokens += [0] * emb_len
multipliers += [mult] * emb_len
used_custom_terms.append((embedding.name, embedding.checksum()))
- i += emb_len
+ i += embedding_length_in_tokens
if len(remade_tokens) > maxlen - 2:
vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index c0baaace2..0c50161db 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -117,24 +117,21 @@ class EmbeddingDatabase:
possible_matches = self.ids_lookup.get(token, None)
if possible_matches is None:
- return None
+ return None, None
for ids, embedding in possible_matches:
if tokens[offset:offset + len(ids)] == ids:
- return embedding
+ return embedding, len(ids)
- return None
+ return None, None
-
-def create_embedding(name, num_vectors_per_token):
- init_text = '*'
-
+def create_embedding(name, num_vectors_per_token, init_text='*'):
cond_model = shared.sd_model.cond_stage_model
embedding_layer = cond_model.wrapped.transformer.text_model.embeddings
ids = cond_model.tokenizer(init_text, max_length=num_vectors_per_token, return_tensors="pt", add_special_tokens=False)["input_ids"]
- embedded = embedding_layer(ids.to(devices.device)).squeeze(0)
+ embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0)
vec = torch.zeros((num_vectors_per_token, embedded.shape[1]), device=devices.device)
for i in range(num_vectors_per_token):
diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py
index ce3677a98..66c43ffbe 100644
--- a/modules/textual_inversion/ui.py
+++ b/modules/textual_inversion/ui.py
@@ -6,8 +6,8 @@ import modules.textual_inversion.textual_inversion as ti
from modules import sd_hijack, shared
-def create_embedding(name, nvpt):
- filename = ti.create_embedding(name, nvpt)
+def create_embedding(name, initialization_text, nvpt):
+ filename = ti.create_embedding(name, nvpt, init_text=initialization_text)
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()
diff --git a/modules/ui.py b/modules/ui.py
index 3b81a4f74..eca50df0f 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -954,6 +954,7 @@ def create_ui(wrap_gradio_gpu_call):
gr.HTML(value="Create a new embedding
")
new_embedding_name = gr.Textbox(label="Name")
+ initialization_text = gr.Textbox(label="Initialization text", value="*")
nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1)
with gr.Row():
@@ -997,6 +998,7 @@ def create_ui(wrap_gradio_gpu_call):
fn=modules.textual_inversion.ui.create_embedding,
inputs=[
new_embedding_name,
+ initialization_text,
nvpt,
],
outputs=[
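Note: the heart of this fix is that an embedding's name may occupy a different number of prompt tokens than the number of vectors it injects, and the scan pointer must advance by the former while the output sequence grows by the latter. A schematic of the loop (`db` and `tokens` are hypothetical stand-ins for the hijack's state):

```python
def remake(tokens, db):
    # db.find_embedding_at_position returns (embedding, matched_token_count)
    # or (None, None) when no embedding starts at this offset
    remade_tokens = []
    i = 0
    while i < len(tokens):
        embedding, length_in_tokens = db.find_embedding_at_position(tokens, i)
        if embedding is None:
            remade_tokens.append(tokens[i])
            i += 1
            continue
        emb_len = embedding.vec.shape[0]  # vectors the embedding contributes
        remade_tokens += [0] * emb_len    # one placeholder id per vector
        i += length_in_tokens             # the old code advanced by emb_len here
    return remade_tokens
```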
From 71fe7fa49f5eb1a2c89932a9d217ed153c12fc8b Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 19:56:37 +0300
Subject: [PATCH 035/460] fix using aaaa-100 embedding when the prompt has
aaaa-10000 and you have both aaaa-100 and aaaa-10000 in the directory with
embeddings.
---
modules/textual_inversion/textual_inversion.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 0c50161db..9d2241cef 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -57,7 +57,8 @@ class EmbeddingDatabase:
first_id = ids[0]
if first_id not in self.ids_lookup:
self.ids_lookup[first_id] = []
- self.ids_lookup[first_id].append((ids, embedding))
+
+ self.ids_lookup[first_id] = sorted(self.ids_lookup[first_id] + [(ids, embedding)], key=lambda x: len(x[0]), reverse=True)
return embedding
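Note: keeping each lookup bucket sorted by id-sequence length, longest first, is what makes `aaaa-10000` win over `aaaa-100` when both start with the same token: candidates are tried in order, so the longest match is found before a shorter prefix can claim the slot. Condensed:

```python
ids_lookup = {}

def register_embedding(ids, embedding):
    # bucket by first token id; longest candidate sequences come first
    bucket = ids_lookup.setdefault(ids[0], [])
    bucket.append((ids, embedding))
    bucket.sort(key=lambda pair: len(pair[0]), reverse=True)

def find_embedding_at_position(tokens, offset):
    for ids, embedding in ids_lookup.get(tokens[offset], []):
        if tokens[offset:offset + len(ids)] == ids:
            return embedding, len(ids)  # first hit is the longest match
    return None, None
```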
From 4ec4af6e0b7addeee5221a03f32d117ccdc875d9 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 20:15:25 +0300
Subject: [PATCH 036/460] add checkpoint info to saved embeddings
---
modules/textual_inversion/textual_inversion.py | 13 ++++++++++++-
1 file changed, 12 insertions(+), 1 deletion(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 9d2241cef..1183aab76 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -7,7 +7,7 @@ import tqdm
import html
import datetime
-from modules import shared, devices, sd_hijack, processing
+from modules import shared, devices, sd_hijack, processing, sd_models
import modules.textual_inversion.dataset
@@ -17,6 +17,8 @@ class Embedding:
self.name = name
self.step = step
self.cached_checksum = None
+ self.sd_checkpoint = None
+ self.sd_checkpoint_name = None
def save(self, filename):
embedding_data = {
@@ -24,6 +26,8 @@ class Embedding:
"string_to_param": {"*": self.vec},
"name": self.name,
"step": self.step,
+ "sd_checkpoint": self.sd_checkpoint,
+ "sd_checkpoint_name": self.sd_checkpoint_name,
}
torch.save(embedding_data, filename)
@@ -41,6 +45,7 @@ class Embedding:
self.cached_checksum = f'{const_hash(self.vec.reshape(-1) * 100) & 0xffff:04x}'
return self.cached_checksum
+
class EmbeddingDatabase:
def __init__(self, embeddings_dir):
self.ids_lookup = {}
@@ -96,6 +101,8 @@ class EmbeddingDatabase:
vec = emb.detach().to(devices.device, dtype=torch.float32)
embedding = Embedding(vec, name)
embedding.step = data.get('step', None)
+ embedding.sd_checkpoint = data.get('hash', None)
+ embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None)
self.register_embedding(embedding, shared.sd_model)
for fn in os.listdir(self.embeddings_dir):
@@ -249,6 +256,10 @@ Last saved image: {html.escape(last_saved_image)}
"""
+ checkpoint = sd_models.select_checkpoint()
+
+ embedding.sd_checkpoint = checkpoint.hash
+ embedding.sd_checkpoint_name = checkpoint.model_name
embedding.cached_checksum = None
embedding.save(filename)
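Note: the saved `.pt` payload is a plain dict, so stamping the checkpoint hash and name in costs nothing and records which model the embedding was trained against. Roughly (field values here are illustrative, and `string_to_token` is assumed from context above this hunk):

```python
import torch

embedding_data = {
    "string_to_token": {"*": 265},              # assumed; not shown in the hunk
    "string_to_param": {"*": torch.zeros(1, 768)},
    "name": "my-style",
    "step": 500,
    "sd_checkpoint": "7460a6fa",                # hash of the training checkpoint
    "sd_checkpoint_name": "sd-v1-4",
}
torch.save(embedding_data, "my-style.pt")
```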
From 3ff0de2c594b786ef948a89efb1814c59bb42117 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 20:23:40 +0300
Subject: [PATCH 037/460] added --disable-console-progressbars to disable
progressbars in console; disabled printing prompts to console by default,
enabled by --enable-console-prompts
---
modules/img2img.py | 4 +++-
modules/sd_samplers.py | 8 ++++++--
modules/shared.py | 7 +++++--
modules/txt2img.py | 4 +++-
4 files changed, 17 insertions(+), 6 deletions(-)
diff --git a/modules/img2img.py b/modules/img2img.py
index 03e934e96..f4455c90f 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -103,7 +103,9 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
inpaint_full_res_padding=inpaint_full_res_padding,
inpainting_mask_invert=inpainting_mask_invert,
)
- print(f"\nimg2img: {prompt}", file=shared.progress_print_out)
+
+ if shared.cmd_opts.enable_console_prompts:
+ print(f"\nimg2img: {prompt}", file=shared.progress_print_out)
p.extra_generation_params["Mask blur"] = mask_blur
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 925222148..9316875ab 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -77,7 +77,9 @@ def extended_tdqm(sequence, *args, desc=None, **kwargs):
state.sampling_steps = len(sequence)
state.sampling_step = 0
- for x in tqdm.tqdm(sequence, *args, desc=state.job, file=shared.progress_print_out, **kwargs):
+ seq = sequence if cmd_opts.disable_console_progressbars else tqdm.tqdm(sequence, *args, desc=state.job, file=shared.progress_print_out, **kwargs)
+
+ for x in seq:
if state.interrupted:
break
@@ -207,7 +209,9 @@ def extended_trange(sampler, count, *args, **kwargs):
state.sampling_steps = count
state.sampling_step = 0
- for x in tqdm.trange(count, *args, desc=state.job, file=shared.progress_print_out, **kwargs):
+ seq = range(count) if cmd_opts.disable_console_progressbars else tqdm.trange(count, *args, desc=state.job, file=shared.progress_print_out, **kwargs)
+
+ for x in seq:
if state.interrupted:
break
diff --git a/modules/shared.py b/modules/shared.py
index 5a591dc99..1bf7a6c14 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -58,6 +58,9 @@ parser.add_argument("--opt-channelslast", action='store_true', help="change memo
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
+parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
+parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
+
cmd_opts = parser.parse_args()
device = get_optimal_device()
@@ -320,14 +323,14 @@ class TotalTQDM:
)
def update(self):
- if not opts.multiple_tqdm:
+ if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
return
if self._tqdm is None:
self.reset()
self._tqdm.update()
def updateTotal(self, new_total):
- if not opts.multiple_tqdm:
+ if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars:
return
if self._tqdm is None:
self.reset()
diff --git a/modules/txt2img.py b/modules/txt2img.py
index 5368e4d00..d4406c3c0 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -34,7 +34,9 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2:
denoising_strength=denoising_strength if enable_hr else None,
)
- print(f"\ntxt2img: {prompt}", file=shared.progress_print_out)
+ if cmd_opts.enable_console_prompts:
+ print(f"\ntxt2img: {prompt}", file=shared.progress_print_out)
+
processed = modules.scripts.scripts_txt2img.run(p, *args)
if processed is None:
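Note: both sampler loops use the same trick: choose between the raw sequence and a tqdm-wrapped one up front, then iterate a single variable, so the loop body is identical whether progress bars are enabled or not. In isolation:

```python
import tqdm

def progress(sequence, disabled: bool, **tqdm_kwargs):
    # tqdm only wraps when enabled; iteration is identical either way
    return sequence if disabled else tqdm.tqdm(sequence, **tqdm_kwargs)

for step in progress(range(30), disabled=False, desc="sampling"):
    pass  # sampler step would go here
```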
From 6365a41f5981efa506dfe4e8fa878b43ca2d8d0c Mon Sep 17 00:00:00 2001
From: d8ahazard
Date: Sun, 2 Oct 2022 12:58:17 -0500
Subject: [PATCH 038/460] Update esrgan_model.py
Use alternate ESRGAN Model download path.
---
modules/esrgan_model.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index ea91abfe8..4aed9283c 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -73,8 +73,8 @@ def fix_model_layers(crt_model, pretrained_net):
class UpscalerESRGAN(Upscaler):
def __init__(self, dirname):
self.name = "ESRGAN"
- self.model_url = "https://drive.google.com/u/0/uc?id=1TPrz5QKd8DHHt1k8SRtm6tMiPjz_Qene&export=download"
- self.model_name = "ESRGAN 4x"
+ self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/ESRGAN.pth"
+ self.model_name = "ESRGAN_4x"
self.scalers = []
self.user_path = dirname
self.model_path = os.path.join(models_path, self.name)
From a1cde7e6468f80584030525a1b07cbf0f4ee42eb Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 21:09:10 +0300
Subject: [PATCH 039/460] disabled SD model download after multiple complaints
---
modules/sd_models.py | 18 ++++++++----------
modules/textual_inversion/ui.py | 2 +-
webui.py | 2 +-
3 files changed, 10 insertions(+), 12 deletions(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 9259d69e7..9a6b568f0 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -13,9 +13,6 @@ from modules.paths import models_path
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(models_path, model_dir))
-model_name = "sd-v1-4.ckpt"
-model_url = "https://drive.yerf.org/wl/?id=EBfTrmcCCUAGaQBXVIj5lJmEhjoP1tgl&mode=grid&download=1"
-user_dir = None
CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name'])
checkpoints_list = {}
@@ -30,12 +27,10 @@ except Exception:
pass
-def setup_model(dirname):
- global user_dir
- user_dir = dirname
+def setup_model():
if not os.path.exists(model_path):
os.makedirs(model_path)
- checkpoints_list.clear()
+
list_models()
@@ -45,7 +40,7 @@ def checkpoint_tiles():
def list_models():
checkpoints_list.clear()
- model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=user_dir, ext_filter=[".ckpt"], download_name=model_name)
+ model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt"])
def modeltitle(path, shorthash):
abspath = os.path.abspath(path)
@@ -106,8 +101,11 @@ def select_checkpoint():
if len(checkpoints_list) == 0:
print(f"No checkpoints found. When searching for checkpoints, looked at:", file=sys.stderr)
- print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
- print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
+ if shared.cmd_opts.ckpt is not None:
+ print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr)
+ print(f" - directory {model_path}", file=sys.stderr)
+ if shared.cmd_opts.ckpt_dir is not None:
+ print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr)
print(f"Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr)
exit(1)
diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py
index 66c43ffbe..633037d8e 100644
--- a/modules/textual_inversion/ui.py
+++ b/modules/textual_inversion/ui.py
@@ -22,7 +22,7 @@ def train_embedding(*args):
embedding, filename = ti.train_embedding(*args)
res = f"""
-Training {'interrupted' if shared.state.interrupted else 'finished'} after {embedding.step} steps.
+Training {'interrupted' if shared.state.interrupted else 'finished'} at {embedding.step} steps.
Embedding saved to {html.escape(filename)}
"""
return res, ""
diff --git a/webui.py b/webui.py
index 424ab9751..dc72ceb8a 100644
--- a/webui.py
+++ b/webui.py
@@ -23,7 +23,7 @@ from modules.paths import script_path
from modules.shared import cmd_opts
modelloader.cleanup_models()
-modules.sd_models.setup_model(cmd_opts.ckpt_dir)
+modules.sd_models.setup_model()
codeformer.setup_model(cmd_opts.codeformer_models_path)
gfpgan.setup_model(cmd_opts.gfpgan_models_path)
shared.face_restorers.append(modules.face_restoration.FaceRestoration())
From 852fd90c0dcda9cb5fbbfdf0c7308ce58034935c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 21:22:20 +0300
Subject: [PATCH 040/460] emergency fix for disabling SD model download after
multiple complaints
---
modules/sd_models.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 9a6b568f0..5f9920647 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -45,8 +45,8 @@ def list_models():
def modeltitle(path, shorthash):
abspath = os.path.abspath(path)
- if user_dir is not None and abspath.startswith(user_dir):
- name = abspath.replace(user_dir, '')
+ if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
+ name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
elif abspath.startswith(model_path):
name = abspath.replace(model_path, '')
else:
From e808096cf641d868f88465515d70d40fc46125d4 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sun, 2 Oct 2022 19:26:06 +0100
Subject: [PATCH 041/460] correct indent
---
modules/scripts.py | 48 ++++++++++++++++++++++++----------------------
modules/ui.py | 23 +++++++++++-----------
2 files changed, 36 insertions(+), 35 deletions(-)
diff --git a/modules/scripts.py b/modules/scripts.py
index 788397f53..45230f9a1 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -163,37 +163,39 @@ class ScriptRunner:
return processed
def reload_sources(self):
- for si,script in list(enumerate(self.scripts)):
- with open(script.filename, "r", encoding="utf8") as file:
- args_from = script.args_from
- args_to = script.args_to
- filename = script.filename
- text = file.read()
+ for si, script in list(enumerate(self.scripts)):
+ with open(script.filename, "r", encoding="utf8") as file:
+ args_from = script.args_from
+ args_to = script.args_to
+ filename = script.filename
+ text = file.read()
- from types import ModuleType
- compiled = compile(text, filename, 'exec')
- module = ModuleType(script.filename)
- exec(compiled, module.__dict__)
+ from types import ModuleType
- for key, script_class in module.__dict__.items():
- if type(script_class) == type and issubclass(script_class, Script):
- self.scripts[si] = script_class()
- self.scripts[si].filename = filename
- self.scripts[si].args_from = args_from
- self.scripts[si].args_to = args_to
+ compiled = compile(text, filename, 'exec')
+ module = ModuleType(script.filename)
+ exec(compiled, module.__dict__)
+
+ for key, script_class in module.__dict__.items():
+ if type(script_class) == type and issubclass(script_class, Script):
+ self.scripts[si] = script_class()
+ self.scripts[si].filename = filename
+ self.scripts[si].args_from = args_from
+ self.scripts[si].args_to = args_to
scripts_txt2img = ScriptRunner()
scripts_img2img = ScriptRunner()
def reload_script_body_only():
- scripts_txt2img.reload_sources()
- scripts_img2img.reload_sources()
+ scripts_txt2img.reload_sources()
+ scripts_img2img.reload_sources()
+
def reload_scripts(basedir):
- global scripts_txt2img,scripts_img2img
+ global scripts_txt2img, scripts_img2img
- scripts_data.clear()
- load_scripts(basedir)
+ scripts_data.clear()
+ load_scripts(basedir)
- scripts_txt2img = ScriptRunner()
- scripts_img2img = ScriptRunner()
+ scripts_txt2img = ScriptRunner()
+ scripts_img2img = ScriptRunner()
diff --git a/modules/ui.py b/modules/ui.py
index 963a2c611..6b30f84ba 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1003,12 +1003,12 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
)
with gr.Row():
- reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary')
- restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary')
+ reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary')
+ restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary')
def reload_scripts():
- modules.scripts.reload_script_body_only()
+ modules.scripts.reload_script_body_only()
reload_script_bodies.click(
fn=reload_scripts,
@@ -1018,7 +1018,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger):
)
def request_restart():
- settings_interface.gradio_ref.do_restart = True
+ settings_interface.gradio_ref.do_restart = True
restart_gradio.click(
fn=request_restart,
@@ -1234,12 +1234,11 @@ for filename in sorted(os.listdir(jsdir)):
if 'gradio_routes_templates_response' not in globals():
- def template_response(*args, **kwargs):
- res = gradio_routes_templates_response(*args, **kwargs)
- res.body = res.body.replace(b'', f'{javascript}'.encode("utf8"))
- res.init_headers()
- return res
-
- gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
- gradio.routes.templates.TemplateResponse = template_response
+ def template_response(*args, **kwargs):
+ res = gradio_routes_templates_response(*args, **kwargs)
+ res.body = res.body.replace(b'', f'{javascript}'.encode("utf8"))
+ res.init_headers()
+ return res
+ gradio_routes_templates_response = gradio.routes.templates.TemplateResponse
+ gradio.routes.templates.TemplateResponse = template_response
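Note: beneath the re-indentation, `reload_sources` is a script hot-reload: read the file, `compile` it, `exec` it into a fresh module object, then transplant the bookkeeping fields onto the new `Script` instances. The core technique on its own:

```python
from types import ModuleType

def load_module_from_source(filename: str) -> ModuleType:
    # re-executes a script file into a brand-new module, so edited class
    # bodies take effect without restarting the interpreter
    with open(filename, "r", encoding="utf8") as file:
        text = file.read()
    compiled = compile(text, filename, 'exec')
    module = ModuleType(filename)
    exec(compiled, module.__dict__)
    return module
```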
From a634c3226fd69486ce96df56f95f3fd63172305c Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sun, 2 Oct 2022 19:26:38 +0100
Subject: [PATCH 042/460] correct indent
---
webui.py | 56 ++++++++++++++++++++++++++++----------------------------
1 file changed, 28 insertions(+), 28 deletions(-)
diff --git a/webui.py b/webui.py
index ab200045a..140040ca1 100644
--- a/webui.py
+++ b/webui.py
@@ -89,38 +89,38 @@ def webui():
while 1:
- demo = modules.ui.create_ui(
- txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img),
- img2img=wrap_gradio_gpu_call(modules.img2img.img2img),
- run_extras=wrap_gradio_gpu_call(modules.extras.run_extras),
- run_pnginfo=modules.extras.run_pnginfo,
- run_modelmerger=modules.extras.run_modelmerger
- )
+ demo = modules.ui.create_ui(
+ txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img),
+ img2img=wrap_gradio_gpu_call(modules.img2img.img2img),
+ run_extras=wrap_gradio_gpu_call(modules.extras.run_extras),
+ run_pnginfo=modules.extras.run_pnginfo,
+ run_modelmerger=modules.extras.run_modelmerger
+ )
- demo.launch(
- share=cmd_opts.share,
- server_name="0.0.0.0" if cmd_opts.listen else None,
- server_port=cmd_opts.port,
- debug=cmd_opts.gradio_debug,
- auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
- inbrowser=cmd_opts.autolaunch,
- prevent_thread_lock=True
- )
+ demo.launch(
+ share=cmd_opts.share,
+ server_name="0.0.0.0" if cmd_opts.listen else None,
+ server_port=cmd_opts.port,
+ debug=cmd_opts.gradio_debug,
+ auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None,
+ inbrowser=cmd_opts.autolaunch,
+ prevent_thread_lock=True
+ )
- while 1:
- time.sleep(0.5)
- if getattr(demo,'do_restart',False):
- time.sleep(0.5)
- demo.close()
- time.sleep(0.5)
- break
+ while 1:
+ time.sleep(0.5)
+ if getattr(demo,'do_restart',False):
+ time.sleep(0.5)
+ demo.close()
+ time.sleep(0.5)
+ break
- print('Reloading Custom Scripts')
- modules.scripts.reload_scripts(os.path.join(script_path, "scripts"))
- print('Reloading modules: modules.ui')
- importlib.reload(modules.ui)
- print('Restarting Gradio')
+ print('Reloading Custom Scripts')
+ modules.scripts.reload_scripts(os.path.join(script_path, "scripts"))
+ print('Reloading modules: modules.ui')
+ importlib.reload(modules.ui)
+ print('Restarting Gradio')
if __name__ == "__main__":
From c0389eb3071870240bc158263e5dfb4351ec8eba Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 21:35:29 +0300
Subject: [PATCH 043/460] hello
---
webui.py | 10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/webui.py b/webui.py
index 634956978..47848ba58 100644
--- a/webui.py
+++ b/webui.py
@@ -103,11 +103,11 @@ def webui():
while 1:
time.sleep(0.5)
- if getattr(demo,'do_restart',False):
- time.sleep(0.5)
- demo.close()
- time.sleep(0.5)
- break
+ if getattr(demo, 'do_restart', False):
+ time.sleep(0.5)
+ demo.close()
+ time.sleep(0.5)
+ break
print('Reloading Custom Scripts')
modules.scripts.reload_scripts(os.path.join(script_path, "scripts"))
From 2ef69df9a7c7b6793401f29ced71fb8a781fad4c Mon Sep 17 00:00:00 2001
From: Jocke
Date: Sun, 2 Oct 2022 16:10:41 +0200
Subject: [PATCH 044/460] Prevent upscaling when None is selected for SD
upscale
---
scripts/sd_upscale.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py
index 2653e2d40..cb37ff7e8 100644
--- a/scripts/sd_upscale.py
+++ b/scripts/sd_upscale.py
@@ -34,7 +34,11 @@ class Script(scripts.Script):
seed = p.seed
init_img = p.init_images[0]
- img = upscaler.scaler.upscale(init_img, 2, upscaler.data_path)
+
+ if(upscaler.name != "None"):
+ img = upscaler.scaler.upscale(init_img, 2, upscaler.data_path)
+ else:
+ img = init_img
devices.torch_gc()
From 91f327f22bb2feb780c424c74723cc0629dc34a1 Mon Sep 17 00:00:00 2001
From: Lopyter
Date: Sun, 2 Oct 2022 18:15:31 +0200
Subject: [PATCH 045/460] make save to dirs optional for imgs saved from ui
---
modules/shared.py | 1 +
modules/ui.py | 2 +-
2 files changed, 2 insertions(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index 1bf7a6c14..785e7af6f 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -173,6 +173,7 @@ options_templates.update(options_section(('saving-to-dirs', "Saving to a directo
"grid_save_to_dirs": OptionInfo(False, "Save grids to subdirectory"),
"directories_filename_pattern": OptionInfo("", "Directory name pattern"),
"directories_max_prompt_words": OptionInfo(8, "Max prompt words", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}),
+ "use_save_to_dirs_for_ui": OptionInfo(False, "Use \"Save images to a subdirectory\" option for images saved from UI"),
}))
options_templates.update(options_section(('upscaling', "Upscaling"), {
diff --git a/modules/ui.py b/modules/ui.py
index 78a15d83a..8912deff4 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -113,7 +113,7 @@ def save_files(js_data, images, index):
p = MyObject(data)
path = opts.outdir_save
- save_to_dirs = opts.save_to_dirs
+ save_to_dirs = opts.use_save_to_dirs_for_ui
if save_to_dirs:
dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, p.seed, p.prompt)
From c4445225f79f1c57afe52358ff4b205864eb7aac Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 21:50:14 +0300
Subject: [PATCH 046/460] change wording for options
---
modules/shared.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/modules/shared.py b/modules/shared.py
index 785e7af6f..7246eadc6 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -170,10 +170,10 @@ options_templates.update(options_section(('saving-paths', "Paths for saving"), {
options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), {
"save_to_dirs": OptionInfo(False, "Save images to a subdirectory"),
- "grid_save_to_dirs": OptionInfo(False, "Save grids to subdirectory"),
+ "grid_save_to_dirs": OptionInfo(False, "Save grids to a subdirectory"),
+ "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"),
"directories_filename_pattern": OptionInfo("", "Directory name pattern"),
- "directories_max_prompt_words": OptionInfo(8, "Max prompt words", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}),
- "use_save_to_dirs_for_ui": OptionInfo(False, "Use \"Save images to a subdirectory\" option for images saved from UI"),
+ "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}),
}))
options_templates.update(options_section(('upscaling', "Upscaling"), {
From c7543d4940da672d970124ae8f2fec9de7bdc1da Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 22:41:21 +0300
Subject: [PATCH 047/460] preprocessing for textual inversion added
---
modules/interrogate.py | 1 +
modules/textual_inversion/preprocess.py | 75 +++++++++++++++++++
.../textual_inversion/textual_inversion.py | 1 +
modules/textual_inversion/ui.py | 14 +++-
modules/ui.py | 36 +++++++++
5 files changed, 124 insertions(+), 3 deletions(-)
create mode 100644 modules/textual_inversion/preprocess.py
diff --git a/modules/interrogate.py b/modules/interrogate.py
index f62a47458..eed87144f 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -21,6 +21,7 @@ Category = namedtuple("Category", ["name", "topn", "items"])
re_topn = re.compile(r"\.top(\d+)\.")
+
class InterrogateModels:
blip_model = None
clip_model = None
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
new file mode 100644
index 000000000..209e928ff
--- /dev/null
+++ b/modules/textual_inversion/preprocess.py
@@ -0,0 +1,75 @@
+import os
+from PIL import Image, ImageOps
+import tqdm
+
+from modules import shared, images
+
+
+def preprocess(process_src, process_dst, process_flip, process_split, process_caption):
+ size = 512
+ src = os.path.abspath(process_src)
+ dst = os.path.abspath(process_dst)
+
+ assert src != dst, 'same directory specified as source and destination'
+
+ os.makedirs(dst, exist_ok=True)
+
+ files = os.listdir(src)
+
+ shared.state.textinfo = "Preprocessing..."
+ shared.state.job_count = len(files)
+
+ if process_caption:
+ shared.interrogator.load()
+
+ def save_pic_with_caption(image, index):
+ if process_caption:
+ caption = "-" + shared.interrogator.generate_caption(image)
+ else:
+ caption = ""
+
+ image.save(os.path.join(dst, f"{index:05}-{subindex[0]}{caption}.png"))
+ subindex[0] += 1
+
+ def save_pic(image, index):
+ save_pic_with_caption(image, index)
+
+ if process_flip:
+ save_pic_with_caption(ImageOps.mirror(image), index)
+
+ for index, imagefile in enumerate(tqdm.tqdm(files)):
+ subindex = [0]
+ filename = os.path.join(src, imagefile)
+ img = Image.open(filename).convert("RGB")
+
+ if shared.state.interrupted:
+ break
+
+ ratio = img.height / img.width
+ is_tall = ratio > 1.35
+ is_wide = ratio < 1 / 1.35
+
+ if process_split and is_tall:
+ img = img.resize((size, size * img.height // img.width))
+
+ top = img.crop((0, 0, size, size))
+ save_pic(top, index)
+
+ bot = img.crop((0, img.height - size, size, img.height))
+ save_pic(bot, index)
+ elif process_split and is_wide:
+ img = img.resize((size * img.width // img.height, size))
+
+ left = img.crop((0, 0, size, size))
+ save_pic(left, index)
+
+ right = img.crop((img.width - size, 0, img.width, size))
+ save_pic(right, index)
+ else:
+ img = images.resize_image(1, img, size, size)
+ save_pic(img, index)
+
+ shared.state.nextjob()
+
+ if process_caption:
+ shared.interrogator.send_blip_to_ram()
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 1183aab76..d4e250d87 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -7,6 +7,7 @@ import tqdm
import html
import datetime
+
from modules import shared, devices, sd_hijack, processing, sd_models
import modules.textual_inversion.dataset
diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py
index 633037d8e..f19ac5e02 100644
--- a/modules/textual_inversion/ui.py
+++ b/modules/textual_inversion/ui.py
@@ -2,24 +2,31 @@ import html
import gradio as gr
-import modules.textual_inversion.textual_inversion as ti
+import modules.textual_inversion.textual_inversion
+import modules.textual_inversion.preprocess
from modules import sd_hijack, shared
def create_embedding(name, initialization_text, nvpt):
- filename = ti.create_embedding(name, nvpt, init_text=initialization_text)
+ filename = modules.textual_inversion.textual_inversion.create_embedding(name, nvpt, init_text=initialization_text)
sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings()
return gr.Dropdown.update(choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())), f"Created: {filename}", ""
+def preprocess(*args):
+ modules.textual_inversion.preprocess.preprocess(*args)
+
+ return "Preprocessing finished.", ""
+
+
def train_embedding(*args):
try:
sd_hijack.undo_optimizations()
- embedding, filename = ti.train_embedding(*args)
+ embedding, filename = modules.textual_inversion.textual_inversion.train_embedding(*args)
res = f"""
Training {'interrupted' if shared.state.interrupted else 'finished'} at {embedding.step} steps.
@@ -30,3 +37,4 @@ Embedding saved to {html.escape(filename)}
raise
finally:
sd_hijack.apply_optimizations()
+
diff --git a/modules/ui.py b/modules/ui.py
index 8912deff4..e7bde53bf 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -961,6 +961,8 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Row().style(equal_height=False):
with gr.Column():
with gr.Group():
+ gr.HTML(value="See wiki for detailed explanation.
")
+
gr.HTML(value="Create a new embedding
")
new_embedding_name = gr.Textbox(label="Name")
@@ -974,6 +976,24 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Column():
create_embedding = gr.Button(value="Create", variant='primary')
+ with gr.Group():
+ gr.HTML(value="Preprocess images
")
+
+ process_src = gr.Textbox(label='Source directory')
+ process_dst = gr.Textbox(label='Destination directory')
+
+ with gr.Row():
+ process_flip = gr.Checkbox(label='Flip')
+ process_split = gr.Checkbox(label='Split into two')
+ process_caption = gr.Checkbox(label='Add caption')
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ gr.HTML(value="")
+
+ with gr.Column():
+ run_preprocess = gr.Button(value="Preprocess", variant='primary')
+
with gr.Group():
gr.HTML(value="Train an embedding; must specify a directory with a set of 512x512 images
")
train_embedding_name = gr.Dropdown(label='Embedding', choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
@@ -1018,6 +1038,22 @@ def create_ui(wrap_gradio_gpu_call):
]
)
+ run_preprocess.click(
+ fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]),
+ _js="start_training_textual_inversion",
+ inputs=[
+ process_src,
+ process_dst,
+ process_flip,
+ process_split,
+ process_caption,
+ ],
+ outputs=[
+ ti_output,
+ ti_outcome,
+ ],
+ )
+
train_embedding.click(
fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
From 6785331e22d6a488fbf5905fab56d7fec867e038 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 22:59:01 +0300
Subject: [PATCH 048/460] keep textual inversion dataset latents in CPU memory
to save a bit of VRAM
---
modules/textual_inversion/dataset.py | 2 ++
modules/textual_inversion/textual_inversion.py | 3 +++
modules/ui.py | 4 ++--
3 files changed, 7 insertions(+), 2 deletions(-)
diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py
index 7e134a08f..e8394ff65 100644
--- a/modules/textual_inversion/dataset.py
+++ b/modules/textual_inversion/dataset.py
@@ -8,6 +8,7 @@ from torchvision import transforms
import random
import tqdm
+from modules import devices
class PersonalizedBase(Dataset):
@@ -47,6 +48,7 @@ class PersonalizedBase(Dataset):
torchdata = torch.moveaxis(torchdata, 2, 0)
init_latent = model.get_first_stage_encoding(model.encode_first_stage(torchdata.unsqueeze(dim=0))).squeeze()
+ init_latent = init_latent.to(devices.cpu)
self.dataset.append((init_latent, filename_tokens))
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index d4e250d87..8686f5347 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -212,7 +212,10 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps,
with torch.autocast("cuda"):
c = cond_model([text])
+
+ x = x.to(devices.device)
loss = shared.sd_model(x.unsqueeze(0), c)[0]
+ del x
losses[embedding.step % losses.shape[0]] = loss.item()
diff --git a/modules/ui.py b/modules/ui.py
index e7bde53bf..d9d02ecef 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1002,8 +1002,8 @@ def create_ui(wrap_gradio_gpu_call):
log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion")
template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt"))
steps = gr.Number(label='Max steps', value=100000, precision=0)
- create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=1000, precision=0)
- save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=1000, precision=0)
+ create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0)
+ save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0)
with gr.Row():
with gr.Column(scale=2):
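Note: the VRAM saving comes from parking every encoded latent in system RAM and copying only the sample being trained back to the GPU for the forward pass; `del x` then releases the GPU copy immediately. Minimal form (the loss line is a stand-in for the real model call):

```python
import torch

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# at dataset build time: encode, then move the latent off the GPU
init_latent = torch.randn(4, 64, 64, device=device).to(torch.device("cpu"))

# at train time: copy one sample back, use it, release it right away
x = init_latent.to(device)
loss = (x ** 2).mean()   # stand-in for shared.sd_model(x.unsqueeze(0), c)[0]
loss_value = loss.item()
del x
```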
From 166283653cfe7521a422c91e8fb801f3ecb4adc8 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 23:18:13 +0300
Subject: [PATCH 049/460] remove LDSR warning
---
modules/paths.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/modules/paths.py b/modules/paths.py
index ceb804171..606f7d666 100644
--- a/modules/paths.py
+++ b/modules/paths.py
@@ -20,7 +20,6 @@ path_dirs = [
(os.path.join(sd_path, '../taming-transformers'), 'taming', 'Taming Transformers', []),
(os.path.join(sd_path, '../CodeFormer'), 'inference_codeformer.py', 'CodeFormer', []),
(os.path.join(sd_path, '../BLIP'), 'models/blip.py', 'BLIP', []),
- (os.path.join(sd_path, '../latent-diffusion'), 'LDSR.py', 'LDSR', []),
(os.path.join(sd_path, '../k-diffusion'), 'k_diffusion/sampling.py', 'k_diffusion', ["atstart"]),
]
From 4c2eccf8e96825333ed400f8a8a2be78141ed8ec Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 2 Oct 2022 23:22:48 +0300
Subject: [PATCH 050/460] credit Rinon Gal
---
README.md | 1 +
1 file changed, 1 insertion(+)
diff --git a/README.md b/README.md
index 15e224e8f..ec3d7532d 100644
--- a/README.md
+++ b/README.md
@@ -113,6 +113,7 @@ The documentation was moved from this README over to the project's [wiki](https:
- LDSR - https://github.com/Hafiidz/latent-diffusion
- Ideas for optimizations - https://github.com/basujindal/stable-diffusion
- Doggettx - Cross Attention layer optimization - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing.
+- Rinon Gal - Textual Inversion - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas).
- Idea for SD upscale - https://github.com/jquesnelle/txt2imghd
- Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot
- CLIP interrogator idea and borrowing some code - https://github.com/pharmapsychotic/clip-interrogator
From 138662734c25dab4e73e632b7eaff9ad9c0ce2b4 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 3 Oct 2022 07:57:59 +0300
Subject: [PATCH 051/460] use dropdown instead of radio for img2img upscaler
selection
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index 7246eadc6..2a599e9cf 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -183,7 +183,7 @@ options_templates.update(options_section(('upscaling', "Upscaling"), {
"SWIN_tile": OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}),
"SWIN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}),
"ldsr_steps": OptionInfo(100, "LDSR processing steps. Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}),
- "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Radio, lambda: {"choices": [x.name for x in sd_upscalers]}),
+ "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}),
}))
options_templates.update(options_section(('face-restoration', "Face restoration"), {
From e615d4f9d101e2712c7c2d0e3e8feb19cb430c74 Mon Sep 17 00:00:00 2001
From: Hanusz Leszek
Date: Sun, 2 Oct 2022 21:08:23 +0200
Subject: [PATCH 052/460] Convert folder icon surrogate pair to valid utf8
---
javascript/hints.js | 2 +-
modules/ui.py | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/javascript/hints.js b/javascript/hints.js
index 84694eeb3..e72e93381 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -15,7 +15,7 @@ titles = {
"\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomed",
"\u{1f3a8}": "Add a random artist to the prompt.",
"\u2199\ufe0f": "Read generation parameters from prompt into user interface.",
- "\uD83D\uDCC2": "Open images output directory",
+ "\u{1f4c2}": "Open images output directory",
"Inpaint a part of image": "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt",
"SD upscale": "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back",
diff --git a/modules/ui.py b/modules/ui.py
index d9d02ecef..164321512 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -69,7 +69,7 @@ random_symbol = '\U0001f3b2\ufe0f' # 🎲️
reuse_symbol = '\u267b\ufe0f' # ♻️
art_symbol = '\U0001f3a8' # 🎨
paste_symbol = '\u2199\ufe0f' # ↙
-folder_symbol = '\uD83D\uDCC2'
+folder_symbol = '\U0001f4c2' # 📂
def plaintext_to_html(text):
text = "" + "
\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "
"
From 34c638142eaa57f89b86545ba3c72085036398bb Mon Sep 17 00:00:00 2001
From: hentailord85ez <112723046+hentailord85ez@users.noreply.github.com>
Date: Fri, 30 Sep 2022 22:38:14 +0100
Subject: [PATCH 053/460] Fixed when eta = 0
Fixes unexpected behavior when using eta = 0 in something like an X/Y plot while the default eta was set to a non-zero value.
---
modules/sd_samplers.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 9316875ab..dbf570d2c 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -127,7 +127,7 @@ class VanillaStableDiffusionSampler:
return res
def initialize(self, p):
- self.eta = p.eta or opts.eta_ddim
+ self.eta = p.eta if p.eta is not None else opts.eta_ddim
for fieldname in ['p_sample_ddim', 'p_sample_plms']:
if hasattr(self.sampler, fieldname):
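Note: this is the classic falsy-zero bug: `p.eta or opts.eta_ddim` treats an explicit `0.0` the same as "unset" and silently substitutes the default; only a `None` check distinguishes "not provided" from "provided as zero". Reduced to its essence:

```python
eta_ddim_default = 0.6   # hypothetical configured default

p_eta = 0.0              # an X/Y plot cell explicitly requests eta = 0
buggy = p_eta or eta_ddim_default                         # -> 0.6 (0.0 is falsy)
fixed = p_eta if p_eta is not None else eta_ddim_default  # -> 0.0 as requested
assert (buggy, fixed) == (0.6, 0.0)
```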
From 36ea4ac0f5844e5c8dec124edbdb714ccdd6013c Mon Sep 17 00:00:00 2001
From: RnDMonkey
Date: Sun, 2 Oct 2022 22:21:16 -0700
Subject: [PATCH 054/460] moved no-style return outside join function
---
modules/images.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/images.py b/modules/images.py
index bba55158e..1a046aca6 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -315,7 +315,7 @@ def apply_filename_pattern(x, p, seed, prompt):
#currently disabled if using the save button, will work otherwise
# if enabled it will cause a bug because styles is not included in the save_files data dictionary
if hasattr(p, "styles"):
- x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"] or "None"), replace_spaces=False))
+ x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"]) or "None", replace_spaces=False))
x = x.replace("[sampler]", sanitize_filename_part(sd_samplers.samplers[p.sampler_index].name, replace_spaces=False))
From 6491b09c24ea77f1f69990ea80a216f9ce319589 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 3 Oct 2022 08:53:52 +0300
Subject: [PATCH 055/460] use existing function for gfpgan
---
modules/gfpgan_model.py | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py
index bb30d7330..dd3fbcab1 100644
--- a/modules/gfpgan_model.py
+++ b/modules/gfpgan_model.py
@@ -97,11 +97,7 @@ def setup_model(dirname):
return "GFPGAN"
def restore(self, np_image):
- np_image_bgr = np_image[:, :, ::-1]
- cropped_faces, restored_faces, gfpgan_output_bgr = gfpgann().enhance(np_image_bgr, has_aligned=False, only_center_face=False, paste_back=True)
- np_image = gfpgan_output_bgr[:, :, ::-1]
-
- return np_image
+ return gfpgan_fix_faces(np_image)
shared.face_restorers.append(FaceRestorerGFPGAN())
except Exception:
From 43a74fa595003321200a40bd2431e56c245e75ed Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 3 Oct 2022 11:48:19 +0300
Subject: [PATCH 056/460] batch processing for img2img with an empty output
directory, by request
---
modules/img2img.py | 7 +++++--
modules/ui.py | 2 +-
2 files changed, 6 insertions(+), 3 deletions(-)
diff --git a/modules/img2img.py b/modules/img2img.py
index f4455c90f..2ff8e2617 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -23,8 +23,10 @@ def process_batch(p, input_dir, output_dir, args):
print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.")
+ save_normally = output_dir == ''
+
p.do_not_save_grid = True
- p.do_not_save_samples = True
+ p.do_not_save_samples = not save_normally
state.job_count = len(images) * p.n_iter
@@ -48,7 +50,8 @@ def process_batch(p, input_dir, output_dir, args):
left, right = os.path.splitext(filename)
filename = f"{left}-{n}{right}"
- processed_image.save(os.path.join(output_dir, filename))
+ if not save_normally:
+ processed_image.save(os.path.join(output_dir, filename))
def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, init_img, init_img_with_mask, init_img_inpaint, init_mask_inpaint, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, *args):
diff --git a/modules/ui.py b/modules/ui.py
index 164321512..55f7aa953 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -658,7 +658,7 @@ def create_ui(wrap_gradio_gpu_call):
with gr.TabItem('Batch img2img', id='batch'):
                hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
-                gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.{hidden}</p>")
+                gr.HTML(f"<p class=\"text-gray-500\">Process images in a directory on the same machine where the server is running.<br>Use an empty output directory to save pictures normally instead of writing to the output directory.{hidden}</p>")
img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs)
img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs)
From 2865ef4b9ab16d56326cc805541bebcf01d099bc Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Mon, 3 Oct 2022 13:10:03 +0300
Subject: [PATCH 057/460] fix broken date in TI
---
modules/textual_inversion/textual_inversion.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index 8686f5347..cd9f34984 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -164,7 +164,7 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps,
filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt')
- log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%d-%m"), embedding_name)
+ log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name)
if save_embedding_every > 0:
embedding_dir = os.path.join(log_directory, "embeddings")
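
A quick standalone check (plain Python) shows what the old format string did: "%Y-%d-%m" swaps the day and month fields, so log directories neither read naturally nor sort chronologically:

    import datetime

    d = datetime.datetime(2022, 10, 3)
    print(d.strftime("%Y-%d-%m"))  # 2022-03-10 -- reads as March 10th
    print(d.strftime("%Y-%m-%d"))  # 2022-10-03 -- ISO-style, sorts chronologically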
From 2a7f48cdb8dcf9acb02610cccae0d1ee5d260bc2 Mon Sep 17 00:00:00 2001
From: fuzzytent
Date: Fri, 30 Sep 2022 16:02:16 +0200
Subject: [PATCH 058/460] Improve styling of gallery items, particularly in
dark mode
---
style.css | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/style.css b/style.css
index 9709c4eec..e11316b96 100644
--- a/style.css
+++ b/style.css
@@ -403,3 +403,7 @@ input[type="range"]{
.red {
color: red;
}
+
+.gallery-item {
+ --tw-bg-opacity: 0 !important;
+}
From 5ef0baf5eaec7f21a1666af424405cbee19f3764 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 4 Oct 2022 08:52:11 +0300
Subject: [PATCH 059/460] add support for gelbooru tags in filenames for
textual inversion
---
modules/textual_inversion/dataset.py | 7 +++++--
modules/textual_inversion/preprocess.py | 4 +++-
2 files changed, 8 insertions(+), 3 deletions(-)
diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py
index e8394ff65..7c44ea5be 100644
--- a/modules/textual_inversion/dataset.py
+++ b/modules/textual_inversion/dataset.py
@@ -9,6 +9,9 @@ from torchvision import transforms
import random
import tqdm
from modules import devices
+import re
+
+re_tag = re.compile(r"[a-zA-Z][_\w\d()]+")
class PersonalizedBase(Dataset):
@@ -38,8 +41,8 @@ class PersonalizedBase(Dataset):
image = image.resize((self.width, self.height), PIL.Image.BICUBIC)
filename = os.path.basename(path)
- filename_tokens = os.path.splitext(filename)[0].replace('_', '-').replace(' ', '-').split('-')
- filename_tokens = [token for token in filename_tokens if token.isalpha()]
+ filename_tokens = os.path.splitext(filename)[0]
+ filename_tokens = re_tag.findall(filename_tokens)
npimage = np.array(image).astype(np.uint8)
npimage = (npimage / 127.5 - 1.0).astype(np.float32)
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
index 209e928ff..f545a9937 100644
--- a/modules/textual_inversion/preprocess.py
+++ b/modules/textual_inversion/preprocess.py
@@ -26,7 +26,9 @@ def preprocess(process_src, process_dst, process_flip, process_split, process_ca
if process_caption:
caption = "-" + shared.interrogator.generate_caption(image)
else:
- caption = ""
+ caption = filename
+ caption = os.path.splitext(caption)[0]
+ caption = os.path.basename(caption)
image.save(os.path.join(dst, f"{index:05}-{subindex[0]}{caption}.png"))
subindex[0] += 1
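
As a rough standalone check of the new tokenizer (only the regex is re-created here), underscore-joined tags survive intact; note that a match must start with a letter, so a leading digit is dropped:

    import re

    re_tag = re.compile(r"[a-zA-Z][_\w\d()]+")

    print(re_tag.findall("00123-1girl long_hair holding_sword"))
    # ['girl', 'long_hair', 'holding_sword']
    # the sequence number and the leading "1" of "1girl" are not part of any match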
From 1c5604791da7e57f40880698666b6617a1754c65 Mon Sep 17 00:00:00 2001
From: DoTheSneedful
Date: Mon, 3 Oct 2022 22:20:09 -0400
Subject: [PATCH 060/460] Add a prompt order option to XY plot script
---
scripts/xy_grid.py | 40 ++++++++++++++++++++++++++++++++++++++--
1 file changed, 38 insertions(+), 2 deletions(-)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 146663b0a..044c30e61 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -1,5 +1,6 @@
from collections import namedtuple
from copy import copy
+from itertools import permutations
import random
from PIL import Image
@@ -28,6 +29,27 @@ def apply_prompt(p, x, xs):
p.prompt = p.prompt.replace(xs[0], x)
p.negative_prompt = p.negative_prompt.replace(xs[0], x)
+def apply_order(p, x, xs):
+ token_order = []
+
+    # Initially grab the tokens from the prompt so they can later be replaced in order of earliest seen in the prompt
+ for token in x:
+ token_order.append((p.prompt.find(token), token))
+
+ token_order.sort(key=lambda t: t[0])
+
+ search_from_pos = 0
+ for idx, token in enumerate(x):
+ original_pos, old_token = token_order[idx]
+
+ # Get position of the token again as it will likely change as tokens are being replaced
+ pos = p.prompt.find(old_token)
+ if original_pos >= 0:
+ # Avoid trying to replace what was just replaced by searching later in the prompt string
+ p.prompt = p.prompt[0:search_from_pos] + p.prompt[search_from_pos:].replace(old_token, token, 1)
+
+ search_from_pos = pos + len(token)
+
samplers_dict = {}
for i, sampler in enumerate(modules.sd_samplers.samplers):
@@ -60,7 +82,8 @@ def format_value_add_label(p, opt, x):
def format_value(p, opt, x):
if type(x) == float:
x = round(x, 8)
-
+ if type(x) == type(list()):
+ x = str(x)
return x
def do_nothing(p, x, xs):
@@ -89,6 +112,7 @@ axis_options = [
AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label),
AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label),
AxisOption("Eta", float, apply_field("eta"), format_value_add_label),
+ AxisOption("Prompt order", type(list()), apply_order, format_value),
AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label), # as it is now all AxisOptionImg2Img items must go after AxisOption ones
]
@@ -159,7 +183,11 @@ class Script(scripts.Script):
if opt.label == 'Nothing':
return [0]
- valslist = [x.strip() for x in vals.split(",")]
+ if opt.type == type(list()):
+ valslist = [x for x in vals]
+ else:
+ valslist = [x.strip() for x in vals.split(",")]
+
if opt.type == int:
valslist_ext = []
@@ -212,9 +240,17 @@ class Script(scripts.Script):
return valslist
x_opt = axis_options[x_type]
+
+ if x_opt.label == "Prompt order":
+ x_values = list(permutations([x.strip() for x in x_values.split(",")]))
+
xs = process_axis(x_opt, x_values)
y_opt = axis_options[y_type]
+
+ if y_opt.label == "Prompt order":
+ y_values = list(permutations([y.strip() for y in y_values.split(",")]))
+
ys = process_axis(y_opt, y_values)
def fix_axis_seeds(axis_opt, axis_list):
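
Axis values for this option are expanded with itertools.permutations, so the grid grows factorially with the number of comma-separated tokens; a small illustration:

    from itertools import permutations

    x_values = "red, green, blue"
    xs = list(permutations([x.strip() for x in x_values.split(",")]))
    print(len(xs))  # 6 -- three tokens give 3! orderings
    print(xs[0])    # ('red', 'green', 'blue')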
From 1a6d40db35656083d5bf9d3a3430b45fda4e85eb Mon Sep 17 00:00:00 2001
From: DoTheSneedful
Date: Tue, 4 Oct 2022 00:18:15 -0400
Subject: [PATCH 061/460] Fix token ordering in prompt order XY plot
---
scripts/xy_grid.py | 13 +++++--------
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 044c30e61..5bcd39217 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -32,24 +32,21 @@ def apply_prompt(p, x, xs):
def apply_order(p, x, xs):
token_order = []
-    # Initially grab the tokens from the prompt so they can later be replaced in order of earliest seen in the prompt
+    # Initially grab the tokens from the prompt so they can be replaced in order of earliest seen
for token in x:
token_order.append((p.prompt.find(token), token))
token_order.sort(key=lambda t: t[0])
search_from_pos = 0
- for idx, token in enumerate(x):
- original_pos, old_token = token_order[idx]
-
+ for idx, (original_pos, old_token) in enumerate(token_order):
# Get position of the token again as it will likely change as tokens are being replaced
- pos = p.prompt.find(old_token)
+ pos = search_from_pos + p.prompt[search_from_pos:].find(old_token)
if original_pos >= 0:
# Avoid trying to replace what was just replaced by searching later in the prompt string
- p.prompt = p.prompt[0:search_from_pos] + p.prompt[search_from_pos:].replace(old_token, token, 1)
-
- search_from_pos = pos + len(token)
+ p.prompt = p.prompt[0:search_from_pos] + p.prompt[search_from_pos:].replace(old_token, x[idx], 1)
+ search_from_pos = pos + len(x[idx])
samplers_dict = {}
for i, sampler in enumerate(modules.sd_samplers.samplers):
From 56371153b545e3a43c3a5f206264019af361f3af Mon Sep 17 00:00:00 2001
From: DoTheSneedful
Date: Tue, 4 Oct 2022 01:07:36 -0400
Subject: [PATCH 062/460] XY plot prompt order simplify logic
---
scripts/xy_grid.py | 22 ++++++++++++++--------
1 file changed, 14 insertions(+), 8 deletions(-)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 5bcd39217..7def47f57 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -38,15 +38,21 @@ def apply_order(p, x, xs):
token_order.sort(key=lambda t: t[0])
- search_from_pos = 0
- for idx, (original_pos, old_token) in enumerate(token_order):
- # Get position of the token again as it will likely change as tokens are being replaced
- pos = search_from_pos + p.prompt[search_from_pos:].find(old_token)
- if original_pos >= 0:
- # Avoid trying to replace what was just replaced by searching later in the prompt string
- p.prompt = p.prompt[0:search_from_pos] + p.prompt[search_from_pos:].replace(old_token, x[idx], 1)
+ prompt_parts = []
- search_from_pos = pos + len(x[idx])
+ # Split the prompt up, taking out the tokens
+ for _, token in token_order:
+ n = p.prompt.find(token)
+ prompt_parts.append(p.prompt[0:n])
+ p.prompt = p.prompt[n + len(token):]
+
+ # Rebuild the prompt with the tokens in the order we want
+ prompt_tmp = ""
+ for idx, part in enumerate(prompt_parts):
+ prompt_tmp += part
+ prompt_tmp += x[idx]
+ p.prompt = prompt_tmp + p.prompt
+
samplers_dict = {}
for i, sampler in enumerate(modules.sd_samplers.samplers):
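
The split-and-rebuild idea is easier to follow in isolation. A hedged re-implementation sketch (the function name and inputs are illustrative, not from the patch): the prompt is cut apart at each token, earliest occurrence first, then stitched back together with the tokens in the requested order.

    def reorder(prompt, tokens, new_order):
        # cut the prompt apart at each token, earliest occurrence first
        token_order = sorted((prompt.find(t), t) for t in tokens)
        parts = []
        for _, token in token_order:
            n = prompt.find(token)
            parts.append(prompt[:n])
            prompt = prompt[n + len(token):]
        # stitch the pieces back together with the tokens in the new order
        out = ""
        for part, token in zip(parts, new_order):
            out += part + token
        return out + prompt

    print(reorder("a cat on a mat", ["cat", "mat"], ["mat", "cat"]))
    # a mat on a cat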
From 556c36b9607e3f4eacdddc85f8e7a78b29476ea7 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 4 Oct 2022 09:18:00 +0300
Subject: [PATCH 063/460] add hint, refactor code for #1607
---
javascript/hints.js | 1 +
scripts/xy_grid.py | 35 ++++++++++++++++++-----------------
2 files changed, 19 insertions(+), 17 deletions(-)
diff --git a/javascript/hints.js b/javascript/hints.js
index e72e93381..8adcd983e 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -47,6 +47,7 @@ titles = {
"Custom code": "Run Python code. Advanced user only. Must run program with --allow-code for this to work",
"Prompt S/R": "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others",
+    "Prompt order": "Separate a list of words with commas; the script will make a variation of the prompt with those words in every possible order",
"Tiling": "Produce an image that can be tiled.",
"Tile overlap": "For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.",
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 7def47f57..1237e754d 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -29,10 +29,11 @@ def apply_prompt(p, x, xs):
p.prompt = p.prompt.replace(xs[0], x)
p.negative_prompt = p.negative_prompt.replace(xs[0], x)
+
def apply_order(p, x, xs):
token_order = []
-    # Initially grab the tokens from the prompt so they can be replaced in order of earliest seen
+    # Initially grab the tokens from the prompt, so they can be replaced in order of earliest seen
for token in x:
token_order.append((p.prompt.find(token), token))
@@ -85,17 +86,26 @@ def format_value_add_label(p, opt, x):
def format_value(p, opt, x):
if type(x) == float:
x = round(x, 8)
- if type(x) == type(list()):
- x = str(x)
return x
+
+def format_value_join_list(p, opt, x):
+ return ", ".join(x)
+
+
def do_nothing(p, x, xs):
pass
+
def format_nothing(p, opt, x):
return ""
+def str_permutations(x):
+ """dummy function for specifying it in AxisOption's type when you want to get a list of permutations"""
+ return x
+
+
AxisOption = namedtuple("AxisOption", ["label", "type", "apply", "format_value"])
AxisOptionImg2Img = namedtuple("AxisOptionImg2Img", ["label", "type", "apply", "format_value"])
@@ -108,6 +118,7 @@ axis_options = [
AxisOption("Steps", int, apply_field("steps"), format_value_add_label),
AxisOption("CFG Scale", float, apply_field("cfg_scale"), format_value_add_label),
AxisOption("Prompt S/R", str, apply_prompt, format_value),
+ AxisOption("Prompt order", str_permutations, apply_order, format_value_join_list),
AxisOption("Sampler", str, apply_sampler, format_value),
AxisOption("Checkpoint name", str, apply_checkpoint, format_value),
AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label),
@@ -115,7 +126,6 @@ axis_options = [
AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label),
AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label),
AxisOption("Eta", float, apply_field("eta"), format_value_add_label),
- AxisOption("Prompt order", type(list()), apply_order, format_value),
AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label), # as it is now all AxisOptionImg2Img items must go after AxisOption ones
]
@@ -158,6 +168,7 @@ re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d
re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
+
class Script(scripts.Script):
def title(self):
return "X/Y plot"
@@ -186,11 +197,7 @@ class Script(scripts.Script):
if opt.label == 'Nothing':
return [0]
- if opt.type == type(list()):
- valslist = [x for x in vals]
- else:
- valslist = [x.strip() for x in vals.split(",")]
-
+ valslist = [x.strip() for x in vals.split(",")]
if opt.type == int:
valslist_ext = []
@@ -237,23 +244,17 @@ class Script(scripts.Script):
valslist_ext.append(val)
valslist = valslist_ext
+ elif opt.type == str_permutations:
+ valslist = list(permutations(valslist))
valslist = [opt.type(x) for x in valslist]
return valslist
x_opt = axis_options[x_type]
-
- if x_opt.label == "Prompt order":
- x_values = list(permutations([x.strip() for x in x_values.split(",")]))
-
xs = process_axis(x_opt, x_values)
y_opt = axis_options[y_type]
-
- if y_opt.label == "Prompt order":
- y_values = list(permutations([y.strip() for y in y_values.split(",")]))
-
ys = process_axis(y_opt, y_values)
def fix_axis_seeds(axis_opt, axis_list):
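
str_permutations doubles as a type sentinel: process_axis branches on it, and because it is an identity function the final `[opt.type(x) for x in valslist]` pass leaves the permutation tuples untouched. A condensed sketch of the pattern (simplified process_axis, not the full script):

    from itertools import permutations

    def str_permutations(x):
        """identity function used as a marker type"""
        return x

    def process_axis(opt_type, vals):
        valslist = [v.strip() for v in vals.split(",")]
        if opt_type == str_permutations:
            valslist = list(permutations(valslist))
        return [opt_type(v) for v in valslist]

    print(process_axis(str_permutations, "a, b"))  # [('a', 'b'), ('b', 'a')]
    print(process_axis(str, "a, b"))               # ['a', 'b']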
From eeab7aedf532680a6ae9058ee272450bb07e41eb Mon Sep 17 00:00:00 2001
From: brkirch
Date: Tue, 4 Oct 2022 04:24:35 -0400
Subject: [PATCH 064/460] Add --use-cpu command line option
Remove MPS detection to use CPU for GFPGAN / CodeFormer and add a --use-cpu command line option.
---
modules/devices.py | 5 ++---
modules/esrgan_model.py | 9 ++++-----
modules/scunet_model.py | 8 ++++----
modules/shared.py | 9 +++++++--
4 files changed, 17 insertions(+), 14 deletions(-)
diff --git a/modules/devices.py b/modules/devices.py
index 5d9c7a076..b5a0cd29e 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -1,8 +1,8 @@
import torch
-# has_mps is only available in nightly pytorch (for now), `getattr` for compatibility
from modules import errors
+# has_mps is only available in nightly pytorch (for now), `getattr` for compatibility
has_mps = getattr(torch, 'has_mps', False)
cpu = torch.device("cpu")
@@ -32,8 +32,7 @@ def enable_tf32():
errors.run(enable_tf32, "Enabling TF32")
-device = get_optimal_device()
-device_gfpgan = device_codeformer = cpu if device.type == 'mps' else device
+device = device_gfpgan = device_esrgan = device_scunet = device_codeformer = get_optimal_device()
dtype = torch.float16
def randn(seed, shape):
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index 4aed9283c..d17e730f9 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -6,8 +6,7 @@ from PIL import Image
from basicsr.utils.download_util import load_file_from_url
import modules.esrgam_model_arch as arch
-from modules import shared, modelloader, images
-from modules.devices import has_mps
+from modules import shared, modelloader, images, devices
from modules.paths import models_path
from modules.upscaler import Upscaler, UpscalerData
from modules.shared import opts
@@ -97,7 +96,7 @@ class UpscalerESRGAN(Upscaler):
model = self.load_model(selected_model)
if model is None:
return img
- model.to(shared.device)
+ model.to(devices.device_esrgan)
img = esrgan_upscale(model, img)
return img
@@ -112,7 +111,7 @@ class UpscalerESRGAN(Upscaler):
print("Unable to load %s from %s" % (self.model_path, filename))
return None
- pretrained_net = torch.load(filename, map_location='cpu' if has_mps else None)
+ pretrained_net = torch.load(filename, map_location='cpu' if shared.device.type == 'mps' else None)
crt_model = arch.RRDBNet(3, 3, 64, 23, gc=32)
pretrained_net = fix_model_layers(crt_model, pretrained_net)
@@ -127,7 +126,7 @@ def upscale_without_tiling(model, img):
img = img[:, :, ::-1]
img = np.moveaxis(img, 2, 0) / 255
img = torch.from_numpy(img).float()
- img = img.unsqueeze(0).to(shared.device)
+ img = img.unsqueeze(0).to(devices.device_esrgan)
with torch.no_grad():
output = model(img)
output = output.squeeze().float().cpu().clamp_(0, 1).numpy()
diff --git a/modules/scunet_model.py b/modules/scunet_model.py
index 7987ac145..fb64b7409 100644
--- a/modules/scunet_model.py
+++ b/modules/scunet_model.py
@@ -8,7 +8,7 @@ import torch
from basicsr.utils.download_util import load_file_from_url
import modules.upscaler
-from modules import shared, modelloader
+from modules import devices, modelloader
from modules.paths import models_path
from modules.scunet_model_arch import SCUNet as net
@@ -51,12 +51,12 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
if model is None:
return img
- device = shared.device
+ device = devices.device_scunet
img = np.array(img)
img = img[:, :, ::-1]
img = np.moveaxis(img, 2, 0) / 255
img = torch.from_numpy(img).float()
- img = img.unsqueeze(0).to(shared.device)
+ img = img.unsqueeze(0).to(device)
img = img.to(device)
with torch.no_grad():
@@ -69,7 +69,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler):
return PIL.Image.fromarray(output, 'RGB')
def load_model(self, path: str):
- device = shared.device
+ device = devices.device_scunet
if "http" in path:
filename = load_file_from_url(url=self.model_url, model_dir=self.model_path, file_name="%s.pth" % self.name,
progress=True)
diff --git a/modules/shared.py b/modules/shared.py
index 2a599e9cf..7899ab8d1 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -12,7 +12,7 @@ import modules.interrogate
import modules.memmon
import modules.sd_models
import modules.styles
-from modules.devices import get_optimal_device
+import modules.devices as devices
from modules.paths import script_path, sd_path
sd_model_file = os.path.join(script_path, 'model.ckpt')
@@ -46,6 +46,7 @@ parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
+parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU for specified modules", default=[])
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
@@ -63,7 +64,11 @@ parser.add_argument("--enable-console-prompts", action='store_true', help="print
cmd_opts = parser.parse_args()
-device = get_optimal_device()
+
+devices.device, devices.device_gfpgan, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
+(devices.cpu if x in cmd_opts.use_cpu else devices.get_optimal_device() for x in ['SD', 'GFPGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'])
+
+device = devices.device
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
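
The assignment works by unpacking a generator expression, one device per module; a standalone sketch (use_cpu and the device strings stand in for cmd_opts.use_cpu and real torch devices):

    use_cpu = ['GFPGAN']  # e.g. launched with --use-cpu GFPGAN

    def get_optimal_device():
        return 'cuda'  # stub for illustration

    device, device_gfpgan, device_esrgan = \
        ('cpu' if x in use_cpu else get_optimal_device() for x in ['SD', 'GFPGAN', 'ESRGAN'])

    print(device, device_gfpgan, device_esrgan)  # cuda cpu cuda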
From 27ddc24fdee1fbe709054a43235ab7f9c51b3e9f Mon Sep 17 00:00:00 2001
From: brkirch
Date: Tue, 4 Oct 2022 05:18:17 -0400
Subject: [PATCH 065/460] Add BSRGAN to --use-cpu
---
modules/bsrgan_model.py | 6 +++---
modules/devices.py | 2 +-
modules/shared.py | 6 +++---
3 files changed, 7 insertions(+), 7 deletions(-)
diff --git a/modules/bsrgan_model.py b/modules/bsrgan_model.py
index e62c66577..3bd80791a 100644
--- a/modules/bsrgan_model.py
+++ b/modules/bsrgan_model.py
@@ -8,7 +8,7 @@ import torch
from basicsr.utils.download_util import load_file_from_url
import modules.upscaler
-from modules import shared, modelloader
+from modules import devices, modelloader
from modules.bsrgan_model_arch import RRDBNet
from modules.paths import models_path
@@ -44,13 +44,13 @@ class UpscalerBSRGAN(modules.upscaler.Upscaler):
model = self.load_model(selected_file)
if model is None:
return img
- model.to(shared.device)
+ model.to(devices.device_bsrgan)
torch.cuda.empty_cache()
img = np.array(img)
img = img[:, :, ::-1]
img = np.moveaxis(img, 2, 0) / 255
img = torch.from_numpy(img).float()
- img = img.unsqueeze(0).to(shared.device)
+ img = img.unsqueeze(0).to(devices.device_bsrgan)
with torch.no_grad():
output = model(img)
output = output.squeeze().float().cpu().clamp_(0, 1).numpy()
diff --git a/modules/devices.py b/modules/devices.py
index b5a0cd29e..b78996322 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -32,7 +32,7 @@ def enable_tf32():
errors.run(enable_tf32, "Enabling TF32")
-device = device_gfpgan = device_esrgan = device_scunet = device_codeformer = get_optimal_device()
+device = device_gfpgan = device_bsrgan = device_esrgan = device_scunet = device_codeformer = get_optimal_device()
dtype = torch.float16
def randn(seed, shape):
diff --git a/modules/shared.py b/modules/shared.py
index 7899ab8d1..95b98a06e 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -46,7 +46,7 @@ parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
-parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU for specified modules", default=[])
+parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU for specified modules", default=[])
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
@@ -65,8 +65,8 @@ parser.add_argument("--enable-console-prompts", action='store_true', help="print
cmd_opts = parser.parse_args()
-devices.device, devices.device_gfpgan, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
-(devices.cpu if x in cmd_opts.use_cpu else devices.get_optimal_device() for x in ['SD', 'GFPGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'])
+devices.device, devices.device_gfpgan, devices.device_bsrgan, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \
+(devices.cpu if x in cmd_opts.use_cpu else devices.get_optimal_device() for x in ['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'])
device = devices.device
From dc9c5a97742e3a34d37da7108642d8adc0dc5858 Mon Sep 17 00:00:00 2001
From: brkirch
Date: Tue, 4 Oct 2022 05:22:50 -0400
Subject: [PATCH 066/460] Modify --use-cpu description
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index 95b98a06e..25aff5b0e 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -46,7 +46,7 @@ parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
-parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU for specified modules", default=[])
+parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU as torch device for specified modules", default=[])
parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests")
parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None)
parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False)
From 6c6ae28bf5fd1e8bc3e8f64a3430b6f29f338f77 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 4 Oct 2022 12:32:22 +0300
Subject: [PATCH 067/460] send all three of GFPGAN's and codeformer's models to
CPU memory instead of just one for #1283
---
modules/codeformer_model.py | 12 ++++++++++--
modules/devices.py | 10 ++++++++++
modules/gfpgan_model.py | 14 ++++++++++++--
modules/processing.py | 16 +++++++++-------
4 files changed, 41 insertions(+), 11 deletions(-)
diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py
index a29f38550..e6d9fa4f4 100644
--- a/modules/codeformer_model.py
+++ b/modules/codeformer_model.py
@@ -69,10 +69,14 @@ def setup_model(dirname):
self.net = net
self.face_helper = face_helper
- self.net.to(devices.device_codeformer)
return net, face_helper
+ def send_model_to(self, device):
+ self.net.to(device)
+ self.face_helper.face_det.to(device)
+ self.face_helper.face_parse.to(device)
+
def restore(self, np_image, w=None):
np_image = np_image[:, :, ::-1]
@@ -82,6 +86,8 @@ def setup_model(dirname):
if self.net is None or self.face_helper is None:
return np_image
+ self.send_model_to(devices.device_codeformer)
+
self.face_helper.clean_all()
self.face_helper.read_image(np_image)
self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5)
@@ -113,8 +119,10 @@ def setup_model(dirname):
if original_resolution != restored_img.shape[0:2]:
restored_img = cv2.resize(restored_img, (0, 0), fx=original_resolution[1]/restored_img.shape[1], fy=original_resolution[0]/restored_img.shape[0], interpolation=cv2.INTER_LINEAR)
+ self.face_helper.clean_all()
+
if shared.opts.face_restoration_unload:
- self.net.to(devices.cpu)
+ self.send_model_to(devices.cpu)
return restored_img
diff --git a/modules/devices.py b/modules/devices.py
index ff82f2f64..12aab6652 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -1,3 +1,5 @@
+import contextlib
+
import torch
# has_mps is only available in nightly pytorch (for now), `getattr` for compatibility
@@ -57,3 +59,11 @@ def randn_without_seed(shape):
return torch.randn(shape, device=device)
+
+def autocast():
+ from modules import shared
+
+ if dtype == torch.float32 or shared.cmd_opts.precision == "full":
+ return contextlib.nullcontext()
+
+ return torch.autocast("cuda")
diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py
index dd3fbcab1..5586b554b 100644
--- a/modules/gfpgan_model.py
+++ b/modules/gfpgan_model.py
@@ -37,22 +37,32 @@ def gfpgann():
print("Unable to load gfpgan model!")
return None
model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None)
- model.gfpgan.to(shared.device)
loaded_gfpgan_model = model
return model
+def send_model_to(model, device):
+ model.gfpgan.to(device)
+ model.face_helper.face_det.to(device)
+ model.face_helper.face_parse.to(device)
+
+
def gfpgan_fix_faces(np_image):
model = gfpgann()
if model is None:
return np_image
+
+ send_model_to(model, devices.device)
+
np_image_bgr = np_image[:, :, ::-1]
cropped_faces, restored_faces, gfpgan_output_bgr = model.enhance(np_image_bgr, has_aligned=False, only_center_face=False, paste_back=True)
np_image = gfpgan_output_bgr[:, :, ::-1]
+ model.face_helper.clean_all()
+
if shared.opts.face_restoration_unload:
- model.gfpgan.to(devices.cpu)
+ send_model_to(model, devices.cpu)
return np_image
diff --git a/modules/processing.py b/modules/processing.py
index 0a4b6198f..9cbecdd83 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1,4 +1,3 @@
-import contextlib
import json
import math
import os
@@ -330,9 +329,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
infotexts = []
output_images = []
- precision_scope = torch.autocast if cmd_opts.precision == "autocast" else contextlib.nullcontext
- ema_scope = (contextlib.nullcontext if cmd_opts.lowvram else p.sd_model.ema_scope)
- with torch.no_grad(), precision_scope("cuda"), ema_scope():
+
+ with torch.no_grad():
p.init(all_prompts, all_seeds, all_subseeds)
if state.job_count == -1:
@@ -351,8 +349,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
#uc = p.sd_model.get_learned_conditioning(len(prompts) * [p.negative_prompt])
#c = p.sd_model.get_learned_conditioning(prompts)
- uc = prompt_parser.get_learned_conditioning(len(prompts) * [p.negative_prompt], p.steps)
- c = prompt_parser.get_learned_conditioning(prompts, p.steps)
+ with devices.autocast():
+ uc = prompt_parser.get_learned_conditioning(len(prompts) * [p.negative_prompt], p.steps)
+ c = prompt_parser.get_learned_conditioning(prompts, p.steps)
if len(model_hijack.comments) > 0:
for comment in model_hijack.comments:
@@ -361,7 +360,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if p.n_iter > 1:
shared.state.job = f"Batch {n+1} out of {p.n_iter}"
- samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength)
+ with devices.autocast():
+ samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength).to(devices.dtype)
+
if state.interrupted:
# if we are interruped, sample returns just noise
@@ -386,6 +387,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
devices.torch_gc()
x_sample = modules.face_restoration.restore_faces(x_sample)
+ devices.torch_gc()
image = Image.fromarray(x_sample)
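
The new devices.autocast() helper replaces the module-level precision_scope: it returns a no-op context manager at full precision and torch.autocast("cuda") otherwise. A minimal sketch, assuming half precision and inlining the shared-options lookup:

    import contextlib
    import torch

    dtype = torch.float16       # assumption: running at half precision
    precision = "autocast"      # stand-in for cmd_opts.precision

    def autocast():
        if dtype == torch.float32 or precision == "full":
            return contextlib.nullcontext()
        return torch.autocast("cuda")

    with autocast():
        ...  # conditioning and sampling run under mixed precision here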
From 2f1b61d97987ae0a52a7dfc6bc99c68928bdb594 Mon Sep 17 00:00:00 2001
From: dan
Date: Mon, 3 Oct 2022 19:25:36 +0800
Subject: [PATCH 068/460] Allow nested structures inside schedules
---
modules/prompt_parser.py | 119 +++++++++++++++++---------------------
requirements.txt | 1 +
requirements_versions.txt | 1 +
3 files changed, 55 insertions(+), 66 deletions(-)
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index e811eb9ec..99c8ed99c 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -1,20 +1,11 @@
import re
from collections import namedtuple
import torch
+from lark import Lark, Transformer, Visitor
+import functools
import modules.shared as shared
-re_prompt = re.compile(r'''
-(.*?)
-\[
- ([^]:]+):
- (?:([^]:]*):)?
- ([0-9]*\.?[0-9]+)
-]
-|
-(.+)
-''', re.X)
-
# a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]"
# will be represented with prompt_schedule like this (assuming steps=100):
# [25, 'fantasy landscape with a mountain and an oak in foreground shoddy']
@@ -25,61 +16,57 @@ re_prompt = re.compile(r'''
def get_learned_conditioning_prompt_schedules(prompts, steps):
- res = []
- cache = {}
-
- for prompt in prompts:
- prompt_schedule: list[list[str | int]] = [[steps, ""]]
-
- cached = cache.get(prompt, None)
- if cached is not None:
- res.append(cached)
- continue
-
- for m in re_prompt.finditer(prompt):
- plaintext = m.group(1) if m.group(5) is None else m.group(5)
- concept_from = m.group(2)
- concept_to = m.group(3)
- if concept_to is None:
- concept_to = concept_from
- concept_from = ""
- swap_position = float(m.group(4)) if m.group(4) is not None else None
-
- if swap_position is not None:
- if swap_position < 1:
- swap_position = swap_position * steps
- swap_position = int(min(swap_position, steps))
-
- swap_index = None
- found_exact_index = False
- for i in range(len(prompt_schedule)):
- end_step = prompt_schedule[i][0]
- prompt_schedule[i][1] += plaintext
-
- if swap_position is not None and swap_index is None:
- if swap_position == end_step:
- swap_index = i
- found_exact_index = True
-
- if swap_position < end_step:
- swap_index = i
-
- if swap_index is not None:
- if not found_exact_index:
- prompt_schedule.insert(swap_index, [swap_position, prompt_schedule[swap_index][1]])
-
- for i in range(len(prompt_schedule)):
- end_step = prompt_schedule[i][0]
- must_replace = swap_position < end_step
-
- prompt_schedule[i][1] += concept_to if must_replace else concept_from
-
- res.append(prompt_schedule)
- cache[prompt] = prompt_schedule
- #for t in prompt_schedule:
- # print(t)
-
- return res
+ grammar = r"""
+ start: prompt
+ prompt: (emphasized | scheduled | weighted | plain)*
+ !emphasized: "(" prompt ")"
+ | "(" prompt ":" prompt ")"
+ | "[" prompt "]"
+ scheduled: "[" (prompt ":")? prompt ":" NUMBER "]"
+ !weighted: "{" weighted_item ("|" weighted_item)* "}"
+ !weighted_item: prompt (":" prompt)?
+ plain: /([^\\\[\](){}:|]|\\.)+/
+ %import common.SIGNED_NUMBER -> NUMBER
+ """
+ parser = Lark(grammar, parser='lalr')
+ def collect_steps(steps, tree):
+ l = [steps]
+ class CollectSteps(Visitor):
+ def scheduled(self, tree):
+ tree.children[-1] = float(tree.children[-1])
+ if tree.children[-1] < 1:
+ tree.children[-1] *= steps
+ tree.children[-1] = min(steps, int(tree.children[-1]))
+ l.append(tree.children[-1])
+ CollectSteps().visit(tree)
+ return sorted(set(l))
+ def at_step(step, tree):
+ class AtStep(Transformer):
+ def scheduled(self, args):
+ if len(args) == 2:
+ before, after, when = (), *args
+ else:
+ before, after, when = args
+ yield before if step <= when else after
+ def start(self, args):
+ def flatten(x):
+ if type(x) == str:
+ yield x
+ else:
+ for gen in x:
+ yield from flatten(gen)
+ return ''.join(flatten(args[0]))
+ def plain(self, args):
+ yield args[0].value
+ def __default__(self, data, children, meta):
+ for child in children:
+ yield from child
+ return AtStep().transform(tree)
+ @functools.cache
+ def get_schedule(prompt):
+ tree = parser.parse(prompt)
+ return [[t, at_step(t, tree)] for t in collect_steps(steps, tree)]
+ return [get_schedule(prompt) for prompt in prompts]
ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at_step", "cond"])
diff --git a/requirements.txt b/requirements.txt
index d4b337fce..631fe616a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -22,3 +22,4 @@ clean-fid
resize-right
torchdiffeq
kornia
+lark
diff --git a/requirements_versions.txt b/requirements_versions.txt
index 8a9acf205..fdff26878 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -21,3 +21,4 @@ clean-fid==0.1.29
resize-right==0.0.2
torchdiffeq==0.2.3
kornia==0.6.7
+lark==1.1.2
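
A cut-down grammar (simplified from the patch, Earley parsing instead of LALR, and assuming the lark package is installed) shows the parsing idea on a single schedule:

    import lark

    grammar = r"""
    start: prompt
    prompt: (scheduled | plain)*
    scheduled: "[" prompt ":" prompt ":" NUMBER "]"
    plain: /([^\[\]:])+/
    %import common.SIGNED_NUMBER -> NUMBER
    """
    tree = lark.Lark(grammar).parse("a [mountain:lake:0.25] b")
    print(tree.pretty())  # nested prompt/scheduled/plain nodes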
From 61652461242951966e5b4cee83ce359cefa91c17 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 4 Oct 2022 14:23:22 +0300
Subject: [PATCH 069/460] support interrupting after the previous change
---
modules/processing.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/modules/processing.py b/modules/processing.py
index 9cbecdd83..6f5599c7d 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -361,7 +361,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
shared.state.job = f"Batch {n+1} out of {p.n_iter}"
with devices.autocast():
- samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength).to(devices.dtype)
+ samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength)
if state.interrupted:
@@ -369,6 +369,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
# use the image collected previously in sampler loop
samples_ddim = shared.state.current_latent
+ samples_ddim = samples_ddim.to(devices.dtype)
+
x_samples_ddim = p.sd_model.decode_first_stage(samples_ddim)
x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
From d5bba20a58f43a9f984bb67b4e17f48661f6b818 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 4 Oct 2022 14:35:12 +0300
Subject: [PATCH 070/460] ignore errors in parse for purposes of token counting
for #1564
---
modules/ui.py | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index 55f7aa953..20dc8c379 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -386,14 +386,22 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info:
outputs=[seed, dummy_component]
)
+
def update_token_counter(text, steps):
- prompt_schedules = get_learned_conditioning_prompt_schedules([text], steps)
+ try:
+ prompt_schedules = get_learned_conditioning_prompt_schedules([text], steps)
+ except Exception:
+ # a parsing error can happen here during typing, and we don't want to bother the user with
+ # messages related to it in console
+ prompt_schedules = [[[steps, text]]]
+
flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules)
- prompts = [prompt_text for step,prompt_text in flat_prompts]
+ prompts = [prompt_text for step, prompt_text in flat_prompts]
tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1])
style_class = ' class="red"' if (token_count > max_length) else ""
return f"{token_count}/{max_length}"
+
def create_toprow(is_img2img):
id_part = "img2img" if is_img2img else "txt2img"
From accd00d6b8258c12b5168918a4c546b02357924a Mon Sep 17 00:00:00 2001
From: Justin Riddiough
Date: Tue, 4 Oct 2022 01:14:28 -0500
Subject: [PATCH 071/460] Explain how to use second progress bar in pycharm
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index 25aff5b0e..11bdf01a7 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -200,7 +200,7 @@ options_templates.update(options_section(('face-restoration', "Face restoration"
options_templates.update(options_section(('system', "System"), {
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}),
"samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
- "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. Broken in PyCharm console."),
+ "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. In PyCharm select 'emulate terminal in console output'."),
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
From ea6b0d98a64290a0305e27126ea59ce1da7959a2 Mon Sep 17 00:00:00 2001
From: Justin Riddiough
Date: Tue, 4 Oct 2022 06:38:45 -0500
Subject: [PATCH 072/460] Remove pycharm note, fix typo
---
modules/shared.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/shared.py b/modules/shared.py
index 11bdf01a7..a7d13b2d4 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -200,7 +200,7 @@ options_templates.update(options_section(('face-restoration', "Face restoration"
options_templates.update(options_section(('system', "System"), {
"memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}),
"samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"),
- "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. In PyCharm select 'emulate terminal in console output'."),
+ "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."),
}))
options_templates.update(options_section(('sd', "Stable Diffusion"), {
@@ -209,7 +209,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
"enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."),
- "enable_emphasis": OptionInfo(True, "Eemphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"),
+ "enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"),
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"filter_nsfw": OptionInfo(False, "Filter NSFW content"),
From eec1b39bd54711ca31e43022d2d6ac8c6d7281da Mon Sep 17 00:00:00 2001
From: Milly
Date: Tue, 4 Oct 2022 20:16:52 +0900
Subject: [PATCH 073/460] Apply prompt pattern last
---
modules/images.py | 43 ++++++++++++++++++++++---------------------
1 file changed, 22 insertions(+), 21 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index bba55158e..5b56c7e37 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -287,32 +287,13 @@ def apply_filename_pattern(x, p, seed, prompt):
if seed is not None:
x = x.replace("[seed]", str(seed))
- if prompt is not None:
- x = x.replace("[prompt]", sanitize_filename_part(prompt))
- if "[prompt_no_styles]" in x:
- prompt_no_style = prompt
- for style in shared.prompt_styles.get_style_prompts(p.styles):
- if len(style) > 0:
- style_parts = [y for y in style.split("{prompt}")]
- for part in style_parts:
- prompt_no_style = prompt_no_style.replace(part, "").replace(", ,", ",").strip().strip(',')
- prompt_no_style = prompt_no_style.replace(style, "").strip().strip(',').strip()
- x = x.replace("[prompt_no_styles]", sanitize_filename_part(prompt_no_style, replace_spaces=False))
-
- x = x.replace("[prompt_spaces]", sanitize_filename_part(prompt, replace_spaces=False))
- if "[prompt_words]" in x:
- words = [x for x in re_nonletters.split(prompt or "") if len(x) > 0]
- if len(words) == 0:
- words = ["empty"]
- x = x.replace("[prompt_words]", sanitize_filename_part(" ".join(words[0:max_prompt_words]), replace_spaces=False))
-
if p is not None:
x = x.replace("[steps]", str(p.steps))
x = x.replace("[cfg]", str(p.cfg_scale))
x = x.replace("[width]", str(p.width))
x = x.replace("[height]", str(p.height))
-
- #currently disabled if using the save button, will work otherwise
+
+ #currently disabled if using the save button, will work otherwise
# if enabled it will cause a bug because styles is not included in the save_files data dictionary
if hasattr(p, "styles"):
x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"] or "None"), replace_spaces=False))
@@ -324,6 +305,26 @@ def apply_filename_pattern(x, p, seed, prompt):
x = x.replace("[datetime]", datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
x = x.replace("[job_timestamp]", shared.state.job_timestamp)
+    # Apply [prompt] last, because it may contain any other replacement word.
+ if prompt is not None:
+ x = x.replace("[prompt]", sanitize_filename_part(prompt))
+ if "[prompt_no_styles]" in x:
+ prompt_no_style = prompt
+ for style in shared.prompt_styles.get_style_prompts(p.styles):
+ if len(style) > 0:
+ style_parts = [y for y in style.split("{prompt}")]
+ for part in style_parts:
+ prompt_no_style = prompt_no_style.replace(part, "").replace(", ,", ",").strip().strip(',')
+ prompt_no_style = prompt_no_style.replace(style, "").strip().strip(',').strip()
+ x = x.replace("[prompt_no_styles]", sanitize_filename_part(prompt_no_style, replace_spaces=False))
+
+ x = x.replace("[prompt_spaces]", sanitize_filename_part(prompt, replace_spaces=False))
+ if "[prompt_words]" in x:
+ words = [x for x in re_nonletters.split(prompt or "") if len(x) > 0]
+ if len(words) == 0:
+ words = ["empty"]
+ x = x.replace("[prompt_words]", sanitize_filename_part(" ".join(words[0:max_prompt_words]), replace_spaces=False))
+
if cmd_opts.hide_ui_dir_config:
x = re.sub(r'^[\\/]+|\.{2,}[\\/]+|[\\/]+\.{2,}', '', x)
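
Why [prompt] has to go last: the prompt text is user-controlled and may itself contain pattern-shaped substrings that later replacements would clobber. A small illustration with a hypothetical pattern:

    pattern, prompt, seed = "[seed]-[prompt]", "photo of [seed] packets", 1234

    # wrong order: prompt first, then the seed pass clobbers the literal "[seed]" text
    wrong = pattern.replace("[prompt]", prompt).replace("[seed]", str(seed))
    # right order: seed first, prompt last
    right = pattern.replace("[seed]", str(seed)).replace("[prompt]", prompt)

    print(wrong)  # 1234-photo of 1234 packets
    print(right)  # 1234-photo of [seed] packets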
From 52cef36f6ba169a8e606ecdcaed73d47378f0e8e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 4 Oct 2022 16:54:31 +0300
Subject: [PATCH 074/460] emergency fix for img2img
---
modules/processing.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/modules/processing.py b/modules/processing.py
index 6f5599c7d..e9c453942 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -331,7 +331,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
output_images = []
with torch.no_grad():
- p.init(all_prompts, all_seeds, all_subseeds)
+ with devices.autocast():
+ p.init(all_prompts, all_seeds, all_subseeds)
if state.job_count == -1:
state.job_count = p.n_iter
From 957e29a8e9cb8ca069799ec69263e188c89ed6a6 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 4 Oct 2022 17:23:48 +0300
Subject: [PATCH 075/460] option to not show images in web ui
---
modules/img2img.py | 3 +++
modules/shared.py | 1 +
modules/txt2img.py | 3 +++
3 files changed, 7 insertions(+)
diff --git a/modules/img2img.py b/modules/img2img.py
index 2ff8e2617..da212d72b 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -129,4 +129,7 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro
if opts.samples_log_stdout:
print(generation_info_js)
+ if opts.do_not_show_images:
+ processed.images = []
+
return processed.images, generation_info_js, plaintext_to_html(processed.info)
diff --git a/modules/shared.py b/modules/shared.py
index a7d13b2d4..ff4e5fa39 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -229,6 +229,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
"show_progress_every_n_steps": OptionInfo(0, "Show show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
"return_grid": OptionInfo(True, "Show grid in results for web"),
+ "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
diff --git a/modules/txt2img.py b/modules/txt2img.py
index d4406c3c0..e985242b3 100644
--- a/modules/txt2img.py
+++ b/modules/txt2img.py
@@ -48,5 +48,8 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2:
if opts.samples_log_stdout:
print(generation_info_js)
+ if opts.do_not_show_images:
+ processed.images = []
+
return processed.images, generation_info_js, plaintext_to_html(processed.info)
From e1b128d8e46bddb9c0b2fd3ee0eefd57e0527ee0 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 4 Oct 2022 17:36:39 +0300
Subject: [PATCH 076/460] do not touch p.seed/p.subseed during processing #1181
---
modules/processing.py | 26 +++++++++++++++++---------
1 file changed, 17 insertions(+), 9 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index e9c453942..8180c63d8 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -248,9 +248,16 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see
return x
+def get_fixed_seed(seed):
+ if seed is None or seed == '' or seed == -1:
+ return int(random.randrange(4294967294))
+
+ return seed
+
+
def fix_seed(p):
- p.seed = int(random.randrange(4294967294)) if p.seed is None or p.seed == '' or p.seed == -1 else p.seed
- p.subseed = int(random.randrange(4294967294)) if p.subseed is None or p.subseed == '' or p.subseed == -1 else p.subseed
+ p.seed = get_fixed_seed(p.seed)
+ p.subseed = get_fixed_seed(p.subseed)
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0):
@@ -292,7 +299,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
devices.torch_gc()
- fix_seed(p)
+ seed = get_fixed_seed(p.seed)
+ subseed = get_fixed_seed(p.subseed)
if p.outpath_samples is not None:
os.makedirs(p.outpath_samples, exist_ok=True)
@@ -311,15 +319,15 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
else:
all_prompts = p.batch_size * p.n_iter * [p.prompt]
- if type(p.seed) == list:
- all_seeds = p.seed
+ if type(seed) == list:
+ all_seeds = seed
else:
- all_seeds = [int(p.seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(all_prompts))]
+ all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(all_prompts))]
- if type(p.subseed) == list:
- all_subseeds = p.subseed
+ if type(subseed) == list:
+ all_subseeds = subseed
else:
- all_subseeds = [int(p.subseed) + x for x in range(len(all_prompts))]
+ all_subseeds = [int(subseed) + x for x in range(len(all_prompts))]
def infotext(iteration=0, position_in_batch=0):
return create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration, position_in_batch)
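
The change for #1181 in a nutshell: seeds are resolved into locals instead of being written back onto the processing object, so a seed of -1 stays -1 across repeated calls. A minimal sketch with a hypothetical stand-in class:

    import random

    def get_fixed_seed(seed):
        if seed is None or seed == '' or seed == -1:
            return int(random.randrange(4294967294))
        return seed

    class P:  # hypothetical stand-in for StableDiffusionProcessing
        seed = -1

    p = P()
    seed = get_fixed_seed(p.seed)
    print(p.seed)  # still -1: the object was not touched
    print(seed)    # the concrete seed actually used for this run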
From 1eb588cbf19924333b88beaa1ac0041904966640 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 4 Oct 2022 18:02:01 +0300
Subject: [PATCH 077/460] remove functools.cache as some people are having
issues with it
---
modules/prompt_parser.py | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index 99c8ed99c..5d58c4ed9 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -29,6 +29,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
%import common.SIGNED_NUMBER -> NUMBER
"""
parser = Lark(grammar, parser='lalr')
+
def collect_steps(steps, tree):
l = [steps]
class CollectSteps(Visitor):
@@ -40,6 +41,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
l.append(tree.children[-1])
CollectSteps().visit(tree)
return sorted(set(l))
+
def at_step(step, tree):
class AtStep(Transformer):
def scheduled(self, args):
@@ -62,11 +64,13 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
for child in children:
yield from child
return AtStep().transform(tree)
- @functools.cache
+
def get_schedule(prompt):
tree = parser.parse(prompt)
return [[t, at_step(t, tree)] for t in collect_steps(steps, tree)]
- return [get_schedule(prompt) for prompt in prompts]
+
+ promptdict = {prompt: get_schedule(prompt) for prompt in set(prompts)}
+ return [promptdict[prompt] for prompt in prompts]
ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at_step", "cond"])
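
The dict comprehension reproduces what functools.cache provided within a single call, without a process-wide cache: each distinct prompt is parsed once and duplicates are served from the dict. A sketch with a stubbed parser:

    calls = []

    def get_schedule(prompt):
        calls.append(prompt)   # track how often the parser actually runs
        return [(10, prompt)]  # stand-in for the real schedule

    prompts = ["a", "b", "a"]
    promptdict = {prompt: get_schedule(prompt) for prompt in set(prompts)}
    result = [promptdict[prompt] for prompt in prompts]

    print(len(calls))   # 2 -- "a" parsed only once
    print(len(result))  # 3 -- original ordering and duplicates preserved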
From 90e911fd546e76f879b38a764473569911a0f845 Mon Sep 17 00:00:00 2001
From: Rae Fu
Date: Tue, 4 Oct 2022 09:49:51 -0600
Subject: [PATCH 078/460] prompt_parser: allow spaces in schedules, add test,
log/ignore errors
Only build the parser once (at import time) instead of for each step.
doctest is run by simply executing modules/prompt_parser.py
---
modules/processing.py | 10 +--
modules/prompt_parser.py | 139 +++++++++++++++++++++++++--------------
2 files changed, 95 insertions(+), 54 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 8180c63d8..bb94033b1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -84,7 +84,7 @@ class StableDiffusionProcessing:
self.s_tmin = opts.s_tmin
self.s_tmax = float('inf') # not representable as a standard ui option
self.s_noise = opts.s_noise
-
+
if not seed_enable_extras:
self.subseed = -1
self.subseed_strength = 0
@@ -296,7 +296,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
assert(len(p.prompt) > 0)
else:
assert p.prompt is not None
-
+
devices.torch_gc()
seed = get_fixed_seed(p.seed)
@@ -359,8 +359,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
#uc = p.sd_model.get_learned_conditioning(len(prompts) * [p.negative_prompt])
#c = p.sd_model.get_learned_conditioning(prompts)
with devices.autocast():
- uc = prompt_parser.get_learned_conditioning(len(prompts) * [p.negative_prompt], p.steps)
- c = prompt_parser.get_learned_conditioning(prompts, p.steps)
+ uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps)
+ c = prompt_parser.get_learned_conditioning(shared.sd_model, prompts, p.steps)
if len(model_hijack.comments) > 0:
for comment in model_hijack.comments:
@@ -527,7 +527,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
# GC now before running the next img2img to prevent running out of memory
x = None
devices.torch_gc()
-
+
samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.steps)
return samples
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index 5d58c4ed9..a3b124219 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -1,10 +1,7 @@
import re
from collections import namedtuple
-import torch
-from lark import Lark, Transformer, Visitor
-import functools
-import modules.shared as shared
+import lark
# a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]"
# will be represented with prompt_schedule like this (assuming steps=100):
@@ -14,25 +11,48 @@ import modules.shared as shared
# [75, 'fantasy landscape with a lake and an oak in background masterful']
# [100, 'fantasy landscape with a lake and a christmas tree in background masterful']
+schedule_parser = lark.Lark(r"""
+!start: (prompt | /[][():]/+)*
+prompt: (emphasized | scheduled | plain | WHITESPACE)*
+!emphasized: "(" prompt ")"
+ | "(" prompt ":" prompt ")"
+ | "[" prompt "]"
+scheduled: "[" [prompt ":"] prompt ":" [WHITESPACE] NUMBER "]"
+WHITESPACE: /\s+/
+plain: /([^\\\[\]():]|\\.)+/
+%import common.SIGNED_NUMBER -> NUMBER
+""")
def get_learned_conditioning_prompt_schedules(prompts, steps):
- grammar = r"""
- start: prompt
- prompt: (emphasized | scheduled | weighted | plain)*
- !emphasized: "(" prompt ")"
- | "(" prompt ":" prompt ")"
- | "[" prompt "]"
- scheduled: "[" (prompt ":")? prompt ":" NUMBER "]"
- !weighted: "{" weighted_item ("|" weighted_item)* "}"
- !weighted_item: prompt (":" prompt)?
- plain: /([^\\\[\](){}:|]|\\.)+/
- %import common.SIGNED_NUMBER -> NUMBER
"""
- parser = Lark(grammar, parser='lalr')
+ >>> g = lambda p: get_learned_conditioning_prompt_schedules([p], 10)[0]
+ >>> g("test")
+ [[10, 'test']]
+ >>> g("a [b:3]")
+ [[3, 'a '], [10, 'a b']]
+ >>> g("a [b: 3]")
+ [[3, 'a '], [10, 'a b']]
+ >>> g("a [[[b]]:2]")
+ [[2, 'a '], [10, 'a [[b]]']]
+ >>> g("[(a:2):3]")
+ [[3, ''], [10, '(a:2)']]
+ >>> g("a [b : c : 1] d")
+ [[1, 'a b d'], [10, 'a c d']]
+ >>> g("a[b:[c:d:2]:1]e")
+ [[1, 'abe'], [2, 'ace'], [10, 'ade']]
+ >>> g("a [unbalanced")
+ [[10, 'a [unbalanced']]
+ >>> g("a [b:.5] c")
+ [[5, 'a c'], [10, 'a b c']]
+ >>> g("a [{b|d{:.5] c") # not handling this right now
+ [[5, 'a c'], [10, 'a {b|d{ c']]
+ >>> g("((a][:b:c [d:3]")
+ [[3, '((a][:b:c '], [10, '((a][:b:c d']]
+ """
def collect_steps(steps, tree):
l = [steps]
- class CollectSteps(Visitor):
+ class CollectSteps(lark.Visitor):
def scheduled(self, tree):
tree.children[-1] = float(tree.children[-1])
if tree.children[-1] < 1:
@@ -43,13 +63,10 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
return sorted(set(l))
def at_step(step, tree):
- class AtStep(Transformer):
+ class AtStep(lark.Transformer):
def scheduled(self, args):
- if len(args) == 2:
- before, after, when = (), *args
- else:
- before, after, when = args
- yield before if step <= when else after
+ before, after, _, when = args
+ yield before or () if step <= when else after
def start(self, args):
def flatten(x):
if type(x) == str:
@@ -57,16 +74,22 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
else:
for gen in x:
yield from flatten(gen)
- return ''.join(flatten(args[0]))
+ return ''.join(flatten(args))
def plain(self, args):
yield args[0].value
def __default__(self, data, children, meta):
for child in children:
yield from child
return AtStep().transform(tree)
-
+
def get_schedule(prompt):
- tree = parser.parse(prompt)
+ try:
+ tree = schedule_parser.parse(prompt)
+ except lark.exceptions.LarkError as e:
+ if 0:
+ import traceback
+ traceback.print_exc()
+ return [[steps, prompt]]
return [[t, at_step(t, tree)] for t in collect_steps(steps, tree)]
promptdict = {prompt: get_schedule(prompt) for prompt in set(prompts)}
@@ -77,8 +100,7 @@ ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at
ScheduledPromptBatch = namedtuple("ScheduledPromptBatch", ["shape", "schedules"])
-def get_learned_conditioning(prompts, steps):
-
+def get_learned_conditioning(model, prompts, steps):
res = []
prompt_schedules = get_learned_conditioning_prompt_schedules(prompts, steps)
@@ -92,7 +114,7 @@ def get_learned_conditioning(prompts, steps):
continue
texts = [x[1] for x in prompt_schedule]
- conds = shared.sd_model.get_learned_conditioning(texts)
+ conds = model.get_learned_conditioning(texts)
cond_schedule = []
for i, (end_at_step, text) in enumerate(prompt_schedule):
@@ -105,12 +127,13 @@ def get_learned_conditioning(prompts, steps):
def reconstruct_cond_batch(c: ScheduledPromptBatch, current_step):
- res = torch.zeros(c.shape, device=shared.device, dtype=next(shared.sd_model.parameters()).dtype)
+ param = c.schedules[0][0].cond
+ res = torch.zeros(c.shape, device=param.device, dtype=param.dtype)
for i, cond_schedule in enumerate(c.schedules):
target_index = 0
- for curret_index, (end_at, cond) in enumerate(cond_schedule):
+ for current, (end_at, cond) in enumerate(cond_schedule):
if current_step <= end_at:
- target_index = curret_index
+ target_index = current
break
res[i] = cond_schedule[target_index].cond
@@ -148,23 +171,26 @@ def parse_prompt_attention(text):
\\ - literal character '\'
anything else - just text
- Example:
-
- 'a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).'
-
- produces:
-
- [
- ['a ', 1.0],
- ['house', 1.5730000000000004],
- [' ', 1.1],
- ['on', 1.0],
- [' a ', 1.1],
- ['hill', 0.55],
- [', sun, ', 1.1],
- ['sky', 1.4641000000000006],
- ['.', 1.1]
- ]
+ >>> parse_prompt_attention('normal text')
+ [['normal text', 1.0]]
+ >>> parse_prompt_attention('an (important) word')
+ [['an ', 1.0], ['important', 1.1], [' word', 1.0]]
+ >>> parse_prompt_attention('(unbalanced')
+ [['unbalanced', 1.1]]
+ >>> parse_prompt_attention('\(literal\]')
+ [['(literal]', 1.0]]
+ >>> parse_prompt_attention('(unnecessary)(parens)')
+ [['unnecessaryparens', 1.1]]
+ >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).')
+ [['a ', 1.0],
+ ['house', 1.5730000000000004],
+ [' ', 1.1],
+ ['on', 1.0],
+ [' a ', 1.1],
+ ['hill', 0.55],
+ [', sun, ', 1.1],
+ ['sky', 1.4641000000000006],
+ ['.', 1.1]]
"""
res = []
@@ -206,4 +232,19 @@ def parse_prompt_attention(text):
if len(res) == 0:
res = [["", 1.0]]
+ # merge runs of identical weights
+ i = 0
+ while i + 1 < len(res):
+ if res[i][1] == res[i + 1][1]:
+ res[i][0] += res[i + 1][0]
+ res.pop(i + 1)
+ else:
+ i += 1
+
return res
+
+if __name__ == "__main__":
+ import doctest
+ doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
+else:
+ import torch # doctest faster
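Two details in this patch are easy to miss: the Lark parser is now built once at import time (schedule_parser is a module-level constant) rather than per call, and a prompt that fails to parse degrades to a single full-length schedule instead of raising. The cache-and-fan-out around it can be sketched in isolation like this (function name illustrative, not from the patch):

    def schedules_for(prompts, steps, get_schedule):
        # parse each unique prompt once, then map results back in input order
        cache = {prompt: get_schedule(prompt) for prompt in set(prompts)}
        return [cache[prompt] for prompt in prompts]

Running python modules/prompt_parser.py executes the doctests; the torch import is deferred to the import-as-module branch so the doctest run stays fast.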
From b32852ef037251eb3d846af76e2965594e1ac7a5 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Tue, 4 Oct 2022 20:49:54 +0300
Subject: [PATCH 079/460] add editor to img2img
---
modules/shared.py | 1 +
modules/ui.py | 2 +-
style.css | 4 ++++
3 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index ff4e5fa39..e52c9b1d1 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -55,6 +55,7 @@ parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide dire
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
+parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image uploader tool: can be either editor for ctopping, or color-sketch for drawing', choices=["color-sketch", "editor"], default="color-sketch")
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
diff --git a/modules/ui.py b/modules/ui.py
index 20dc8c379..6cd6761b8 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -644,7 +644,7 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode:
with gr.TabItem('img2img', id='img2img'):
- init_img = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil")
+ init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_img2img_tool)
with gr.TabItem('Inpaint', id='inpaint'):
init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA")
diff --git a/style.css b/style.css
index 39586bf18..e8f4cb752 100644
--- a/style.css
+++ b/style.css
@@ -403,3 +403,7 @@ input[type="range"]{
.red {
color: red;
}
+
+#img2img_image div.h-60{
+ height: 480px;
+}
\ No newline at end of file
From ef40e4cd4d383a3405e03f1da3f5b5a1820a8f53 Mon Sep 17 00:00:00 2001
From: xpscyho
Date: Tue, 4 Oct 2022 15:12:38 -0400
Subject: [PATCH 080/460] Display time taken in mins, secs when relevant Fixes
#1656
---
modules/ui.py | 7 ++++++-
1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/modules/ui.py b/modules/ui.py
index 6cd6761b8..de6342a48 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -196,6 +196,11 @@ def wrap_gradio_call(func, extra_outputs=None):
res = extra_outputs_array + [f"<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>"]
elapsed = time.perf_counter() - t
+ elapsed_m = int(elapsed // 60)
+ elapsed_s = elapsed % 60
+ elapsed_text = f"{elapsed_s:.2f}s"
+ if (elapsed_m > 0):
+ elapsed_text = f"{elapsed_m}m "+elapsed_text
if run_memmon:
mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()}
@@ -210,7 +215,7 @@ def wrap_gradio_call(func, extra_outputs=None):
vram_html = ''
# last item is always HTML
- res[-1] += f""
+ res[-1] += f""
shared.state.interrupted = False
shared.state.job_count = 0
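The minute/second split above only prepends the minutes part once a full minute has elapsed. A quick check of the same logic in isolation (a sketch, not part of the patch):

    def format_elapsed(elapsed):
        # mirrors the patch: "12.34s", or "2m 5.00s" past the one-minute mark
        elapsed_m = int(elapsed // 60)
        elapsed_s = elapsed % 60
        elapsed_text = f"{elapsed_s:.2f}s"
        if elapsed_m > 0:
            elapsed_text = f"{elapsed_m}m " + elapsed_text
        return elapsed_text

    assert format_elapsed(12.34) == "12.34s"
    assert format_elapsed(125.0) == "2m 5.00s"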
From 82380d9ac18614c87bebba1b4cfd4b147cc76a18 Mon Sep 17 00:00:00 2001
From: Jairo Correa
Date: Tue, 4 Oct 2022 22:28:50 -0300
Subject: [PATCH 081/460] Removing parts no longer needed to fix vram
---
modules/devices.py | 3 +--
modules/processing.py | 21 ++++++++-------------
2 files changed, 9 insertions(+), 15 deletions(-)
diff --git a/modules/devices.py b/modules/devices.py
index 6db4e57c9..0158b11fc 100644
--- a/modules/devices.py
+++ b/modules/devices.py
@@ -1,7 +1,6 @@
import contextlib
import torch
-import gc
from modules import errors
@@ -20,8 +19,8 @@ def get_optimal_device():
return cpu
+
def torch_gc():
- gc.collect()
if torch.cuda.is_available():
torch.cuda.empty_cache()
torch.cuda.ipc_collect()
diff --git a/modules/processing.py b/modules/processing.py
index e7f9c85e1..f666ba811 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -345,8 +345,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if state.job_count == -1:
state.job_count = p.n_iter
- for n in range(p.n_iter):
- with torch.no_grad(), precision_scope("cuda"), ema_scope():
+ for n in range(p.n_iter):
if state.interrupted:
break
@@ -395,22 +394,19 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
import modules.safety as safety
x_samples_ddim = modules.safety.censor_batch(x_samples_ddim)
- for i, x_sample in enumerate(x_samples_ddim):
- with torch.no_grad(), precision_scope("cuda"), ema_scope():
+ for i, x_sample in enumerate(x_samples_ddim):
x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2)
x_sample = x_sample.astype(np.uint8)
- if p.restore_faces:
- with torch.no_grad(), precision_scope("cuda"), ema_scope():
+ if p.restore_faces:
if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")
+ devices.torch_gc()
+
x_sample = modules.face_restoration.restore_faces(x_sample)
devices.torch_gc()
- devices.torch_gc()
-
- with torch.no_grad(), precision_scope("cuda"), ema_scope():
image = Image.fromarray(x_sample)
if p.color_corrections is not None and i < len(p.color_corrections):
@@ -438,13 +434,12 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
infotexts.append(infotext(n, i))
output_images.append(image)
- del x_samples_ddim
+ del x_samples_ddim
- devices.torch_gc()
+ devices.torch_gc()
- state.nextjob()
+ state.nextjob()
- with torch.no_grad(), precision_scope("cuda"), ema_scope():
p.color_corrections = None
index_of_first_image = 0
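The removals above all follow one pattern: an earlier vram fix had wrapped each block of the generation loop in its own torch.no_grad()/precision_scope/ema_scope stack, and with that fix landed elsewhere the repeats are no longer needed, leaving a plain loop body under the one enclosing scope. The shape of the change, as comments (illustrative):

    # before: contexts re-entered for every block inside the loop
    #   for n in range(p.n_iter):
    #       with torch.no_grad(), precision_scope("cuda"), ema_scope():
    #           ...
    # after: plain loop body under the scope already surrounding sampling
    #   for n in range(p.n_iter):
    #       ...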
From bbdbbd36eda870cf0bd49fdf28476c78919a123e Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Wed, 5 Oct 2022 04:43:05 +0100
Subject: [PATCH 082/460] shared.state.interrupt when restart is requested
---
modules/ui.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/ui.py b/modules/ui.py
index de6342a48..523ab25b3 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1210,6 +1210,7 @@ def create_ui(wrap_gradio_gpu_call):
)
def request_restart():
+ shared.state.interrupt()
settings_interface.gradio_ref.do_restart = True
restart_gradio.click(
From 67d011b02eddc20202b654dfea56528de3d5edf7 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Wed, 5 Oct 2022 04:44:22 +0100
Subject: [PATCH 083/460] Show generation progress in window title
---
javascript/progressbar.js | 15 +++++++++++++++
1 file changed, 15 insertions(+)
diff --git a/javascript/progressbar.js b/javascript/progressbar.js
index 1e297abbe..3e3220c3f 100644
--- a/javascript/progressbar.js
+++ b/javascript/progressbar.js
@@ -4,6 +4,21 @@ global_progressbars = {}
function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_interrupt, id_preview, id_gallery){
var progressbar = gradioApp().getElementById(id_progressbar)
var interrupt = gradioApp().getElementById(id_interrupt)
+
+ if(progressbar && progressbar.offsetParent){
+ if(progressbar.innerText){
+ let newtitle = 'Stable Diffusion - ' + progressbar.innerText
+ if(document.title != newtitle){
+ document.title = newtitle;
+ }
+ }else{
+ let newtitle = 'Stable Diffusion'
+ if(document.title != newtitle){
+ document.title = newtitle;
+ }
+ }
+ }
+
if(progressbar!= null && progressbar != global_progressbars[id_progressbar]){
global_progressbars[id_progressbar] = progressbar
From 59a2b9e5afc27d2fda72069ca0635070535d18fe Mon Sep 17 00:00:00 2001
From: Greendayle
Date: Wed, 5 Oct 2022 20:50:10 +0200
Subject: [PATCH 084/460] deepdanbooru interrogator
---
... deepbooru release project folder here.txt | 0
modules/deepbooru.py | 60 +++++++++++++++++++
modules/ui.py | 24 ++++++--
requirements.txt | 3 +
requirements_versions.txt | 3 +
style.css | 7 ++-
6 files changed, 91 insertions(+), 6 deletions(-)
create mode 100644 models/deepbooru/Put your deepbooru release project folder here.txt
create mode 100644 modules/deepbooru.py
diff --git a/models/deepbooru/Put your deepbooru release project folder here.txt b/models/deepbooru/Put your deepbooru release project folder here.txt
new file mode 100644
index 000000000..e69de29bb
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
new file mode 100644
index 000000000..958b1c3d8
--- /dev/null
+++ b/modules/deepbooru.py
@@ -0,0 +1,60 @@
+import os.path
+from concurrent.futures import ProcessPoolExecutor
+
+import numpy as np
+import deepdanbooru as dd
+import tensorflow as tf
+
+
+def _load_tf_and_return_tags(pil_image, threshold):
+ this_folder = os.path.dirname(__file__)
+ model_path = os.path.join(this_folder, '..', 'models', 'deepbooru', 'deepdanbooru-v3-20211112-sgd-e28')
+ if not os.path.exists(model_path):
+ return "Download https://github.com/KichangKim/DeepDanbooru/releases/download/v3-20211112-sgd-e28/deepdanbooru-v3-20211112-sgd-e28.zip unpack and put into models/deepbooru"
+
+ tags = dd.project.load_tags_from_project(model_path)
+ model = dd.project.load_model_from_project(
+ model_path, compile_model=True
+ )
+
+ width = model.input_shape[2]
+ height = model.input_shape[1]
+ image = np.array(pil_image)
+ image = tf.image.resize(
+ image,
+ size=(height, width),
+ method=tf.image.ResizeMethod.AREA,
+ preserve_aspect_ratio=True,
+ )
+ image = image.numpy() # EagerTensor to np.array
+ image = dd.image.transform_and_pad_image(image, width, height)
+ image = image / 255.0
+ image_shape = image.shape
+ image = image.reshape((1, image_shape[0], image_shape[1], image_shape[2]))
+
+ y = model.predict(image)[0]
+
+ result_dict = {}
+
+ for i, tag in enumerate(tags):
+ result_dict[tag] = y[i]
+
+
+
+ result_tags_out = []
+ result_tags_print = []
+ for tag in tags:
+ if result_dict[tag] >= threshold:
+ result_tags_out.append(tag)
+ result_tags_print.append(f'{result_dict[tag]} {tag}')
+
+ print('\n'.join(sorted(result_tags_print, reverse=True)))
+
+ return ', '.join(result_tags_out)
+
+
+def get_deepbooru_tags(pil_image, threshold=0.5):
+ with ProcessPoolExecutor() as executor:
+ f = executor.submit(_load_tf_and_return_tags, pil_image, threshold)
+ ret = f.result() # will rethrow any exceptions
+ return ret
\ No newline at end of file
diff --git a/modules/ui.py b/modules/ui.py
index 20dc8c379..ae98219a6 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -23,6 +23,7 @@ import gradio.utils
import gradio.routes
from modules import sd_hijack
+from modules.deepbooru import get_deepbooru_tags
from modules.paths import script_path
from modules.shared import opts, cmd_opts
import modules.shared as shared
@@ -312,6 +313,11 @@ def interrogate(image):
return gr_show(True) if prompt is None else prompt
+def interrogate_deepbooru(image):
+ prompt = get_deepbooru_tags(image)
+ return gr_show(True) if prompt is None else prompt
+
+
def create_seed_inputs():
with gr.Row():
with gr.Box():
@@ -439,15 +445,17 @@ def create_toprow(is_img2img):
outputs=[],
)
- with gr.Row():
+ with gr.Row(scale=1):
if is_img2img:
- interrogate = gr.Button('Interrogate', elem_id="interrogate")
+ interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate")
+ deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru")
else:
interrogate = None
+ deepbooru = None
prompt_style_apply = gr.Button('Apply style', elem_id="style_apply")
save_style = gr.Button('Create style', elem_id="style_create")
- return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste, token_counter, token_button
+ return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button
def setup_progressbar(progressbar, preview, id_part, textinfo=None):
@@ -476,7 +484,7 @@ def create_ui(wrap_gradio_gpu_call):
import modules.txt2img
with gr.Blocks(analytics_enabled=False) as txt2img_interface:
- txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False)
+ txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False)
dummy_component = gr.Label(visible=False)
with gr.Row(elem_id='txt2img_progress_row'):
@@ -628,7 +636,7 @@ def create_ui(wrap_gradio_gpu_call):
token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter])
with gr.Blocks(analytics_enabled=False) as img2img_interface:
- img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True)
+ img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True)
with gr.Row(elem_id='img2img_progress_row'):
with gr.Column(scale=1):
@@ -785,6 +793,12 @@ def create_ui(wrap_gradio_gpu_call):
outputs=[img2img_prompt],
)
+ img2img_deepbooru.click(
+ fn=interrogate_deepbooru,
+ inputs=[init_img],
+ outputs=[img2img_prompt],
+ )
+
save.click(
fn=wrap_gradio_call(save_files),
_js="(x, y, z) => [x, y, selected_gallery_index()]",
diff --git a/requirements.txt b/requirements.txt
index 631fe616a..cab101f88 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -23,3 +23,6 @@ resize-right
torchdiffeq
kornia
lark
+deepdanbooru
+tensorflow
+tensorflow-io
diff --git a/requirements_versions.txt b/requirements_versions.txt
index fdff26878..811953c68 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -22,3 +22,6 @@ resize-right==0.0.2
torchdiffeq==0.2.3
kornia==0.6.7
lark==1.1.2
+git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26#egg=deepdanbooru[tensorflow]
+tensorflow==2.10.0
+tensorflow-io==0.27.0
diff --git a/style.css b/style.css
index 39586bf18..2fd351f91 100644
--- a/style.css
+++ b/style.css
@@ -103,7 +103,12 @@
#style_apply, #style_create, #interrogate{
margin: 0.75em 0.25em 0.25em 0.25em;
- min-width: 3em;
+ min-width: 5em;
+}
+
+#style_apply, #style_create, #deepbooru{
+ margin: 0.75em 0.25em 0.25em 0.25em;
+ min-width: 5em;
}
#style_pos_col, #style_neg_col{
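Routing the TensorFlow work through a ProcessPoolExecutor is what makes the model's memory reclaimable: the worker process exits when the with-block closes, taking the loaded TF graph with it. The pattern in isolation (a sketch; heavy_fn is an illustrative stand-in):

    from concurrent.futures import ProcessPoolExecutor

    def heavy_fn(x):
        # imagine importing tensorflow and loading a model here; all of it
        # is freed when the worker process terminates
        return x * 2

    def run_isolated(x):
        with ProcessPoolExecutor() as executor:
            future = executor.submit(heavy_fn, x)
            return future.result()  # re-raises any worker exception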
From 1506fab29ad54beb9f52236912abc432209c8089 Mon Sep 17 00:00:00 2001
From: Greendayle
Date: Wed, 5 Oct 2022 21:15:08 +0200
Subject: [PATCH 085/460] removing problematic tag
---
modules/deepbooru.py | 5 ++---
1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
index 958b1c3d8..841cb9c5f 100644
--- a/modules/deepbooru.py
+++ b/modules/deepbooru.py
@@ -38,13 +38,12 @@ def _load_tf_and_return_tags(pil_image, threshold):
for i, tag in enumerate(tags):
result_dict[tag] = y[i]
-
-
-
result_tags_out = []
result_tags_print = []
for tag in tags:
if result_dict[tag] >= threshold:
+ if tag.startswith("rating:"):
+ continue
result_tags_out.append(tag)
result_tags_print.append(f'{result_dict[tag]} {tag}')
From 17a99baf0c929e5df4dfc4b2a96aa3890a141112 Mon Sep 17 00:00:00 2001
From: Greendayle
Date: Wed, 5 Oct 2022 22:05:24 +0200
Subject: [PATCH 086/460] better model search
---
modules/deepbooru.py | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
index 841cb9c5f..a64fd9cd1 100644
--- a/modules/deepbooru.py
+++ b/modules/deepbooru.py
@@ -9,8 +9,15 @@ import tensorflow as tf
def _load_tf_and_return_tags(pil_image, threshold):
this_folder = os.path.dirname(__file__)
model_path = os.path.join(this_folder, '..', 'models', 'deepbooru', 'deepdanbooru-v3-20211112-sgd-e28')
- if not os.path.exists(model_path):
- return "Download https://github.com/KichangKim/DeepDanbooru/releases/download/v3-20211112-sgd-e28/deepdanbooru-v3-20211112-sgd-e28.zip unpack and put into models/deepbooru"
+
+ model_good = False
+ for path_candidate in [model_path, os.path.dirname(model_path)]:
+ if os.path.exists(os.path.join(path_candidate, 'project.json')):
+ model_path = path_candidate
+ model_good = True
+ if not model_good:
+ return ("Download https://github.com/KichangKim/DeepDanbooru/releases/download/v3-20211112-sgd-e28/"
+ "deepdanbooru-v3-20211112-sgd-e28.zip unpack and put into models/deepbooru")
tags = dd.project.load_tags_from_project(model_path)
model = dd.project.load_model_from_project(
From c26732fbee2a57e621ac22bf70decf7496daa4cd Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 5 Oct 2022 23:16:27 +0300
Subject: [PATCH 087/460] added support for AND from
https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/
---
modules/processing.py | 2 +-
modules/prompt_parser.py | 114 ++++++++++++++++++++++++++++++++++++---
modules/sd_samplers.py | 35 ++++++++----
modules/ui.py | 6 ++-
4 files changed, 138 insertions(+), 19 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index bb94033b1..d8c6b8d57 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -360,7 +360,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
#c = p.sd_model.get_learned_conditioning(prompts)
with devices.autocast():
uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps)
- c = prompt_parser.get_learned_conditioning(shared.sd_model, prompts, p.steps)
+ c = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, prompts, p.steps)
if len(model_hijack.comments) > 0:
for comment in model_hijack.comments:
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index a3b124219..f7420daf9 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -97,10 +97,26 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at_step", "cond"])
-ScheduledPromptBatch = namedtuple("ScheduledPromptBatch", ["shape", "schedules"])
def get_learned_conditioning(model, prompts, steps):
+ """converts a list of prompts into a list of prompt schedules - each schedule is a list of ScheduledPromptConditioning, specifying the comdition (cond),
+ and the sampling step at which this condition is to be replaced by the next one.
+
+ Input:
+ (model, ['a red crown', 'a [blue:green:5] jeweled crown'], 20)
+
+ Output:
+ [
+ [
+ ScheduledPromptConditioning(end_at_step=20, cond=tensor([[-0.3886, 0.0229, -0.0523, ..., -0.4901, -0.3066, 0.0674], ..., [ 0.3317, -0.5102, -0.4066, ..., 0.4119, -0.7647, -1.0160]], device='cuda:0'))
+ ],
+ [
+ ScheduledPromptConditioning(end_at_step=5, cond=tensor([[-0.3886, 0.0229, -0.0522, ..., -0.4901, -0.3067, 0.0673], ..., [-0.0192, 0.3867, -0.4644, ..., 0.1135, -0.3696, -0.4625]], device='cuda:0')),
+ ScheduledPromptConditioning(end_at_step=20, cond=tensor([[-0.3886, 0.0229, -0.0522, ..., -0.4901, -0.3067, 0.0673], ..., [-0.7352, -0.4356, -0.7888, ..., 0.6994, -0.4312, -1.2593]], device='cuda:0'))
+ ]
+ ]
+ """
res = []
prompt_schedules = get_learned_conditioning_prompt_schedules(prompts, steps)
@@ -123,13 +139,75 @@ def get_learned_conditioning(model, prompts, steps):
cache[prompt] = cond_schedule
res.append(cond_schedule)
- return ScheduledPromptBatch((len(prompts),) + res[0][0].cond.shape, res)
+ return res
-def reconstruct_cond_batch(c: ScheduledPromptBatch, current_step):
- param = c.schedules[0][0].cond
- res = torch.zeros(c.shape, device=param.device, dtype=param.dtype)
- for i, cond_schedule in enumerate(c.schedules):
+re_AND = re.compile(r"\bAND\b")
+re_weight = re.compile(r"^(.*?)(?:\s*:\s*([-+]?\s*(?:\d+|\d*\.\d+)?))?\s*$")
+
+
+def get_multicond_prompt_list(prompts):
+ res_indexes = []
+
+ prompt_flat_list = []
+ prompt_indexes = {}
+
+ for prompt in prompts:
+ subprompts = re_AND.split(prompt)
+
+ indexes = []
+ for subprompt in subprompts:
+ text, weight = re_weight.search(subprompt).groups()
+
+ weight = float(weight) if weight is not None else 1.0
+
+ index = prompt_indexes.get(text, None)
+ if index is None:
+ index = len(prompt_flat_list)
+ prompt_flat_list.append(text)
+ prompt_indexes[text] = index
+
+ indexes.append((index, weight))
+
+ res_indexes.append(indexes)
+
+ return res_indexes, prompt_flat_list, prompt_indexes
+
+
+class ComposableScheduledPromptConditioning:
+ def __init__(self, schedules, weight=1.0):
+ self.schedules: list[ScheduledPromptConditioning] = schedules
+ self.weight: float = weight
+
+
+class MulticondLearnedConditioning:
+ def __init__(self, shape, batch):
+ self.shape: tuple = shape # the shape field is needed to send this object to DDIM/PLMS
+ self.batch: list[list[ComposableScheduledPromptConditioning]] = batch
+
+
+def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearnedConditioning:
+ """same as get_learned_conditioning, but returns a list of ScheduledPromptConditioning along with the weight objects for each prompt.
+ For each prompt, the list is obtained by splitting the prompt using the AND separator.
+
+ https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/
+ """
+
+ res_indexes, prompt_flat_list, prompt_indexes = get_multicond_prompt_list(prompts)
+
+ learned_conditioning = get_learned_conditioning(model, prompt_flat_list, steps)
+
+ res = []
+ for indexes in res_indexes:
+ res.append([ComposableScheduledPromptConditioning(learned_conditioning[i], weight) for i, weight in indexes])
+
+ return MulticondLearnedConditioning(shape=(len(prompts),), batch=res)
+
+
+def reconstruct_cond_batch(c: list[list[ScheduledPromptConditioning]], current_step):
+ param = c[0][0].cond
+ res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype)
+ for i, cond_schedule in enumerate(c):
target_index = 0
for current, (end_at, cond) in enumerate(cond_schedule):
if current_step <= end_at:
@@ -140,6 +218,30 @@ def reconstruct_cond_batch(c: ScheduledPromptBatch, current_step):
return res
+def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current_step):
+ param = c.batch[0][0].schedules[0].cond
+
+ tensors = []
+ conds_list = []
+
+ for batch_no, composable_prompts in enumerate(c.batch):
+ conds_for_batch = []
+
+ for cond_index, composable_prompt in enumerate(composable_prompts):
+ target_index = 0
+ for current, (end_at, cond) in enumerate(composable_prompt.schedules):
+ if current_step <= end_at:
+ target_index = current
+ break
+
+ conds_for_batch.append((len(tensors), composable_prompt.weight))
+ tensors.append(composable_prompt.schedules[target_index].cond)
+
+ conds_list.append(conds_for_batch)
+
+ return conds_list, torch.stack(tensors).to(device=param.device, dtype=param.dtype)
+
+
re_attention = re.compile(r"""
\\\(|
\\\)|
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index dbf570d2c..d27c547b3 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -109,9 +109,12 @@ class VanillaStableDiffusionSampler:
return 0
def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs):
- cond = prompt_parser.reconstruct_cond_batch(cond, self.step)
+ conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step)
+ assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers'
+ cond = tensor
+
if self.mask is not None:
img_orig = self.sampler.model.q_sample(self.init_latent, ts)
x_dec = img_orig * self.mask + self.nmask * x_dec
@@ -183,19 +186,31 @@ class CFGDenoiser(torch.nn.Module):
self.step = 0
def forward(self, x, sigma, uncond, cond, cond_scale):
- cond = prompt_parser.reconstruct_cond_batch(cond, self.step)
+ conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step)
uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step)
+ batch_size = len(conds_list)
+ repeats = [len(conds_list[i]) for i in range(batch_size)]
+
+ x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
+ sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
+ cond_in = torch.cat([tensor, uncond])
+
if shared.batch_cond_uncond:
- x_in = torch.cat([x] * 2)
- sigma_in = torch.cat([sigma] * 2)
- cond_in = torch.cat([uncond, cond])
- uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2)
- denoised = uncond + (cond - uncond) * cond_scale
+ x_out = self.inner_model(x_in, sigma_in, cond=cond_in)
else:
- uncond = self.inner_model(x, sigma, cond=uncond)
- cond = self.inner_model(x, sigma, cond=cond)
- denoised = uncond + (cond - uncond) * cond_scale
+ x_out = torch.zeros_like(x_in)
+ for batch_offset in range(0, x_out.shape[0], batch_size):
+ a = batch_offset
+ b = a + batch_size
+ x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=cond_in[a:b])
+
+ denoised_uncond = x_out[-batch_size:]
+ denoised = torch.clone(denoised_uncond)
+
+ for i, conds in enumerate(conds_list):
+ for cond_index, weight in conds:
+ denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)
if self.mask is not None:
denoised = self.init_latent * self.mask + self.nmask * denoised
diff --git a/modules/ui.py b/modules/ui.py
index 523ab25b3..9620350fc 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -34,7 +34,7 @@ import modules.gfpgan_model
import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
-from modules.prompt_parser import get_learned_conditioning_prompt_schedules
+from modules import prompt_parser
from modules.images import apply_filename_pattern, get_next_sequence_number
import modules.textual_inversion.ui
@@ -394,7 +394,9 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info:
def update_token_counter(text, steps):
try:
- prompt_schedules = get_learned_conditioning_prompt_schedules([text], steps)
+ _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text])
+ prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps)
+
except Exception:
# a parsing error can happen here during typing, and we don't want to bother the user with
# messages related to it in console
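To make the AND handling concrete: re_AND splits a prompt on the bare word AND, and re_weight peels an optional trailing ":<number>" off each piece. Roughly, for one composite prompt (exact whitespace around the pieces depends on the lazy regex, so the spacing below is approximate):

    # prompt: "a red house AND a stormy sky :0.6"
    # prompt_flat_list -> ['a red house', 'a stormy sky']
    # res_indexes      -> [[(0, 1.0), (1, 0.6)]]  # (index into flat list, weight)
    #
    # at denoise time each weighted cond pulls the result away from uncond:
    # denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale)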
From 4320f386d9641c7c234589c4cb0c0c6cbeb156ad Mon Sep 17 00:00:00 2001
From: Greendayle
Date: Wed, 5 Oct 2022 22:39:32 +0200
Subject: [PATCH 088/460] removing underscores and colons
---
modules/deepbooru.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
index a64fd9cd1..fb5018a6c 100644
--- a/modules/deepbooru.py
+++ b/modules/deepbooru.py
@@ -56,7 +56,7 @@ def _load_tf_and_return_tags(pil_image, threshold):
print('\n'.join(sorted(result_tags_print, reverse=True)))
- return ', '.join(result_tags_out)
+ return ', '.join(result_tags_out).replace('_', ' ').replace(':', ' ')
def get_deepbooru_tags(pil_image, threshold=0.5):
From f8e41a96bb30a04dd5e294c7e1178c1c3b09d481 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Wed, 5 Oct 2022 23:52:05 +0300
Subject: [PATCH 089/460] fix various float parsing errors
---
modules/prompt_parser.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index f7420daf9..800b12c75 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -143,8 +143,7 @@ def get_learned_conditioning(model, prompts, steps):
re_AND = re.compile(r"\bAND\b")
-re_weight = re.compile(r"^(.*?)(?:\s*:\s*([-+]?\s*(?:\d+|\d*\.\d+)?))?\s*$")
-
+re_weight = re.compile(r"^(.*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$")
def get_multicond_prompt_list(prompts):
res_indexes = []
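The revised number pattern [-+]?(?:\d+\.?|\d*\.\d+) accepts integers, trailing-dot floats and leading-dot floats, and no longer matches an empty weight after a bare colon. A few spot checks (a sketch):

    import re

    re_weight = re.compile(r"^(.*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$")

    for s in ["cat:2", "cat:2.", "cat:.5", "cat:-0.5", "cat:"]:
        text, weight = re_weight.search(s).groups()
        print(repr(text), weight)
    # 'cat' 2 / 'cat' 2. / 'cat' .5 / 'cat' -0.5 / 'cat:' None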
From 20f8ec877a99ce2ebf193cb1e2e773cfc77b7c41 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 6 Oct 2022 00:09:32 +0300
Subject: [PATCH 090/460] remove type annotations in new code because
presumably they don't work in 3.7
---
modules/prompt_parser.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index 800b12c75..ee4c5d02d 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -175,14 +175,14 @@ def get_multicond_prompt_list(prompts):
class ComposableScheduledPromptConditioning:
def __init__(self, schedules, weight=1.0):
- self.schedules: list[ScheduledPromptConditioning] = schedules
+ self.schedules = schedules # : list[ScheduledPromptConditioning]
self.weight: float = weight
class MulticondLearnedConditioning:
def __init__(self, shape, batch):
self.shape: tuple = shape # the shape field is needed to send this object to DDIM/PLMS
- self.batch: list[list[ComposableScheduledPromptConditioning]] = batch
+ self.batch = batch # : list[list[ComposableScheduledPromptConditioning]]
def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearnedConditioning:
@@ -203,7 +203,7 @@ def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearne
return MulticondLearnedConditioning(shape=(len(prompts),), batch=res)
-def reconstruct_cond_batch(c: list[list[ScheduledPromptConditioning]], current_step):
+def reconstruct_cond_batch(c, current_step): # c: list[list[ScheduledPromptConditioning]]
param = c[0][0].cond
res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype)
for i, cond_schedule in enumerate(c):
From 34c358d10d52817f7a889ae4c52096ee654f3fe6 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Wed, 5 Oct 2022 22:11:30 +0100
Subject: [PATCH 091/460] use typing.list in prompt_parser.py for wider python
version support
---
modules/prompt_parser.py | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index 800b12c75..fdfa21ae6 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -1,6 +1,6 @@
import re
from collections import namedtuple
-
+from typing import List
import lark
# a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]"
@@ -175,14 +175,14 @@ def get_multicond_prompt_list(prompts):
class ComposableScheduledPromptConditioning:
def __init__(self, schedules, weight=1.0):
- self.schedules: list[ScheduledPromptConditioning] = schedules
+ self.schedules: List[ScheduledPromptConditioning] = schedules
self.weight: float = weight
class MulticondLearnedConditioning:
def __init__(self, shape, batch):
self.shape: tuple = shape # the shape field is needed to send this object to DDIM/PLMS
- self.batch: list[list[ComposableScheduledPromptConditioning]] = batch
+ self.batch: List[List[ComposableScheduledPromptConditioning]] = batch
def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearnedConditioning:
@@ -203,7 +203,7 @@ def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearne
return MulticondLearnedConditioning(shape=(len(prompts),), batch=res)
-def reconstruct_cond_batch(c: list[list[ScheduledPromptConditioning]], current_step):
+def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_step):
param = c[0][0].cond
res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype)
for i, cond_schedule in enumerate(c):
From 55400c981b7c1389482057a35ed6ea11f08da194 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Thu, 6 Oct 2022 03:11:15 +0100
Subject: [PATCH 092/460] Set gradio-img2img-tool default to 'editor'
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index e52c9b1d1..bab0fe6ee 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -55,7 +55,7 @@ parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide dire
parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json'))
parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option")
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None)
-parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image uploader tool: can be either editor for ctopping, or color-sketch for drawing', choices=["color-sketch", "editor"], default="color-sketch")
+parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image uploader tool: can be either editor for ctopping, or color-sketch for drawing', choices=["color-sketch", "editor"], default="editor")
parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last")
parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv'))
parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False)
From 2499fb4e1910d31ff12c24110f161b20641b8835 Mon Sep 17 00:00:00 2001
From: Raphael Stoeckli
Date: Wed, 5 Oct 2022 21:57:18 +0200
Subject: [PATCH 093/460] Add sanitizer for captions in Textual inversion
---
modules/textual_inversion/preprocess.py | 28 +++++++++++++++++++++++++
1 file changed, 28 insertions(+)
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
index f545a9937..4f3df4bd9 100644
--- a/modules/textual_inversion/preprocess.py
+++ b/modules/textual_inversion/preprocess.py
@@ -1,5 +1,8 @@
+from cmath import log
import os
from PIL import Image, ImageOps
+import platform
+import sys
import tqdm
from modules import shared, images
@@ -25,6 +28,7 @@ def preprocess(process_src, process_dst, process_flip, process_split, process_ca
def save_pic_with_caption(image, index):
if process_caption:
caption = "-" + shared.interrogator.generate_caption(image)
+ caption = sanitize_caption(os.path.join(dst, f"{index:05}-{subindex[0]}"), caption, ".png")
else:
caption = filename
caption = os.path.splitext(caption)[0]
@@ -75,3 +79,27 @@ def preprocess(process_src, process_dst, process_flip, process_split, process_ca
if process_caption:
shared.interrogator.send_blip_to_ram()
+
+def sanitize_caption(base_path, original_caption, suffix):
+ operating_system = platform.system().lower()
+ if (operating_system == "windows"):
+ invalid_path_characters = "\\/:*?\"<>|"
+ max_path_length = 259
+ else:
+ invalid_path_characters = "/" #linux/macos
+ max_path_length = 1023
+ caption = original_caption
+ for invalid_character in invalid_path_characters:
+ caption = caption.replace(invalid_character, "")
+ fixed_path_length = len(base_path) + len(suffix)
+ if fixed_path_length + len(caption) <= max_path_length:
+ return caption
+ caption_tokens = caption.split()
+ new_caption = ""
+ for token in caption_tokens:
+ last_caption = new_caption
+ new_caption = new_caption + token + " "
+ if (len(new_caption) + fixed_path_length - 1 > max_path_length):
+ break
+ print(f"\nPath will be too long. Truncated caption: {original_caption}\nto: {last_caption}", file=sys.stderr)
+ return last_caption.strip()
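A worked example of the truncation (numbers illustrative): on Windows the budget is 259 characters, so a 240-character base path plus a 4-character ".png" suffix leaves roughly 15 characters for the caption; the loop keeps whole words while they fit and returns the last prefix that did.

    # base_path = "C:\\outputs\\...\\00042-0"  (240 chars), suffix = ".png"
    # caption   = "a photo of a very tall lighthouse"
    # returned  -> "a photo of a"   # last whole-word prefix within the budget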
From 4288e53fc2ea25fa49715bf5b7f14603553c9e38 Mon Sep 17 00:00:00 2001
From: Raphael Stoeckli
Date: Wed, 5 Oct 2022 23:11:32 +0200
Subject: [PATCH 094/460] removed unused import, fixed typo
---
modules/textual_inversion/preprocess.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py
index 4f3df4bd9..f1c002a2b 100644
--- a/modules/textual_inversion/preprocess.py
+++ b/modules/textual_inversion/preprocess.py
@@ -1,4 +1,3 @@
-from cmath import log
import os
from PIL import Image, ImageOps
import platform
@@ -13,7 +12,7 @@ def preprocess(process_src, process_dst, process_flip, process_split, process_ca
src = os.path.abspath(process_src)
dst = os.path.abspath(process_dst)
- assert src != dst, 'same directory specified as source and desitnation'
+ assert src != dst, 'same directory specified as source and destination'
os.makedirs(dst, exist_ok=True)
From a93c3ffbfd264ed6b5d989922352300c9d3efbe4 Mon Sep 17 00:00:00 2001
From: Jocke
Date: Wed, 5 Oct 2022 16:31:48 +0200
Subject: [PATCH 095/460] Outpainting mk2, prevent generation of a completely
random image every time even when global seed is static
---
scripts/outpainting_mk_2.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py
index 11613ca36..a6468e09a 100644
--- a/scripts/outpainting_mk_2.py
+++ b/scripts/outpainting_mk_2.py
@@ -85,8 +85,11 @@ def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.0
src_dist = np.absolute(src_fft)
src_phase = src_fft / src_dist
+ # create a generator with a static seed to make outpainting deterministic / only follow global seed
+ rng = np.random.default_rng(0)
+
noise_window = _get_gaussian_window(width, height, mode=1) # start with simple gaussian noise
- noise_rgb = np.random.random_sample((width, height, num_channels))
+ noise_rgb = rng.random((width, height, num_channels))
noise_grey = (np.sum(noise_rgb, axis=2) / 3.)
noise_rgb *= color_variation # the colorfulness of the starting noise is blended to greyscale with a parameter
for c in range(num_channels):
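Seeding a dedicated generator with a constant makes the matched-noise step reproducible without touching NumPy's global RNG (which the rest of the pipeline, driven by the global seed, still uses). The property is easy to verify (sketch):

    import numpy as np

    a = np.random.default_rng(0).random((2, 2, 3))
    b = np.random.default_rng(0).random((2, 2, 3))
    assert (a == b).all()  # same seed, same noise, independent of global state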
From 6e7057b31b9762a9720282c7da486e4f264dee28 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 6 Oct 2022 12:08:06 +0300
Subject: [PATCH 096/460] support for downloading new commit hash for git repos
---
launch.py | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/launch.py b/launch.py
index 57405feab..2f91f586c 100644
--- a/launch.py
+++ b/launch.py
@@ -86,6 +86,15 @@ def git_clone(url, dir, name, commithash=None):
# TODO clone into temporary dir and move if successful
if os.path.exists(dir):
+ if commithash is None:
+ return
+
+ current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
+ if current_hash == commithash:
+ return
+
+ run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
+ run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commint for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
return
run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
From 5f24b7bcf4a074fbdec757617fcd1bc82e76551b Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 6 Oct 2022 12:08:48 +0300
Subject: [PATCH 097/460] option to let users select which samplers they want
to hide
---
modules/processing.py | 13 ++++++-------
modules/sd_samplers.py | 19 +++++++++++++++++--
modules/shared.py | 15 +++++++++------
webui.py | 4 +++-
4 files changed, 35 insertions(+), 16 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index d8c6b8d57..e01c8b3f6 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -11,9 +11,8 @@ import cv2
from skimage import exposure
import modules.sd_hijack
-from modules import devices, prompt_parser, masking
+from modules import devices, prompt_parser, masking, sd_samplers
from modules.sd_hijack import model_hijack
-from modules.sd_samplers import samplers, samplers_for_img2img
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.face_restoration
@@ -110,7 +109,7 @@ class Processed:
self.width = p.width
self.height = p.height
self.sampler_index = p.sampler_index
- self.sampler = samplers[p.sampler_index].name
+ self.sampler = sd_samplers.samplers[p.sampler_index].name
self.cfg_scale = p.cfg_scale
self.steps = p.steps
self.batch_size = p.batch_size
@@ -265,7 +264,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
generation_params = {
"Steps": p.steps,
- "Sampler": samplers[p.sampler_index].name,
+ "Sampler": sd_samplers.samplers[p.sampler_index].name,
"CFG scale": p.cfg_scale,
"Seed": all_seeds[index],
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
@@ -478,7 +477,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
self.firstphase_height_truncated = int(scale * self.height)
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
- self.sampler = samplers[self.sampler_index].constructor(self.sd_model)
+ self.sampler = sd_samplers.samplers[self.sampler_index].constructor(self.sd_model)
if not self.enable_hr:
x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
@@ -521,7 +520,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
shared.state.nextjob()
- self.sampler = samplers[self.sampler_index].constructor(self.sd_model)
+ self.sampler = sd_samplers.samplers[self.sampler_index].constructor(self.sd_model)
noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
# GC now before running the next img2img to prevent running out of memory
@@ -556,7 +555,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
self.nmask = None
def init(self, all_prompts, all_seeds, all_subseeds):
- self.sampler = samplers_for_img2img[self.sampler_index].constructor(self.sd_model)
+ self.sampler = sd_samplers.samplers_for_img2img[self.sampler_index].constructor(self.sd_model)
crop_region = None
if self.image_mask is not None:
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index d27c547b3..2e1f77153 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -32,12 +32,27 @@ samplers_data_k_diffusion = [
if hasattr(k_diffusion.sampling, funcname)
]
-samplers = [
+all_samplers = [
*samplers_data_k_diffusion,
SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), []),
SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), []),
]
-samplers_for_img2img = [x for x in samplers if x.name not in ['PLMS', 'DPM fast', 'DPM adaptive']]
+
+samplers = []
+samplers_for_img2img = []
+
+
+def set_samplers():
+ global samplers, samplers_for_img2img
+
+ hidden = set(opts.hide_samplers)
+ hidden_img2img = set(opts.hide_samplers + ['PLMS', 'DPM fast', 'DPM adaptive'])
+
+ samplers = [x for x in all_samplers if x.name not in hidden]
+ samplers_for_img2img = [x for x in all_samplers if x.name not in hidden_img2img]
+
+
+set_samplers()
sampler_extra_params = {
'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'],
diff --git a/modules/shared.py b/modules/shared.py
index bab0fe6ee..ca2e4c742 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -13,6 +13,7 @@ import modules.memmon
import modules.sd_models
import modules.styles
import modules.devices as devices
+from modules import sd_samplers
from modules.paths import script_path, sd_path
sd_model_file = os.path.join(script_path, 'model.ckpt')
@@ -238,14 +239,16 @@ options_templates.update(options_section(('ui', "User interface"), {
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
- "eta_ddim": OptionInfo(0.0, "eta (noise multiplier) for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
- 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
- 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
+ "hide_samplers": OptionInfo([], "Hide samplers in user interface (requires restart)", gr.CheckboxGroup, lambda: {"choices": [x.name for x in sd_samplers.all_samplers]}),
+ "eta_ddim": OptionInfo(0.0, "eta (noise multiplier) for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
+ "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
+ "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}),
+ 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
+ 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
+ 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}),
}))
+
class Options:
data = None
data_labels = options_templates
diff --git a/webui.py b/webui.py
index 47848ba58..9ef124274 100644
--- a/webui.py
+++ b/webui.py
@@ -2,7 +2,7 @@ import os
import threading
import time
import importlib
-from modules import devices
+from modules import devices, sd_samplers
from modules.paths import script_path
import signal
import threading
@@ -109,6 +109,8 @@ def webui():
time.sleep(0.5)
break
+ sd_samplers.set_samplers()
+
print('Reloading Custom Scripts')
modules.scripts.reload_scripts(os.path.join(script_path, "scripts"))
print('Reloading modules: modules.ui')
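set_samplers rebuilds both public lists from all_samplers, so changing the hide_samplers option plus a restart is all it takes to change what the UI offers; the filtering itself is plain set subtraction (sketch with made-up names):

    all_names = ['Euler', 'LMS', 'PLMS', 'DDIM', 'DPM fast']
    hidden = {'LMS'}
    hidden_img2img = hidden | {'PLMS', 'DPM fast', 'DPM adaptive'}

    samplers = [n for n in all_names if n not in hidden]
    samplers_for_img2img = [n for n in all_names if n not in hidden_img2img]
    # samplers             -> ['Euler', 'PLMS', 'DDIM', 'DPM fast']
    # samplers_for_img2img -> ['Euler', 'DDIM']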
From 2d3ea42a2d1e909bbccdb6b49561b187c60a9402 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 6 Oct 2022 13:21:12 +0300
Subject: [PATCH 098/460] workaround for a mysterious bug where prompt weights
can't be matched
---
modules/prompt_parser.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index a7a6aa314..f00256f28 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -156,7 +156,9 @@ def get_multicond_prompt_list(prompts):
indexes = []
for subprompt in subprompts:
- text, weight = re_weight.search(subprompt).groups()
+ match = re_weight.search(subprompt)
+
+ text, weight = match.groups() if match is not None else (subprompt, 1.0)
weight = float(weight) if weight is not None else 1.0
From 2a532804957e47bc36c67c8f5b104dcfa8e8f3f0 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 6 Oct 2022 13:21:32 +0300
Subject: [PATCH 099/460] reorder imports to fix the bug with k-diffusion on
some versions
---
webui.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/webui.py b/webui.py
index 9ef124274..480360fe0 100644
--- a/webui.py
+++ b/webui.py
@@ -2,11 +2,12 @@ import os
import threading
import time
import importlib
-from modules import devices, sd_samplers
-from modules.paths import script_path
import signal
import threading
+from modules.paths import script_path
+
+from modules import devices, sd_samplers
import modules.codeformer_model as codeformer
import modules.extras
import modules.face_restoration
From c30c06db207a580d76544fd10fc1e03cd58ce85e Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Mon, 3 Oct 2022 12:48:16 +0300
Subject: [PATCH 100/460] update k-diffusion
---
launch.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/launch.py b/launch.py
index 2f91f586c..c2713c645 100644
--- a/launch.py
+++ b/launch.py
@@ -19,7 +19,7 @@ clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLI
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
-k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "a7ec1974d4ccb394c2dca275f42cd97490618924")
+k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "567e11f7062ba20ae32b5a8cd07fb0fc4b9410cf")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
From c1a068ed0acc788774afc1541ca69342fd1d94ad Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Mon, 3 Oct 2022 12:49:17 +0300
Subject: [PATCH 101/460] Create alternate_sampler_noise_schedules.py
---
scripts/alternate_sampler_noise_schedules.py | 53 ++++++++++++++++++++
1 file changed, 53 insertions(+)
create mode 100644 scripts/alternate_sampler_noise_schedules.py
diff --git a/scripts/alternate_sampler_noise_schedules.py b/scripts/alternate_sampler_noise_schedules.py
new file mode 100644
index 000000000..4f3ed8fb1
--- /dev/null
+++ b/scripts/alternate_sampler_noise_schedules.py
@@ -0,0 +1,53 @@
+import inspect
+from modules.processing import Processed, process_images
+import gradio as gr
+import modules.scripts as scripts
+import k_diffusion.sampling
+import torch
+
+
+class Script(scripts.Script):
+
+ def title(self):
+ return "Alternate Sampler Noise Schedules"
+
+ def ui(self, is_img2img):
+ noise_scheduler = gr.Dropdown(label="Noise Scheduler", choices=['Default','Karras','Exponential', 'Variance Preserving'], value='Default', type="index")
+ sched_smin = gr.Slider(value=0.1, label="Sigma min", minimum=0.0, maximum=100.0, step=0.5,)
+ sched_smax = gr.Slider(value=10.0, label="Sigma max", minimum=0.0, maximum=100.0, step=0.5)
+ sched_rho = gr.Slider(value=7.0, label="Sigma rho (Karras only)", minimum=7.0, maximum=100.0, step=0.5)
+ sched_beta_d = gr.Slider(value=19.9, label="Beta distribution (VP only)",minimum=0.0, maximum=40.0, step=0.5)
+ sched_beta_min = gr.Slider(value=0.1, label="Beta min (VP only)", minimum=0.0, maximum=40.0, step=0.1)
+ sched_eps_s = gr.Slider(value=0.001, label="Epsilon (VP only)", minimum=0.001, maximum=1.0, step=0.001)
+
+ return [noise_scheduler, sched_smin, sched_smax, sched_rho, sched_beta_d, sched_beta_min, sched_eps_s]
+
+ def run(self, p, noise_scheduler, sched_smin, sched_smax, sched_rho, sched_beta_d, sched_beta_min, sched_eps_s):
+
+ noise_scheduler_func_name = ['-','get_sigmas_karras','get_sigmas_exponential','get_sigmas_vp'][noise_scheduler]
+
+ base_params = {
+ "sigma_min":sched_smin,
+ "sigma_max":sched_smax,
+ "rho":sched_rho,
+ "beta_d":sched_beta_d,
+ "beta_min":sched_beta_min,
+ "eps_s":sched_eps_s,
+ "device":"cuda" if torch.cuda.is_available() else "cpu"
+ }
+
+ if hasattr(k_diffusion.sampling,noise_scheduler_func_name):
+
+ sigma_func = getattr(k_diffusion.sampling,noise_scheduler_func_name)
+ sigma_func_kwargs = {}
+
+ for k,v in base_params.items():
+ if k in inspect.signature(sigma_func).parameters:
+ sigma_func_kwargs[k] = v
+
+ def substitute_noise_scheduler(n):
+ return sigma_func(n,**sigma_func_kwargs)
+
+ p.sampler_noise_scheduler_override = substitute_noise_scheduler
+
+ return process_images(p)
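
The script above forwards only the keyword arguments that each scheduler function actually declares, probing with inspect.signature. A minimal, self-contained sketch of that pattern (call_with_supported_kwargs and example_scheduler are illustrative names, not part of the patch):

    import inspect

    def call_with_supported_kwargs(func, **candidates):
        # Keep only the kwargs that appear in func's signature.
        params = inspect.signature(func).parameters
        return func(**{k: v for k, v in candidates.items() if k in params})

    def example_scheduler(n, sigma_min=0.1, sigma_max=10.0, rho=7.0):
        return [sigma_min + (sigma_max - sigma_min) * i / max(n - 1, 1) for i in range(n)]

    # rho is forwarded; beta_d is silently dropped because example_scheduler
    # does not declare it -- exactly how the script tolerates scheduler
    # functions with different parameter sets.
    print(call_with_supported_kwargs(example_scheduler, n=5, rho=9.0, beta_d=19.9))
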
From 71901b3d3bea1d035bf4a7229d19356b4b062151 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Wed, 5 Oct 2022 14:30:57 +0300
Subject: [PATCH 102/460] add karras scheduling variants
---
modules/sd_samplers.py | 13 +++++++++++++
1 file changed, 13 insertions(+)
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 2e1f77153..8d6eb7620 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -26,6 +26,17 @@ samplers_k_diffusion = [
('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad']),
]
+if opts.show_karras_scheduler_variants:
+ k_diffusion.sampling.sample_dpm_2_ka = k_diffusion.sampling.sample_dpm_2
+ k_diffusion.sampling.sample_dpm_2_ancestral_ka = k_diffusion.sampling.sample_dpm_2_ancestral
+ k_diffusion.sampling.sample_lms_ka = k_diffusion.sampling.sample_lms
+ samplers_k_diffusion_ka = [
+ ('LMS K Scheduling', 'sample_lms_ka', ['k_lms_ka']),
+ ('DPM2 K Scheduling', 'sample_dpm_2_ka', ['k_dpm_2_ka']),
+ ('DPM2 a K Scheduling', 'sample_dpm_2_ancestral_ka', ['k_dpm_2_a_ka']),
+ ]
+ samplers_k_diffusion.extend(samplers_k_diffusion_ka)
+
samplers_data_k_diffusion = [
SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases)
for label, funcname, aliases in samplers_k_diffusion
@@ -345,6 +356,8 @@ class KDiffusionSampler:
if p.sampler_noise_scheduler_override:
sigmas = p.sampler_noise_scheduler_override(steps)
+ elif self.funcname.endswith('ka'):
+ sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
else:
sigmas = self.model_wrap.get_sigmas(steps)
x = x * sigmas[0]
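
The 'ka' branch above hard-codes get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10). For reference, this is essentially what k-diffusion's get_sigmas_karras computes: noise levels spaced so that sigma**(1/rho) is linear, per Karras et al. (2022), with a trailing zero appended to terminate sampling:

    import torch

    def karras_sigmas(n, sigma_min=0.1, sigma_max=10.0, rho=7.0):
        ramp = torch.linspace(0, 1, n)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return torch.cat([sigmas, sigmas.new_zeros([1])])

    print(karras_sigmas(6))  # descends from 10.0 to 0.1, then 0
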
From 3ddf80a9db8793188e2fe9488233d2b272cceb33 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Wed, 5 Oct 2022 14:31:51 +0300
Subject: [PATCH 103/460] add variant setting
---
modules/shared.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/shared.py b/modules/shared.py
index ca2e4c742..9e4860a28 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -236,6 +236,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initialy_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
+ "show_karras_scheduler_variants": OptionInfo(True, "Show Karras scheduling variants for select samplers. Try these variants if your K sampled images suffer from excessive noise."),
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
From a971e4a767118ec41ec0f129770122babfb16a16 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Thu, 6 Oct 2022 13:34:42 +0300
Subject: [PATCH 104/460] update k-diff once again
---
launch.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/launch.py b/launch.py
index c2713c645..9fe0fd675 100644
--- a/launch.py
+++ b/launch.py
@@ -19,7 +19,7 @@ clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLI
stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
-k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "567e11f7062ba20ae32b5a8cd07fb0fc4b9410cf")
+k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "f4e99857772fc3a126ba886aadf795a332774878")
codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
From 5993df24a1026225cb8af89237547c1d9101ce69 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 6 Oct 2022 14:12:52 +0300
Subject: [PATCH 105/460] integrate the new samplers PR
---
modules/processing.py | 7 ++-
modules/sd_samplers.py | 59 ++++++++++----------
modules/shared.py | 1 -
scripts/alternate_sampler_noise_schedules.py | 53 ------------------
scripts/img2imgalt.py | 3 +-
5 files changed, 36 insertions(+), 87 deletions(-)
delete mode 100644 scripts/alternate_sampler_noise_schedules.py
diff --git a/modules/processing.py b/modules/processing.py
index e01c8b3f6..e567956ce 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -477,7 +477,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
self.firstphase_height_truncated = int(scale * self.height)
def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength):
- self.sampler = sd_samplers.samplers[self.sampler_index].constructor(self.sd_model)
+ self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
if not self.enable_hr:
x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
@@ -520,7 +520,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
shared.state.nextjob()
- self.sampler = sd_samplers.samplers[self.sampler_index].constructor(self.sd_model)
+ self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model)
+
noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self)
# GC now before running the next img2img to prevent running out of memory
@@ -555,7 +556,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing):
self.nmask = None
def init(self, all_prompts, all_seeds, all_subseeds):
- self.sampler = sd_samplers.samplers_for_img2img[self.sampler_index].constructor(self.sd_model)
+ self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers_for_img2img, self.sampler_index, self.sd_model)
crop_region = None
if self.image_mask is not None:
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 8d6eb7620..497df9430 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -13,46 +13,46 @@ from modules.shared import opts, cmd_opts, state
import modules.shared as shared
-SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases'])
+SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])
samplers_k_diffusion = [
- ('Euler a', 'sample_euler_ancestral', ['k_euler_a']),
- ('Euler', 'sample_euler', ['k_euler']),
- ('LMS', 'sample_lms', ['k_lms']),
- ('Heun', 'sample_heun', ['k_heun']),
- ('DPM2', 'sample_dpm_2', ['k_dpm_2']),
- ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a']),
- ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast']),
- ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad']),
+ ('Euler a', 'sample_euler_ancestral', ['k_euler_a'], {}),
+ ('Euler', 'sample_euler', ['k_euler'], {}),
+ ('LMS', 'sample_lms', ['k_lms'], {}),
+ ('Heun', 'sample_heun', ['k_heun'], {}),
+ ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {}),
+ ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {}),
+ ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}),
+ ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}),
+ ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}),
+ ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras'}),
+ ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}),
]
-if opts.show_karras_scheduler_variants:
- k_diffusion.sampling.sample_dpm_2_ka = k_diffusion.sampling.sample_dpm_2
- k_diffusion.sampling.sample_dpm_2_ancestral_ka = k_diffusion.sampling.sample_dpm_2_ancestral
- k_diffusion.sampling.sample_lms_ka = k_diffusion.sampling.sample_lms
- samplers_k_diffusion_ka = [
- ('LMS K Scheduling', 'sample_lms_ka', ['k_lms_ka']),
- ('DPM2 K Scheduling', 'sample_dpm_2_ka', ['k_dpm_2_ka']),
- ('DPM2 a K Scheduling', 'sample_dpm_2_ancestral_ka', ['k_dpm_2_a_ka']),
- ]
- samplers_k_diffusion.extend(samplers_k_diffusion_ka)
-
samplers_data_k_diffusion = [
- SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases)
- for label, funcname, aliases in samplers_k_diffusion
+ SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options)
+ for label, funcname, aliases, options in samplers_k_diffusion
if hasattr(k_diffusion.sampling, funcname)
]
all_samplers = [
*samplers_data_k_diffusion,
- SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), []),
- SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), []),
+ SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {}),
+ SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {}),
]
samplers = []
samplers_for_img2img = []
+def create_sampler_with_index(list_of_configs, index, model):
+ config = list_of_configs[index]
+ sampler = config.constructor(model)
+ sampler.config = config
+
+ return sampler
+
+
def set_samplers():
global samplers, samplers_for_img2img
@@ -130,6 +130,7 @@ class VanillaStableDiffusionSampler:
self.step = 0
self.eta = None
self.default_eta = 0.0
+ self.config = None
def number_of_needed_noises(self, p):
return 0
@@ -291,6 +292,7 @@ class KDiffusionSampler:
self.stop_at = None
self.eta = None
self.default_eta = 1.0
+ self.config = None
def callback_state(self, d):
store_latent(d["denoised"])
@@ -355,11 +357,12 @@ class KDiffusionSampler:
steps = steps or p.steps
if p.sampler_noise_scheduler_override:
- sigmas = p.sampler_noise_scheduler_override(steps)
- elif self.funcname.endswith('ka'):
- sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
+ sigmas = p.sampler_noise_scheduler_override(steps)
+ elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
+ sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
else:
- sigmas = self.model_wrap.get_sigmas(steps)
+ sigmas = self.model_wrap.get_sigmas(steps)
+
x = x * sigmas[0]
extra_params_kwargs = self.initialize(p)
diff --git a/modules/shared.py b/modules/shared.py
index 9e4860a28..ca2e4c742 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -236,7 +236,6 @@ options_templates.update(options_section(('ui', "User interface"), {
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initialy_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
- "show_karras_scheduler_variants": OptionInfo(True, "Show Karras scheduling variants for select samplers. Try these variants if your K sampled images suffer from excessive noise."),
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
diff --git a/scripts/alternate_sampler_noise_schedules.py b/scripts/alternate_sampler_noise_schedules.py
deleted file mode 100644
index 4f3ed8fb1..000000000
--- a/scripts/alternate_sampler_noise_schedules.py
+++ /dev/null
@@ -1,53 +0,0 @@
-import inspect
-from modules.processing import Processed, process_images
-import gradio as gr
-import modules.scripts as scripts
-import k_diffusion.sampling
-import torch
-
-
-class Script(scripts.Script):
-
- def title(self):
- return "Alternate Sampler Noise Schedules"
-
- def ui(self, is_img2img):
- noise_scheduler = gr.Dropdown(label="Noise Scheduler", choices=['Default','Karras','Exponential', 'Variance Preserving'], value='Default', type="index")
- sched_smin = gr.Slider(value=0.1, label="Sigma min", minimum=0.0, maximum=100.0, step=0.5,)
- sched_smax = gr.Slider(value=10.0, label="Sigma max", minimum=0.0, maximum=100.0, step=0.5)
- sched_rho = gr.Slider(value=7.0, label="Sigma rho (Karras only)", minimum=7.0, maximum=100.0, step=0.5)
- sched_beta_d = gr.Slider(value=19.9, label="Beta distribution (VP only)",minimum=0.0, maximum=40.0, step=0.5)
- sched_beta_min = gr.Slider(value=0.1, label="Beta min (VP only)", minimum=0.0, maximum=40.0, step=0.1)
- sched_eps_s = gr.Slider(value=0.001, label="Epsilon (VP only)", minimum=0.001, maximum=1.0, step=0.001)
-
- return [noise_scheduler, sched_smin, sched_smax, sched_rho, sched_beta_d, sched_beta_min, sched_eps_s]
-
- def run(self, p, noise_scheduler, sched_smin, sched_smax, sched_rho, sched_beta_d, sched_beta_min, sched_eps_s):
-
- noise_scheduler_func_name = ['-','get_sigmas_karras','get_sigmas_exponential','get_sigmas_vp'][noise_scheduler]
-
- base_params = {
- "sigma_min":sched_smin,
- "sigma_max":sched_smax,
- "rho":sched_rho,
- "beta_d":sched_beta_d,
- "beta_min":sched_beta_min,
- "eps_s":sched_eps_s,
- "device":"cuda" if torch.cuda.is_available() else "cpu"
- }
-
- if hasattr(k_diffusion.sampling,noise_scheduler_func_name):
-
- sigma_func = getattr(k_diffusion.sampling,noise_scheduler_func_name)
- sigma_func_kwargs = {}
-
- for k,v in base_params.items():
- if k in inspect.signature(sigma_func).parameters:
- sigma_func_kwargs[k] = v
-
- def substitute_noise_scheduler(n):
- return sigma_func(n,**sigma_func_kwargs)
-
- p.sampler_noise_scheduler_override = substitute_noise_scheduler
-
- return process_images(p)
diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py
index 0ef137f7d..f9894cb01 100644
--- a/scripts/img2imgalt.py
+++ b/scripts/img2imgalt.py
@@ -8,7 +8,6 @@ import gradio as gr
from modules import processing, shared, sd_samplers, prompt_parser
from modules.processing import Processed
-from modules.sd_samplers import samplers
from modules.shared import opts, cmd_opts, state
import torch
@@ -159,7 +158,7 @@ class Script(scripts.Script):
combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5)
- sampler = samplers[p.sampler_index].constructor(p.sd_model)
+ sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, p.sampler_index, p.sd_model)
sigmas = sampler.model_wrap.get_sigmas(p.steps)
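
The refactor above replaces cloned sampler functions with a declarative options dict on SamplerData; create_sampler_with_index then attaches the chosen config to the sampler instance so later code can branch on config.options.get('scheduler'). A stripped-down sketch of the pattern (DummySampler stands in for KDiffusionSampler):

    from collections import namedtuple

    SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options'])

    class DummySampler:
        def __init__(self, funcname):
            self.funcname = funcname
            self.config = None  # filled in by the factory below

    samplers = [
        SamplerData('LMS', lambda: DummySampler('sample_lms'), ['k_lms'], {}),
        SamplerData('LMS Karras', lambda: DummySampler('sample_lms'), ['k_lms_ka'], {'scheduler': 'karras'}),
    ]

    def create_sampler_with_index(configs, index):
        config = configs[index]
        sampler = config.constructor()
        sampler.config = config  # the sampler can now read its own options
        return sampler

    print(create_sampler_with_index(samplers, 1).config.options.get('scheduler'))  # 'karras'
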
From f5490674a8fd84162b4e80c045e675633afb9ee7 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 6 Oct 2022 17:41:49 +0300
Subject: [PATCH 106/460] fix bad output for error when updating a git repo
---
launch.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/launch.py b/launch.py
index 9fe0fd675..75edb66a9 100644
--- a/launch.py
+++ b/launch.py
@@ -89,7 +89,7 @@ def git_clone(url, dir, name, commithash=None):
if commithash is None:
return
- current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, "Couldn't determine {name}'s hash: {commithash}").strip()
+ current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
if current_hash == commithash:
return
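
The one-character fix above is easy to miss: without the f prefix, {name} and {commithash} are written out literally instead of being interpolated. A minimal reproduction (the values are made up):

    name, commithash = "k-diffusion", "f4e9985"
    print("Couldn't determine {name}'s hash: {commithash}")   # the bug: literal braces
    print(f"Couldn't determine {name}'s hash: {commithash}")  # the fix: interpolation
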
From be71115b1a1201d04f0e2a11e718fb31cbd26474 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Thu, 6 Oct 2022 01:09:44 +0100
Subject: [PATCH 107/460] Update shared.py
---
modules/shared.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/shared.py b/modules/shared.py
index ca2e4c742..9f7c6efe5 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -236,6 +236,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initialy_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
+ "show_progress_in_title": OptionInfo(False, "Show generation progress in window title."),
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
From c06298d1d003aa034007978ee7508af636c18124 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Thu, 6 Oct 2022 01:10:38 +0100
Subject: [PATCH 108/460] add check for progress in title setting
---
javascript/progressbar.js | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/javascript/progressbar.js b/javascript/progressbar.js
index 3e3220c3f..f9e9290e2 100644
--- a/javascript/progressbar.js
+++ b/javascript/progressbar.js
@@ -5,7 +5,7 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_inte
var progressbar = gradioApp().getElementById(id_progressbar)
var interrupt = gradioApp().getElementById(id_interrupt)
- if(progressbar && progressbar.offsetParent){
+ if(opts.show_progress_in_title && progressbar && progressbar.offsetParent){
if(progressbar.innerText){
let newtitle = 'Stable Diffusion - ' + progressbar.innerText
if(document.title != newtitle){
From fec71e4de24b65b0f205a3c071b71651bbcb0dfc Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Thu, 6 Oct 2022 01:35:07 +0100
Subject: [PATCH 109/460] Default window title progress updates on
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index 9f7c6efe5..5c16f0257 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -236,7 +236,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initialy_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
- "show_progress_in_title": OptionInfo(False, "Show generation progress in window title."),
+ "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
}))
options_templates.update(options_section(('sampler-params', "Sampler parameters"), {
From 5d0e6ab8567bda2ee8f5ed31f332ca07c1b84b98 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Thu, 6 Oct 2022 04:04:50 +0100
Subject: [PATCH 110/460] Allow escaping of commas in xy_grid
---
scripts/xy_grid.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 1237e754d..210829a79 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -168,6 +168,7 @@ re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d
re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
+re_non_escaped_comma = re.compile(r"(?<!\\),")
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Thu, 6 Oct 2022 11:55:21 +0100
Subject: [PATCH 111/460] use csv.reader
---
scripts/xy_grid.py | 9 ++++-----
1 file changed, 4 insertions(+), 5 deletions(-)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 210829a79..1a625898f 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -1,8 +1,9 @@
from collections import namedtuple
from copy import copy
-from itertools import permutations
+from itertools import permutations, chain
import random
-
+import csv
+from io import StringIO
from PIL import Image
import numpy as np
@@ -168,8 +169,6 @@ re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d
re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
-re_non_escaped_comma = re.compile(r"(?<!\\),")
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Thu, 6 Oct 2022 12:32:17 +0100
Subject: [PATCH 112/460] strip() split comma delimited lines
---
scripts/xy_grid.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 1a625898f..ec27e58bc 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -197,7 +197,7 @@ class Script(scripts.Script):
if opt.label == 'Nothing':
return [0]
- valslist = list(chain.from_iterable(csv.reader(StringIO(s))))
+ valslist = list(map(str.strip,chain.from_iterable(csv.reader(StringIO(s)))))
if opt.type == int:
valslist_ext = []
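
Patches 110-112 migrate the X/Y grid's value splitting from a lookbehind regex (split on commas not preceded by a backslash) to csv.reader, which handles embedded commas via quoting. A sketch contrasting the two approaches:

    import csv
    import re
    from io import StringIO
    from itertools import chain

    # Regex route (patch 110): a backslash escapes the comma.
    re_non_escaped_comma = re.compile(r"(?<!\\),")
    print([x.strip() for x in re_non_escaped_comma.split(r"red\, green, blue")])
    # -> ['red\\, green', 'blue']

    # csv route (patches 111-112): quoting handles embedded commas,
    # and strip() trims the surrounding whitespace.
    print([x.strip() for x in chain.from_iterable(csv.reader(StringIO('"a cat, a dog", plain')))])
    # -> ['a cat, a dog', 'plain']
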
From 82eb8ea452b1e63535c58d15ec6db2ad2342faa8 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Thu, 6 Oct 2022 15:22:51 +0100
Subject: [PATCH 113/460] Update xy_grid.py
split vals not 's' from tests
---
scripts/xy_grid.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index ec27e58bc..210c7b6e9 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -197,7 +197,7 @@ class Script(scripts.Script):
if opt.label == 'Nothing':
return [0]
- valslist = list(map(str.strip,chain.from_iterable(csv.reader(StringIO(s)))))
+ valslist = list(map(str.strip,chain.from_iterable(csv.reader(StringIO(vals)))))
if opt.type == int:
valslist_ext = []
From 0bb458f0ca06a7be27cf1a1003c536d1f06a5bd3 Mon Sep 17 00:00:00 2001
From: Milly
Date: Wed, 5 Oct 2022 01:19:50 +0900
Subject: [PATCH 114/460] Removed duplicate image saving codes
Use `modules.images.save_image()` instead.
---
modules/images.py | 7 ++++---
modules/ui.py | 46 ++++++++++------------------------------------
2 files changed, 14 insertions(+), 39 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index c2fadab99..810f1446e 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -353,7 +353,7 @@ def get_next_sequence_number(path, basename):
return result + 1
-def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix=""):
+def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None):
if short_filename or prompt is None or seed is None:
file_decoration = ""
elif opts.save_to_dirs:
@@ -377,7 +377,8 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
else:
pnginfo = None
- save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
+ if save_to_dirs is None:
+ save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
if save_to_dirs:
dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt).strip('\\ /')
@@ -431,4 +432,4 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
with open(f"{fullfn_without_extension}.txt", "w", encoding="utf8") as file:
file.write(info + "\n")
-
+ return fullfn
diff --git a/modules/ui.py b/modules/ui.py
index 9620350fc..4f18126fb 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -35,7 +35,7 @@ import modules.codeformer_model
import modules.styles
import modules.generation_parameters_copypaste
from modules import prompt_parser
-from modules.images import apply_filename_pattern, get_next_sequence_number
+from modules.images import save_image
import modules.textual_inversion.ui
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
@@ -114,20 +114,13 @@ def save_files(js_data, images, index):
p = MyObject(data)
path = opts.outdir_save
save_to_dirs = opts.use_save_to_dirs_for_ui
-
- if save_to_dirs:
- dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, p.seed, p.prompt)
- path = os.path.join(opts.outdir_save, dirname)
-
- os.makedirs(path, exist_ok=True)
-
+ extension: str = opts.samples_format
+ start_index = 0
if index > -1 and opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only
images = [images[index]]
- infotexts = [data["infotexts"][index]]
- else:
- infotexts = data["infotexts"]
+ start_index = index
with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file:
at_start = file.tell() == 0
@@ -135,37 +128,18 @@ def save_files(js_data, images, index):
if at_start:
writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"])
- file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]"
- if file_decoration != "":
- file_decoration = "-" + file_decoration.lower()
- file_decoration = apply_filename_pattern(file_decoration, p, p.seed, p.prompt)
- truncated = (file_decoration[:240] + '..') if len(file_decoration) > 240 else file_decoration
- filename_base = truncated
- extension = opts.samples_format.lower()
-
- basecount = get_next_sequence_number(path, "")
- for i, filedata in enumerate(images):
- file_number = f"{basecount+i:05}"
- filename = file_number + filename_base + f".{extension}"
- filepath = os.path.join(path, filename)
-
-
+ for image_index, filedata in enumerate(images, start_index):
if filedata.startswith("data:image/png;base64,"):
filedata = filedata[len("data:image/png;base64,"):]
image = Image.open(io.BytesIO(base64.decodebytes(filedata.encode('utf-8'))))
- if opts.enable_pnginfo and extension == 'png':
- pnginfo = PngImagePlugin.PngInfo()
- pnginfo.add_text('parameters', infotexts[i])
- image.save(filepath, pnginfo=pnginfo)
- else:
- image.save(filepath, quality=opts.jpeg_quality)
- if opts.enable_pnginfo and extension in ("jpg", "jpeg", "webp"):
- piexif.insert(piexif.dump({"Exif": {
- piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(infotexts[i], encoding="unicode")
- }}), filepath)
+ is_grid = image_index < p.index_of_first_image
+ i = 0 if is_grid else (image_index - p.index_of_first_image)
+ fullfn = save_image(image, path, "", seed=p.all_seeds[i], prompt=p.all_prompts[i], extension=extension, info=p.infotexts[image_index], grid=is_grid, p=p, save_to_dirs=save_to_dirs)
+
+ filename = os.path.relpath(fullfn, path)
filenames.append(filename)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
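
The refactor above routes the UI save button through modules.images.save_image() and threads a tri-state save_to_dirs through it: None means "derive from the usual options", while the UI passes an explicit boolean. A sketch of that resolution logic, mirroring the condition in save_image() (opts here is a stand-in namespace):

    from types import SimpleNamespace

    opts = SimpleNamespace(grid_save_to_dirs=True, save_to_dirs=False)

    def resolve_save_to_dirs(grid=False, no_prompt=False, override=None):
        # None = derive from options, as in save_image(); an explicit bool wins.
        if override is not None:
            return override
        return (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)

    print(resolve_save_to_dirs(grid=True))      # True, from grid_save_to_dirs
    print(resolve_save_to_dirs())               # False, from save_to_dirs
    print(resolve_save_to_dirs(override=True))  # True, forced by the UI save path
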
From 1069ec49a35d04c1e85c92534e92a2d6aa59cb75 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 6 Oct 2022 20:16:21 +0300
Subject: [PATCH 115/460] revert back to using list comprehension rather than
list and map
---
scripts/xy_grid.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 210c7b6e9..6344e612f 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -197,7 +197,7 @@ class Script(scripts.Script):
if opt.label == 'Nothing':
return [0]
- valslist = list(map(str.strip,chain.from_iterable(csv.reader(StringIO(vals)))))
+ valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals)))]
if opt.type == int:
valslist_ext = []
From dbc8a4d35129b08eab30776bbbaf3a2e7ac10a6c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 6 Oct 2022 20:27:50 +0300
Subject: [PATCH 116/460] add generation parameters to images shown in web ui
---
modules/processing.py | 8 ++++++--
1 file changed, 6 insertions(+), 2 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index de818d5b9..8faf90956 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -430,7 +430,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if opts.samples_save and not p.do_not_save_samples:
images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)
- infotexts.append(infotext(n, i))
+ text = infotext(n, i)
+ infotexts.append(text)
+ image.info["parameters"] = text
output_images.append(image)
del x_samples_ddim
@@ -447,7 +449,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
grid = images.image_grid(output_images, p.batch_size)
if opts.return_grid:
- infotexts.insert(0, infotext())
+ text = infotext()
+ infotexts.insert(0, text)
+ grid.info["parameters"] = text
output_images.insert(0, grid)
index_of_first_image = 1
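
Storing the infotext in image.info makes the parameters available to the running UI, but PIL's info dict is in-memory only; persisting it to a PNG requires an explicit text chunk. A sketch of both halves (the file path is illustrative):

    from PIL import Image
    from PIL.PngImagePlugin import PngInfo

    image = Image.new("RGB", (64, 64))
    image.info["parameters"] = "a cat, Steps: 20, Sampler: Euler a"  # in-memory only

    pnginfo = PngInfo()
    pnginfo.add_text("parameters", image.info["parameters"])
    image.save("/tmp/sample.png", pnginfo=pnginfo)  # persists the tEXt chunk

    print(Image.open("/tmp/sample.png").info.get("parameters"))
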
From cf7c784fcc0c84a8a4edd8d3aca4dda4c7025c43 Mon Sep 17 00:00:00 2001
From: Milly
Date: Fri, 7 Oct 2022 00:19:52 +0900
Subject: [PATCH 117/460] Removed duplicate defined models_path
Use `modules.paths.models_path` instead of `modules.shared.model_path`.
---
modules/shared.py | 19 +++++++++----------
1 file changed, 9 insertions(+), 10 deletions(-)
diff --git a/modules/shared.py b/modules/shared.py
index 5c16f0257..25bb6e6c9 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -14,11 +14,10 @@ import modules.sd_models
import modules.styles
import modules.devices as devices
from modules import sd_samplers
-from modules.paths import script_path, sd_path
+from modules.paths import models_path, script_path, sd_path
sd_model_file = os.path.join(script_path, 'model.ckpt')
default_sd_model_file = sd_model_file
-model_path = os.path.join(script_path, 'models')
parser = argparse.ArgumentParser()
parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",)
parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
@@ -36,14 +35,14 @@ parser.add_argument("--always-batch-cond-uncond", action='store_true', help="dis
parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)")
-parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(model_path, 'Codeformer'))
-parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(model_path, 'GFPGAN'))
-parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(model_path, 'ESRGAN'))
-parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(model_path, 'BSRGAN'))
-parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(model_path, 'RealESRGAN'))
-parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(model_path, 'ScuNET'))
-parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(model_path, 'SwinIR'))
-parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(model_path, 'LDSR'))
+parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer'))
+parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN'))
+parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN'))
+parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN'))
+parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN'))
+parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET'))
+parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
+parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
From 070b7d60cf5dac6387b3bfc8f3b3977b620e4fd5 Mon Sep 17 00:00:00 2001
From: Milly
Date: Wed, 5 Oct 2022 02:13:09 +0900
Subject: [PATCH 118/460] Added styles to Processed
So the `[styles]` pattern can be used in the image saving UI.
---
modules/images.py | 7 +------
modules/processing.py | 2 ++
2 files changed, 3 insertions(+), 6 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index 810f1446e..fa0714fd1 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -292,12 +292,7 @@ def apply_filename_pattern(x, p, seed, prompt):
x = x.replace("[cfg]", str(p.cfg_scale))
x = x.replace("[width]", str(p.width))
x = x.replace("[height]", str(p.height))
-
- #currently disabled if using the save button, will work otherwise
- # if enabled it will cause a bug because styles is not included in the save_files data dictionary
- if hasattr(p, "styles"):
- x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"]) or "None", replace_spaces=False))
-
+ x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"]) or "None", replace_spaces=False))
x = x.replace("[sampler]", sanitize_filename_part(sd_samplers.samplers[p.sampler_index].name, replace_spaces=False))
x = x.replace("[model_hash]", shared.sd_model.sd_model_hash)
diff --git a/modules/processing.py b/modules/processing.py
index 8faf90956..706dbfa87 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -121,6 +121,7 @@ class Processed:
self.denoising_strength = getattr(p, 'denoising_strength', None)
self.extra_generation_params = p.extra_generation_params
self.index_of_first_image = index_of_first_image
+ self.styles = p.styles
self.eta = p.eta
self.ddim_discretize = p.ddim_discretize
@@ -165,6 +166,7 @@ class Processed:
"extra_generation_params": self.extra_generation_params,
"index_of_first_image": self.index_of_first_image,
"infotexts": self.infotexts,
+ "styles": self.styles,
}
return json.dumps(obj)
From 1cc36d170ac15e7f04208df32db27af1b10c867c Mon Sep 17 00:00:00 2001
From: Milly
Date: Wed, 5 Oct 2022 02:17:15 +0900
Subject: [PATCH 119/460] Added job_timestamp to Processed
So the `[job_timestamp]` pattern can be used in the image saving UI.
---
modules/images.py | 2 +-
modules/processing.py | 2 ++
2 files changed, 3 insertions(+), 1 deletion(-)
diff --git a/modules/images.py b/modules/images.py
index fa0714fd1..669d76af6 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -298,7 +298,7 @@ def apply_filename_pattern(x, p, seed, prompt):
x = x.replace("[model_hash]", shared.sd_model.sd_model_hash)
x = x.replace("[date]", datetime.date.today().isoformat())
x = x.replace("[datetime]", datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
- x = x.replace("[job_timestamp]", shared.state.job_timestamp)
+ x = x.replace("[job_timestamp]", getattr(p, "job_timestamp", shared.state.job_timestamp))
# Apply [prompt] at last. Because it may contain any replacement word.^M
if prompt is not None:
diff --git a/modules/processing.py b/modules/processing.py
index 706dbfa87..f773a30ef 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -122,6 +122,7 @@ class Processed:
self.extra_generation_params = p.extra_generation_params
self.index_of_first_image = index_of_first_image
self.styles = p.styles
+ self.job_timestamp = state.job_timestamp
self.eta = p.eta
self.ddim_discretize = p.ddim_discretize
@@ -167,6 +168,7 @@ class Processed:
"index_of_first_image": self.index_of_first_image,
"infotexts": self.infotexts,
"styles": self.styles,
+ "job_timestamp": self.job_timestamp,
}
return json.dumps(obj)
From 405c8171d1acbb994084d98770bbcb97d01d9406 Mon Sep 17 00:00:00 2001
From: Milly
Date: Thu, 6 Oct 2022 00:59:04 +0900
Subject: [PATCH 120/460] Prefer using `Processed.sd_model_hash` attribute when
 applying the filename pattern
---
modules/images.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/images.py b/modules/images.py
index 669d76af6..29c5ee249 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -295,7 +295,7 @@ def apply_filename_pattern(x, p, seed, prompt):
x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"]) or "None", replace_spaces=False))
x = x.replace("[sampler]", sanitize_filename_part(sd_samplers.samplers[p.sampler_index].name, replace_spaces=False))
- x = x.replace("[model_hash]", shared.sd_model.sd_model_hash)
+ x = x.replace("[model_hash]", getattr(p, "sd_model_hash", shared.sd_model.sd_model_hash))
x = x.replace("[date]", datetime.date.today().isoformat())
x = x.replace("[datetime]", datetime.datetime.now().strftime("%Y%m%d%H%M%S"))
x = x.replace("[job_timestamp]", getattr(p, "job_timestamp", shared.state.job_timestamp))
From b34b25b4c941819d34f29be6c4c1ec01e64585b4 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 6 Oct 2022 23:27:01 +0300
Subject: [PATCH 121/460] karras samplers for img2img?
---
modules/sd_samplers.py | 6 ++++--
1 file changed, 4 insertions(+), 2 deletions(-)
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 497df9430..df17e93ca 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -338,9 +338,11 @@ class KDiffusionSampler:
steps, t_enc = setup_img2img_steps(p, steps)
if p.sampler_noise_scheduler_override:
- sigmas = p.sampler_noise_scheduler_override(steps)
+ sigmas = p.sampler_noise_scheduler_override(steps)
+ elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
+ sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
else:
- sigmas = self.model_wrap.get_sigmas(steps)
+ sigmas = self.model_wrap.get_sigmas(steps)
noise = noise * sigmas[steps - t_enc - 1]
xi = x + noise
From 2995107fa24cfd72b0a991e18271dcde148c2807 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 6 Oct 2022 23:44:54 +0300
Subject: [PATCH 122/460] added ctrl+up or ctrl+down hotkeys for attention
---
README.md | 4 ++++
javascript/edit-attention.js | 41 ++++++++++++++++++++++++++++++++++++
2 files changed, 45 insertions(+)
create mode 100644 javascript/edit-attention.js
diff --git a/README.md b/README.md
index ec3d7532d..a14a63306 100644
--- a/README.md
+++ b/README.md
@@ -16,6 +16,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- Attention, specify parts of text that the model should pay more attention to
- a man in a ((tuxedo)) - will pay more attention to tuxedo
- a man in a (tuxedo:1.21) - alternative syntax
+ - select text and press ctrl+up or ctrl+down to automatically adjust attention to selected text
- Loopback, run img2img processing multiple times
- X/Y plot, a way to draw a 2 dimensional plot of images with different parameters
- Textual Inversion
@@ -61,6 +62,9 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- Reloading checkpoints on the fly
- Checkpoint Merger, a tab that allows you to merge two checkpoints into one
- [Custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Scripts) with many extensions from community
+- [Composable-Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/), a way to use multiple prompts at once
+ - separate prompts using uppercase `AND`
+ - also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2`
## Installation and Running
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js
new file mode 100644
index 000000000..c67ed5794
--- /dev/null
+++ b/javascript/edit-attention.js
@@ -0,0 +1,41 @@
+addEventListener('keydown', (event) => {
+ let target = event.originalTarget;
+ if (!target.hasAttribute("placeholder")) return;
+ if (!target.placeholder.toLowerCase().includes("prompt")) return;
+
+ let plus = "ArrowUp"
+ let minus = "ArrowDown"
+ if (event.key != plus && event.key != minus) return;
+
+ selectionStart = target.selectionStart;
+ selectionEnd = target.selectionEnd;
+ if(selectionStart == selectionEnd) return;
+
+ event.preventDefault();
+
+ if (selectionStart == 0 || target.value[selectionStart - 1] != "(") {
+ target.value = target.value.slice(0, selectionStart) +
+ "(" + target.value.slice(selectionStart, selectionEnd) + ":1.0)" +
+ target.value.slice(selectionEnd);
+
+ target.focus();
+ target.selectionStart = selectionStart + 1;
+ target.selectionEnd = selectionEnd + 1;
+
+ } else {
+ end = target.value.slice(selectionEnd + 1).indexOf(")") + 1;
+ weight = parseFloat(target.value.slice(selectionEnd + 1, selectionEnd + 1 + end));
+ if (event.key == minus) weight -= 0.1;
+ if (event.key == plus) weight += 0.1;
+
+ weight = parseFloat(weight.toPrecision(12));
+
+ target.value = target.value.slice(0, selectionEnd + 1) +
+ weight +
+ target.value.slice(selectionEnd + 1 + end - 1);
+
+ target.focus();
+ target.selectionStart = selectionStart;
+ target.selectionEnd = selectionEnd;
+ }
+});
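
The script above wraps the selected prompt text as (text:1.0) on the first keypress, then nudges the trailing weight by 0.1 per press. A rough Python analogue of that text transformation (the real logic lives in javascript/edit-attention.js; this sketch simplifies its slicing arithmetic):

    import re

    def adjust_attention(prompt, sel_start, sel_end, delta=0.1):
        # Not wrapped yet: wrap the selection with an explicit 1.0 weight.
        if sel_start == 0 or prompt[sel_start - 1] != "(":
            return prompt[:sel_start] + "(" + prompt[sel_start:sel_end] + ":1.0)" + prompt[sel_end:]
        # Already wrapped: bump the weight that follows the selection.
        m = re.match(r":([0-9.]+)\)", prompt[sel_end:])
        if not m:
            return prompt
        weight = round(float(m.group(1)) + delta, 12)
        return prompt[:sel_end] + f":{weight})" + prompt[sel_end + m.end():]

    p = adjust_attention("a man in a tuxedo", 11, 17)
    print(p)                            # a man in a (tuxedo:1.0)
    print(adjust_attention(p, 12, 18))  # a man in a (tuxedo:1.1)
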
From f174fb29228a04955fb951b32b0bab79e33ec2b8 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Fri, 7 Oct 2022 05:21:49 +0300
Subject: [PATCH 123/460] add xformers attention
---
modules/sd_hijack_optimizations.py | 39 +++++++++++++++++++++++++++++-
1 file changed, 38 insertions(+), 1 deletion(-)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index ea4cfdfcd..da1b76e1c 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -1,7 +1,9 @@
import math
import torch
from torch import einsum
-
+import xformers.ops
+import functorch
+xformers._is_functorch_available=True
from ldm.util import default
from einops import rearrange
@@ -92,6 +94,41 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
return self.to_out(r2)
+def _maybe_init(self, x):
+ """
+ Initialize the attention operator, if required. We expect the head dimension
+ to be exposed here, meaning that x has shape (B, Head, Length).
+ """
+ if self.attention_op is not None:
+ return
+ _, M, K = x.shape
+ try:
+ self.attention_op = xformers.ops.AttentionOpDispatch(
+ dtype=x.dtype,
+ device=x.device,
+ k=K,
+ attn_bias_type=type(None),
+ has_dropout=False,
+ kv_len=M,
+ q_len=M,
+ ).op
+ except NotImplementedError as err:
+ raise NotImplementedError(f"Please install xformers with the flash attention / cutlass components.\n{err}")
+
+def xformers_attention_forward(self, x, context=None, mask=None):
+ h = self.heads
+ q_in = self.to_q(x)
+ context = default(context, x)
+ k_in = self.to_k(context)
+ v_in = self.to_v(context)
+ q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
+ del q_in, k_in, v_in
+ self._maybe_init(q)
+ out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)
+
+ out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
+ return self.to_out(out)
+
def cross_attention_attnblock_forward(self, x):
h_ = x
h_ = self.norm(h_)
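
xformers_attention_forward flattens the head dimension into the batch, hands q/k/v to xformers, and lets the fused kernel compute softmax(q @ k.T / sqrt(d)) @ v without materializing the full attention matrix. A minimal standalone sketch of the call (requires a working CUDA build of xformers; shapes and dtype are illustrative -- note the AttentionOpDispatch-based operator selection in the patch was the API of this era, while newer xformers picks a kernel automatically):

    import torch
    import xformers.ops

    B, H, N, D = 2, 8, 77, 40  # batch, heads, tokens, per-head dim
    q = torch.randn(B * H, N, D, device="cuda", dtype=torch.float16)
    k = torch.randn(B * H, N, D, device="cuda", dtype=torch.float16)
    v = torch.randn(B * H, N, D, device="cuda", dtype=torch.float16)

    # Fused, memory-efficient attention; scaling by 1/sqrt(D) is the default.
    out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
    print(out.shape)  # torch.Size([16, 77, 40])
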
From 2eb911b056ce6ff4434f673366782ed34f2b2f12 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Fri, 7 Oct 2022 05:22:28 +0300
Subject: [PATCH 124/460] Update sd_hijack.py
---
modules/sd_hijack.py | 13 +++++++++----
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index a6fa890c4..6221ed5ac 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -20,12 +20,17 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At
def apply_optimizations():
- ldm.modules.diffusionmodules.model.nonlinearity = silu
-
if cmd_opts.opt_split_attention_v1:
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
- elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
- ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
+ if cmd_opts.opt_split_attention:
+ ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward
+ ldm.modules.diffusionmodules.model.nonlinearity = sd_hijack_optimizations.nonlinearity_hijack
+ ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
+ elif not cmd_opts.disable_opt_xformers_attention:
+ ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
+ ldm.modules.attention.CrossAttention._maybe_init = sd_hijack_optimizations._maybe_init
+ ldm.modules.attention.CrossAttention.attention_op = None
+ ldm.modules.diffusionmodules.model.nonlinearity = sd_hijack_optimizations.nonlinearity_hijack
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
From da4ab2707b4cb0611cf181ba248a271d1937433e Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Fri, 7 Oct 2022 05:23:06 +0300
Subject: [PATCH 125/460] Update shared.py
---
modules/shared.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/shared.py b/modules/shared.py
index 25bb6e6c9..8cc3b2fe2 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -43,6 +43,7 @@ parser.add_argument("--realesrgan-models-path", type=str, help="Path to director
parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET'))
parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
+parser.add_argument("--disable-opt-xformers-attention", action='store_true', help="force-disables xformers attention optimization")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
From cd8bb597c6bcb6c59b538b7a1ab8f2face764fc5 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Fri, 7 Oct 2022 05:23:25 +0300
Subject: [PATCH 126/460] Update requirements.txt
---
requirements.txt | 2 ++
1 file changed, 2 insertions(+)
diff --git a/requirements.txt b/requirements.txt
index 631fe616a..304a066a3 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -23,3 +23,5 @@ resize-right
torchdiffeq
kornia
lark
+functorch
+#xformers?
From 35d6b231628d18d53d166c3a92fea1523e88d51e Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Fri, 7 Oct 2022 05:31:53 +0300
Subject: [PATCH 127/460] Update sd_hijack.py
---
modules/sd_hijack.py | 3 +--
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 6221ed5ac..a006c0a3b 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -20,17 +20,16 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At
def apply_optimizations():
+ ldm.modules.diffusionmodules.model.nonlinearity = silu
if cmd_opts.opt_split_attention_v1:
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
if cmd_opts.opt_split_attention:
ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward
- ldm.modules.diffusionmodules.model.nonlinearity = sd_hijack_optimizations.nonlinearity_hijack
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
elif not cmd_opts.disable_opt_xformers_attention:
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.attention.CrossAttention._maybe_init = sd_hijack_optimizations._maybe_init
ldm.modules.attention.CrossAttention.attention_op = None
- ldm.modules.diffusionmodules.model.nonlinearity = sd_hijack_optimizations.nonlinearity_hijack
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
From 5303df24282ba06abb34a423f2967354d37d078e Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Fri, 7 Oct 2022 06:01:14 +0300
Subject: [PATCH 128/460] Update sd_hijack.py
---
modules/sd_hijack.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index a006c0a3b..ddacb0ad8 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -23,10 +23,10 @@ def apply_optimizations():
ldm.modules.diffusionmodules.model.nonlinearity = silu
if cmd_opts.opt_split_attention_v1:
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
- if cmd_opts.opt_split_attention:
+ elif cmd_opts.opt_split_attention:
ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
- elif not cmd_opts.disable_opt_xformers_attention:
+ elif not cmd_opts.disable_opt_xformers_attention and not cmd_opts.opt_split_attention:
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.attention.CrossAttention._maybe_init = sd_hijack_optimizations._maybe_init
ldm.modules.attention.CrossAttention.attention_op = None
From 5e3ff846c56dc8e1d5c76ea04a8f2f74d7da07fc Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Fri, 7 Oct 2022 06:38:01 +0300
Subject: [PATCH 129/460] Update sd_hijack.py
---
modules/sd_hijack.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index ddacb0ad8..cbdb9d3c7 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -26,7 +26,7 @@ def apply_optimizations():
elif cmd_opts.opt_split_attention:
ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
- elif not cmd_opts.disable_opt_xformers_attention and not cmd_opts.opt_split_attention:
+ elif not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip):
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.attention.CrossAttention._maybe_init = sd_hijack_optimizations._maybe_init
ldm.modules.attention.CrossAttention.attention_op = None
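
A caution about the dispatch chain that patches 124-129 keep reworking: the line ldm.modules.attention_CrossAttention_forward = ... binds a new attribute on the ldm.modules package rather than patching the class, so the --opt-split-attention branch does not actually replace CrossAttention.forward at this point in the series. A sketch of what the chain is evidently meant to do, written as mutually exclusive class patches (the function signature here is illustrative):

    def apply_optimizations(cmd_opts, CrossAttention, opt, hip=False):
        # Exactly one implementation should win; if/elif order encodes priority.
        if cmd_opts.opt_split_attention_v1:
            CrossAttention.forward = opt.split_cross_attention_forward_v1
        elif cmd_opts.opt_split_attention:
            # Patch the class attribute -- assigning to a module-level name
            # with a similar spelling has no effect on attention.
            CrossAttention.forward = opt.split_cross_attention_forward
        elif not cmd_opts.disable_opt_xformers_attention and not hip:
            CrossAttention.forward = opt.xformers_attention_forward
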
From bad7cb29cecac51c5c0f39afec332b007ed73133 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 7 Oct 2022 10:17:52 +0300
Subject: [PATCH 130/460] added support for hypernetworks (???)
---
modules/hypernetwork.py | 55 ++++++++++++++++++++++++++++++
modules/sd_hijack_optimizations.py | 17 +++++++--
modules/shared.py | 9 ++++-
scripts/xy_grid.py | 10 ++++++
4 files changed, 88 insertions(+), 3 deletions(-)
create mode 100644 modules/hypernetwork.py
diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py
new file mode 100644
index 000000000..9ed1eed9b
--- /dev/null
+++ b/modules/hypernetwork.py
@@ -0,0 +1,55 @@
+import glob
+import os
+import torch
+from modules import devices
+
+
+class HypernetworkModule(torch.nn.Module):
+ def __init__(self, dim, state_dict):
+ super().__init__()
+
+ self.linear1 = torch.nn.Linear(dim, dim * 2)
+ self.linear2 = torch.nn.Linear(dim * 2, dim)
+
+ self.load_state_dict(state_dict, strict=True)
+ self.to(devices.device)
+
+ def forward(self, x):
+ return x + (self.linear2(self.linear1(x)))
+
+
+class Hypernetwork:
+ filename = None
+ name = None
+
+ def __init__(self, filename):
+ self.filename = filename
+ self.name = os.path.splitext(os.path.basename(filename))[0]
+ self.layers = {}
+
+ state_dict = torch.load(filename, map_location='cpu')
+ for size, sd in state_dict.items():
+ self.layers[size] = (HypernetworkModule(size, sd[0]), HypernetworkModule(size, sd[1]))
+
+
+def load_hypernetworks(path):
+ res = {}
+
+ for filename in glob.iglob(path + '**/*.pt', recursive=True):
+ hn = Hypernetwork(filename)
+ res[hn.name] = hn
+
+ return res
+
+def apply(self, x, context=None, mask=None, original=None):
+
+
+ if CrossAttention.hypernetwork is not None and context.shape[2] in CrossAttention.hypernetwork:
+ if context.shape[1] == 77 and CrossAttention.noise_cond:
+ context = context + (torch.randn_like(context) * 0.1)
+ h_k, h_v = CrossAttention.hypernetwork[context.shape[2]]
+ k = self.to_k(h_k(context))
+ v = self.to_v(h_v(context))
+ else:
+ k = self.to_k(context)
+ v = self.to_v(context)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index ea4cfdfcd..d9cca4851 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -5,6 +5,8 @@ from torch import einsum
from ldm.util import default
from einops import rearrange
+from modules import shared
+
# see https://github.com/basujindal/stable-diffusion/pull/117 for discussion
def split_cross_attention_forward_v1(self, x, context=None, mask=None):
@@ -42,8 +44,19 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
q_in = self.to_q(x)
context = default(context, x)
- k_in = self.to_k(context) * self.scale
- v_in = self.to_v(context)
+
+ hypernetwork = shared.selected_hypernetwork()
+ hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
+
+ if hypernetwork_layers is not None:
+ k_in = self.to_k(hypernetwork_layers[0](context))
+ v_in = self.to_v(hypernetwork_layers[1](context))
+ else:
+ k_in = self.to_k(context)
+ v_in = self.to_v(context)
+
+ k_in *= self.scale
+
del context, x
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
diff --git a/modules/shared.py b/modules/shared.py
index 25bb6e6c9..879d8424a 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -13,7 +13,7 @@ import modules.memmon
import modules.sd_models
import modules.styles
import modules.devices as devices
-from modules import sd_samplers
+from modules import sd_samplers, hypernetwork
from modules.paths import models_path, script_path, sd_path
sd_model_file = os.path.join(script_path, 'model.ckpt')
@@ -76,6 +76,12 @@ parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
config_filename = cmd_opts.ui_settings_file
+hypernetworks = hypernetwork.load_hypernetworks(os.path.join(models_path, 'hypernetworks'))
+
+
+def selected_hypernetwork():
+ return hypernetworks.get(opts.sd_hypernetwork, None)
+
class State:
interrupted = False
@@ -206,6 +212,7 @@ options_templates.update(options_section(('system', "System"), {
options_templates.update(options_section(('sd', "Stable Diffusion"), {
"sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}),
+ "sd_hypernetwork": OptionInfo("None", "Stable Diffusion finetune hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}),
"img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."),
"save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"),
"img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."),
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 6344e612f..c0c364df8 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -77,6 +77,11 @@ def apply_checkpoint(p, x, xs):
modules.sd_models.reload_model_weights(shared.sd_model, info)
+def apply_hypernetwork(p, x, xs):
+ hn = shared.hypernetworks.get(x, None)
+ opts.data["sd_hypernetwork"] = hn.name if hn is not None else 'None'
+
+
def format_value_add_label(p, opt, x):
if type(x) == float:
x = round(x, 8)
@@ -122,6 +127,7 @@ axis_options = [
AxisOption("Prompt order", str_permutations, apply_order, format_value_join_list),
AxisOption("Sampler", str, apply_sampler, format_value),
AxisOption("Checkpoint name", str, apply_checkpoint, format_value),
+ AxisOption("Hypernetwork", str, apply_hypernetwork, format_value),
AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label),
AxisOption("Sigma min", float, apply_field("s_tmin"), format_value_add_label),
AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label),
@@ -193,6 +199,8 @@ class Script(scripts.Script):
modules.processing.fix_seed(p)
p.batch_size = 1
+ initial_hn = opts.sd_hypernetwork
+
def process_axis(opt, vals):
if opt.label == 'Nothing':
return [0]
@@ -300,4 +308,6 @@ class Script(scripts.Script):
# restore checkpoint in case it was changed by axes
modules.sd_models.reload_model_weights(shared.sd_model)
+ opts.data["sd_hypernetwork"] = initial_hn
+
return processed
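
For reference, the mechanism this patch introduces is small: per attention width it stores a pair of residual two-layer MLPs, and the cross-attention context is passed through them before the to_k/to_v projections. (The standalone apply() in the new file references CrossAttention.hypernetwork and noise_cond, which do not exist in this module; patch 133 below replaces it.) A minimal standalone sketch of the transform, with illustrative names that are not part of the patch:

import torch

class ResidualMLP(torch.nn.Module):
    # mirrors HypernetworkModule: dim -> 2*dim -> dim, added back onto the input
    def __init__(self, dim):
        super().__init__()
        self.linear1 = torch.nn.Linear(dim, dim * 2)
        self.linear2 = torch.nn.Linear(dim * 2, dim)

    def forward(self, x):
        return x + self.linear2(self.linear1(x))

# one (k, v) pair per context width, keyed like Hypernetwork.layers
layers = {768: (ResidualMLP(768), ResidualMLP(768))}

context = torch.randn(2, 77, 768)  # (batch, tokens, width)
pair = layers.get(context.shape[2])
if pair is not None:
    context_k, context_v = pair[0](context), pair[1](context)
else:
    context_k = context_v = context
# context_k / context_v then feed CrossAttention's to_k / to_v projections
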
From d15b3ec0013c10f02f0fb80e8448bac8872a151f Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 7 Oct 2022 10:40:22 +0300
Subject: [PATCH 131/460] support loading VAE
---
modules/sd_models.py | 8 ++++++++
1 file changed, 8 insertions(+)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 5f9920647..8f794b479 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -134,6 +134,14 @@ def load_model_weights(model, checkpoint_file, sd_model_hash):
devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16
+ vae_file = os.path.splitext(checkpoint_file)[0] + ".vae.pt"
+ if os.path.exists(vae_file):
+ print(f"Loading VAE weights from: {vae_file}")
+ vae_ckpt = torch.load(vae_file, map_location="cpu")
+ vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
+
+ model.first_stage_model.load_state_dict(vae_dict)
+
model.sd_model_hash = sd_model_hash
model.sd_model_checkpint = checkpoint_file
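
The convention here is a sidecar file named like the checkpoint but ending in .vae.pt, whose state_dict replaces the first-stage autoencoder weights, with loss-prefixed keys dropped. The same lookup in isolation (maybe_load_vae is an illustrative name, not part of the patch):

import os
import torch

def maybe_load_vae(model, checkpoint_file):
    # look for "<checkpoint>.vae.pt" next to the main checkpoint
    vae_file = os.path.splitext(checkpoint_file)[0] + ".vae.pt"
    if not os.path.exists(vae_file):
        return
    vae_ckpt = torch.load(vae_file, map_location="cpu")
    # keep only autoencoder weights; training-time loss modules are dropped
    vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if not k.startswith("loss")}
    model.first_stage_model.load_state_dict(vae_dict)
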
From 97bc0b9504572d2df80598d0b694703bcd626de6 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 7 Oct 2022 13:22:50 +0300
Subject: [PATCH 132/460] do not stop working on failed hypernetwork load
---
modules/hypernetwork.py | 11 +++++++++--
1 file changed, 9 insertions(+), 2 deletions(-)
diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py
index 9ed1eed9b..c5cf4afa4 100644
--- a/modules/hypernetwork.py
+++ b/modules/hypernetwork.py
@@ -1,5 +1,8 @@
import glob
import os
+import sys
+import traceback
+
import torch
from modules import devices
@@ -36,8 +39,12 @@ def load_hypernetworks(path):
res = {}
for filename in glob.iglob(path + '**/*.pt', recursive=True):
- hn = Hypernetwork(filename)
- res[hn.name] = hn
+ try:
+ hn = Hypernetwork(filename)
+ res[hn.name] = hn
+ except Exception:
+ print(f"Error loading hypernetwork {filename}", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
return res
From f7c787eb7c295c27439f4fbdf78c26b8389560be Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 7 Oct 2022 16:39:51 +0300
Subject: [PATCH 133/460] make it possible to use hypernetworks without opt
split attention
---
modules/hypernetwork.py | 42 +++++++++++++++++++++++++++++++++--------
modules/sd_hijack.py | 6 ++++--
2 files changed, 38 insertions(+), 10 deletions(-)
diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py
index c5cf4afa4..c7b866829 100644
--- a/modules/hypernetwork.py
+++ b/modules/hypernetwork.py
@@ -4,7 +4,12 @@ import sys
import traceback
import torch
-from modules import devices
+
+from ldm.util import default
+from modules import devices, shared
+import torch
+from torch import einsum
+from einops import rearrange, repeat
class HypernetworkModule(torch.nn.Module):
@@ -48,15 +53,36 @@ def load_hypernetworks(path):
return res
-def apply(self, x, context=None, mask=None, original=None):
+def attention_CrossAttention_forward(self, x, context=None, mask=None):
+ h = self.heads
- if CrossAttention.hypernetwork is not None and context.shape[2] in CrossAttention.hypernetwork:
- if context.shape[1] == 77 and CrossAttention.noise_cond:
- context = context + (torch.randn_like(context) * 0.1)
- h_k, h_v = CrossAttention.hypernetwork[context.shape[2]]
- k = self.to_k(h_k(context))
- v = self.to_v(h_v(context))
+ q = self.to_q(x)
+ context = default(context, x)
+
+ hypernetwork = shared.selected_hypernetwork()
+ hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
+
+ if hypernetwork_layers is not None:
+ k = self.to_k(hypernetwork_layers[0](context))
+ v = self.to_v(hypernetwork_layers[1](context))
else:
k = self.to_k(context)
v = self.to_v(context)
+
+ q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+
+ sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
+
+ if mask is not None:
+ mask = rearrange(mask, 'b ... -> b (...)')
+ max_neg_value = -torch.finfo(sim.dtype).max
+ mask = repeat(mask, 'b j -> (b h) () j', h=h)
+ sim.masked_fill_(~mask, max_neg_value)
+
+ # attention, what we cannot get enough of
+ attn = sim.softmax(dim=-1)
+
+ out = einsum('b i j, b j d -> b i d', attn, v)
+ out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
+ return self.to_out(out)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index a6fa890c4..d68f89cc2 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -8,7 +8,7 @@ from torch import einsum
from torch.nn.functional import silu
import modules.textual_inversion.textual_inversion
-from modules import prompt_parser, devices, sd_hijack_optimizations, shared
+from modules import prompt_parser, devices, sd_hijack_optimizations, shared, hypernetwork
from modules.shared import opts, device, cmd_opts
import ldm.modules.attention
@@ -20,6 +20,8 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At
def apply_optimizations():
+ undo_optimizations()
+
ldm.modules.diffusionmodules.model.nonlinearity = silu
if cmd_opts.opt_split_attention_v1:
@@ -30,7 +32,7 @@ def apply_optimizations():
def undo_optimizations():
- ldm.modules.attention.CrossAttention.forward = attention_CrossAttention_forward
+ ldm.modules.attention.CrossAttention.forward = hypernetwork.attention_CrossAttention_forward
ldm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity
ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward
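
The hijack works by plain attribute monkey-patching: the library's forward is captured at import time, an optimized replacement is assigned onto the class, and undo restores a baseline. Calling undo_optimizations() at the top of apply_optimizations(), as this patch adds, makes re-application idempotent. A minimal sketch of the pattern, independent of ldm (all names illustrative):

class Attention:
    def forward(self, x):
        return x  # stand-in for the library's original implementation

original_forward = Attention.forward  # captured before any patching

def optimized_forward(self, x):
    return x * 2  # stand-in for an optimized kernel

def undo_optimizations():
    Attention.forward = original_forward

def apply_optimizations():
    undo_optimizations()  # reset first, so repeated calls stay idempotent
    Attention.forward = optimized_forward

apply_optimizations()
assert Attention().forward(3) == 6
undo_optimizations()
assert Attention().forward(3) == 3
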
From 54fa613c8391e3973cca9d94cdf539061932508b Mon Sep 17 00:00:00 2001
From: Greendayle
Date: Fri, 7 Oct 2022 20:37:43 +0200
Subject: [PATCH 134/460] loading tf only in interrogation process
---
modules/deepbooru.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
index fb5018a6c..79dc59bdf 100644
--- a/modules/deepbooru.py
+++ b/modules/deepbooru.py
@@ -1,12 +1,13 @@
import os.path
from concurrent.futures import ProcessPoolExecutor
-import numpy as np
-import deepdanbooru as dd
-import tensorflow as tf
def _load_tf_and_return_tags(pil_image, threshold):
+ import deepdanbooru as dd
+ import tensorflow as tf
+ import numpy as np
+
this_folder = os.path.dirname(__file__)
model_path = os.path.join(this_folder, '..', 'models', 'deepbooru', 'deepdanbooru-v3-20211112-sgd-e28')
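
Moving the deepdanbooru/tensorflow/numpy imports inside the worker function means the heavy libraries load only in the process that actually runs the job, keeping them and their global state out of the main web UI process. The pattern in isolation (json stands in for a heavy library):

from concurrent.futures import ProcessPoolExecutor

def heavy_job(n):
    # imported here, so the cost and global state stay inside the worker
    import json  # stand-in for a heavy library such as tensorflow
    return json.dumps({"n": n})

if __name__ == "__main__":
    with ProcessPoolExecutor() as executor:
        print(executor.submit(heavy_job, 1).result())
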
From fa2ea648db81f5723bb5d722f2fe0ebd7dfc319a Mon Sep 17 00:00:00 2001
From: Greendayle
Date: Fri, 7 Oct 2022 20:46:38 +0200
Subject: [PATCH 135/460] even more powerful fix
---
modules/deepbooru.py | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
index 79dc59bdf..600943368 100644
--- a/modules/deepbooru.py
+++ b/modules/deepbooru.py
@@ -60,8 +60,13 @@ def _load_tf_and_return_tags(pil_image, threshold):
return ', '.join(result_tags_out).replace('_', ' ').replace(':', ' ')
+def subprocess_init_no_cuda():
+ import os
+ os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
+
+
def get_deepbooru_tags(pil_image, threshold=0.5):
- with ProcessPoolExecutor() as executor:
- f = executor.submit(_load_tf_and_return_tags, pil_image, threshold)
+ with ProcessPoolExecutor(initializer=subprocess_init_no_cuda) as executor:
+ f = executor.submit(_load_tf_and_return_tags, pil_image, threshold, )
ret = f.result() # will rethrow any exceptions
return ret
\ No newline at end of file
From 5f12e7efd92ad802742f96788b4be3249ad02829 Mon Sep 17 00:00:00 2001
From: Greendayle
Date: Fri, 7 Oct 2022 20:58:30 +0200
Subject: [PATCH 136/460] linux test
---
modules/deepbooru.py | 5 +++--
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
index 600943368..781b22492 100644
--- a/modules/deepbooru.py
+++ b/modules/deepbooru.py
@@ -1,6 +1,6 @@
import os.path
from concurrent.futures import ProcessPoolExecutor
-
+from multiprocessing import get_context
def _load_tf_and_return_tags(pil_image, threshold):
@@ -66,7 +66,8 @@ def subprocess_init_no_cuda():
def get_deepbooru_tags(pil_image, threshold=0.5):
- with ProcessPoolExecutor(initializer=subprocess_init_no_cuda) as executor:
+ context = get_context('spawn')
+ with ProcessPoolExecutor(initializer=subprocess_init_no_cuda, mp_context=context) as executor:
f = executor.submit(_load_tf_and_return_tags, pil_image, threshold, )
ret = f.result() # will rethrow any exceptions
return ret
\ No newline at end of file
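
Two fixes combine across patches 135 and 136: the pool initializer sets CUDA_VISIBLE_DEVICES=-1 so the TensorFlow worker never touches the GPU, and the 'spawn' start method gives each worker a fresh interpreter, which also sidesteps fork-related CUDA state on Linux. A condensed sketch of the arrangement:

import os
from concurrent.futures import ProcessPoolExecutor
from multiprocessing import get_context

def hide_cuda():
    # runs once per worker before any job; keeps TensorFlow off the GPU
    os.environ["CUDA_VISIBLE_DEVICES"] = "-1"

def report():
    return os.environ.get("CUDA_VISIBLE_DEVICES")

if __name__ == "__main__":
    ctx = get_context("spawn")  # fresh interpreter per worker, as in the patch
    with ProcessPoolExecutor(initializer=hide_cuda, mp_context=ctx) as executor:
        assert executor.submit(report).result() == "-1"
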
From 12c4d5c6b5bf9dd50d0601c36af4f99b65316d58 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Fri, 7 Oct 2022 23:22:22 +0300
Subject: [PATCH 137/460] hypernetwork training mk1
---
modules/hypernetwork.py | 88 ------
modules/hypernetwork/hypernetwork.py | 267 +++++++++++++++++++
modules/hypernetwork/ui.py | 43 +++
modules/sd_hijack.py | 4 +-
modules/sd_hijack_optimizations.py | 3 +-
modules/shared.py | 13 +-
modules/textual_inversion/ui.py | 1 -
modules/ui.py | 58 +++-
scripts/xy_grid.py | 7 +-
textual_inversion_templates/hypernetwork.txt | 27 ++
textual_inversion_templates/none.txt | 1 +
webui.py | 9 +
12 files changed, 414 insertions(+), 107 deletions(-)
delete mode 100644 modules/hypernetwork.py
create mode 100644 modules/hypernetwork/hypernetwork.py
create mode 100644 modules/hypernetwork/ui.py
create mode 100644 textual_inversion_templates/hypernetwork.txt
create mode 100644 textual_inversion_templates/none.txt
diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py
deleted file mode 100644
index c7b866829..000000000
--- a/modules/hypernetwork.py
+++ /dev/null
@@ -1,88 +0,0 @@
-import glob
-import os
-import sys
-import traceback
-
-import torch
-
-from ldm.util import default
-from modules import devices, shared
-import torch
-from torch import einsum
-from einops import rearrange, repeat
-
-
-class HypernetworkModule(torch.nn.Module):
- def __init__(self, dim, state_dict):
- super().__init__()
-
- self.linear1 = torch.nn.Linear(dim, dim * 2)
- self.linear2 = torch.nn.Linear(dim * 2, dim)
-
- self.load_state_dict(state_dict, strict=True)
- self.to(devices.device)
-
- def forward(self, x):
- return x + (self.linear2(self.linear1(x)))
-
-
-class Hypernetwork:
- filename = None
- name = None
-
- def __init__(self, filename):
- self.filename = filename
- self.name = os.path.splitext(os.path.basename(filename))[0]
- self.layers = {}
-
- state_dict = torch.load(filename, map_location='cpu')
- for size, sd in state_dict.items():
- self.layers[size] = (HypernetworkModule(size, sd[0]), HypernetworkModule(size, sd[1]))
-
-
-def load_hypernetworks(path):
- res = {}
-
- for filename in glob.iglob(path + '**/*.pt', recursive=True):
- try:
- hn = Hypernetwork(filename)
- res[hn.name] = hn
- except Exception:
- print(f"Error loading hypernetwork {filename}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
-
- return res
-
-
-def attention_CrossAttention_forward(self, x, context=None, mask=None):
- h = self.heads
-
- q = self.to_q(x)
- context = default(context, x)
-
- hypernetwork = shared.selected_hypernetwork()
- hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
-
- if hypernetwork_layers is not None:
- k = self.to_k(hypernetwork_layers[0](context))
- v = self.to_v(hypernetwork_layers[1](context))
- else:
- k = self.to_k(context)
- v = self.to_v(context)
-
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
-
- sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
-
- if mask is not None:
- mask = rearrange(mask, 'b ... -> b (...)')
- max_neg_value = -torch.finfo(sim.dtype).max
- mask = repeat(mask, 'b j -> (b h) () j', h=h)
- sim.masked_fill_(~mask, max_neg_value)
-
- # attention, what we cannot get enough of
- attn = sim.softmax(dim=-1)
-
- out = einsum('b i j, b j d -> b i d', attn, v)
- out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
- return self.to_out(out)
diff --git a/modules/hypernetwork/hypernetwork.py b/modules/hypernetwork/hypernetwork.py
new file mode 100644
index 000000000..a3d6a47ef
--- /dev/null
+++ b/modules/hypernetwork/hypernetwork.py
@@ -0,0 +1,267 @@
+import datetime
+import glob
+import html
+import os
+import sys
+import traceback
+import tqdm
+
+import torch
+
+from ldm.util import default
+from modules import devices, shared, processing, sd_models
+import torch
+from torch import einsum
+from einops import rearrange, repeat
+import modules.textual_inversion.dataset
+
+
+class HypernetworkModule(torch.nn.Module):
+ def __init__(self, dim, state_dict=None):
+ super().__init__()
+
+ self.linear1 = torch.nn.Linear(dim, dim * 2)
+ self.linear2 = torch.nn.Linear(dim * 2, dim)
+
+ if state_dict is not None:
+ self.load_state_dict(state_dict, strict=True)
+ else:
+ self.linear1.weight.data.fill_(0.0001)
+ self.linear1.bias.data.fill_(0.0001)
+ self.linear2.weight.data.fill_(0.0001)
+ self.linear2.bias.data.fill_(0.0001)
+
+ self.to(devices.device)
+
+ def forward(self, x):
+ return x + (self.linear2(self.linear1(x)))
+
+
+class Hypernetwork:
+ filename = None
+ name = None
+
+ def __init__(self, name=None):
+ self.filename = None
+ self.name = name
+ self.layers = {}
+ self.step = 0
+ self.sd_checkpoint = None
+ self.sd_checkpoint_name = None
+
+ for size in [320, 640, 768, 1280]:
+ self.layers[size] = (HypernetworkModule(size), HypernetworkModule(size))
+
+ def weights(self):
+ res = []
+
+ for k, layers in self.layers.items():
+ for layer in layers:
+ layer.train()
+ res += [layer.linear1.weight, layer.linear1.bias, layer.linear2.weight, layer.linear2.bias]
+
+ return res
+
+ def save(self, filename):
+ state_dict = {}
+
+ for k, v in self.layers.items():
+ state_dict[k] = (v[0].state_dict(), v[1].state_dict())
+
+ state_dict['step'] = self.step
+ state_dict['name'] = self.name
+ state_dict['sd_checkpoint'] = self.sd_checkpoint
+ state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name
+
+ torch.save(state_dict, filename)
+
+ def load(self, filename):
+ self.filename = filename
+ if self.name is None:
+ self.name = os.path.splitext(os.path.basename(filename))[0]
+
+ state_dict = torch.load(filename, map_location='cpu')
+
+ for size, sd in state_dict.items():
+ if type(size) == int:
+ self.layers[size] = (HypernetworkModule(size, sd[0]), HypernetworkModule(size, sd[1]))
+
+ self.name = state_dict.get('name', self.name)
+ self.step = state_dict.get('step', 0)
+ self.sd_checkpoint = state_dict.get('sd_checkpoint', None)
+ self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None)
+
+
+def load_hypernetworks(path):
+ res = {}
+
+ for filename in glob.iglob(path + '**/*.pt', recursive=True):
+ try:
+ hn = Hypernetwork()
+ hn.load(filename)
+ res[hn.name] = hn
+ except Exception:
+ print(f"Error loading hypernetwork {filename}", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+
+ return res
+
+
+def attention_CrossAttention_forward(self, x, context=None, mask=None):
+ h = self.heads
+
+ q = self.to_q(x)
+ context = default(context, x)
+
+ hypernetwork_layers = (shared.hypernetwork.layers if shared.hypernetwork is not None else {}).get(context.shape[2], None)
+
+ if hypernetwork_layers is not None:
+ hypernetwork_k, hypernetwork_v = hypernetwork_layers
+
+ self.hypernetwork_k = hypernetwork_k
+ self.hypernetwork_v = hypernetwork_v
+
+ context_k = hypernetwork_k(context)
+ context_v = hypernetwork_v(context)
+ else:
+ context_k = context
+ context_v = context
+
+ k = self.to_k(context_k)
+ v = self.to_v(context_v)
+
+ q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+
+ sim = einsum('b i d, b j d -> b i j', q, k) * self.scale
+
+ if mask is not None:
+ mask = rearrange(mask, 'b ... -> b (...)')
+ max_neg_value = -torch.finfo(sim.dtype).max
+ mask = repeat(mask, 'b j -> (b h) () j', h=h)
+ sim.masked_fill_(~mask, max_neg_value)
+
+ # attention, what we cannot get enough of
+ attn = sim.softmax(dim=-1)
+
+ out = einsum('b i j, b j d -> b i d', attn, v)
+ out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
+ return self.to_out(out)
+
+
+def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, steps, create_image_every, save_hypernetwork_every, template_file, preview_image_prompt):
+ assert hypernetwork_name, 'hypernetwork not selected'
+
+ shared.hypernetwork = shared.hypernetworks[hypernetwork_name]
+
+ shared.state.textinfo = "Initializing hypernetwork training..."
+ shared.state.job_count = steps
+
+ filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt')
+
+ log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name)
+
+ if save_hypernetwork_every > 0:
+ hypernetwork_dir = os.path.join(log_directory, "hypernetworks")
+ os.makedirs(hypernetwork_dir, exist_ok=True)
+ else:
+ hypernetwork_dir = None
+
+ if create_image_every > 0:
+ images_dir = os.path.join(log_directory, "images")
+ os.makedirs(images_dir, exist_ok=True)
+ else:
+ images_dir = None
+
+ cond_model = shared.sd_model.cond_stage_model
+
+ shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..."
+ with torch.autocast("cuda"):
+ ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, size=512, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file)
+
+ hypernetwork = shared.hypernetworks[hypernetwork_name]
+ weights = hypernetwork.weights()
+ for weight in weights:
+ weight.requires_grad = True
+
+ optimizer = torch.optim.AdamW(weights, lr=learn_rate)
+
+ losses = torch.zeros((32,))
+
+ last_saved_file = ""
+ last_saved_image = ""
+
+ initial_step = hypernetwork.step or 0
+ if initial_step > steps:
+ return hypernetwork, filename
+
+ pbar = tqdm.tqdm(enumerate(ds), total=steps-initial_step)
+ for i, (x, text) in pbar:
+ hypernetwork.step = i + initial_step
+
+ if hypernetwork.step > steps:
+ break
+
+ if shared.state.interrupted:
+ break
+
+ with torch.autocast("cuda"):
+ c = cond_model([text])
+
+ x = x.to(devices.device)
+ loss = shared.sd_model(x.unsqueeze(0), c)[0]
+ del x
+
+ losses[hypernetwork.step % losses.shape[0]] = loss.item()
+
+ optimizer.zero_grad()
+ loss.backward()
+ optimizer.step()
+
+ pbar.set_description(f"loss: {losses.mean():.7f}")
+
+ if hypernetwork.step > 0 and hypernetwork_dir is not None and hypernetwork.step % save_hypernetwork_every == 0:
+ last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name}-{hypernetwork.step}.pt')
+ hypernetwork.save(last_saved_file)
+
+ if hypernetwork.step > 0 and images_dir is not None and hypernetwork.step % create_image_every == 0:
+ last_saved_image = os.path.join(images_dir, f'{hypernetwork_name}-{hypernetwork.step}.png')
+
+ preview_text = text if preview_image_prompt == "" else preview_image_prompt
+
+ p = processing.StableDiffusionProcessingTxt2Img(
+ sd_model=shared.sd_model,
+ prompt=preview_text,
+ steps=20,
+ do_not_save_grid=True,
+ do_not_save_samples=True,
+ )
+
+ processed = processing.process_images(p)
+ image = processed.images[0]
+
+ shared.state.current_image = image
+ image.save(last_saved_image)
+
+ last_saved_image += f", prompt: {preview_text}"
+
+ shared.state.job_no = hypernetwork.step
+
+ shared.state.textinfo = f"""
+<p>
+Loss: {losses.mean():.7f}<br/>
+Step: {hypernetwork.step}<br/>
+Last prompt: {html.escape(text)}<br/>
+Last saved embedding: {html.escape(last_saved_file)}<br/>
+Last saved image: {html.escape(last_saved_image)}<br/>
+</p>
+"""
+
+ checkpoint = sd_models.select_checkpoint()
+
+ hypernetwork.sd_checkpoint = checkpoint.hash
+ hypernetwork.sd_checkpoint_name = checkpoint.model_name
+ hypernetwork.save(filename)
+
+ return hypernetwork, filename
+
+
diff --git a/modules/hypernetwork/ui.py b/modules/hypernetwork/ui.py
new file mode 100644
index 000000000..525f978c5
--- /dev/null
+++ b/modules/hypernetwork/ui.py
@@ -0,0 +1,43 @@
+import html
+import os
+
+import gradio as gr
+
+import modules.textual_inversion.textual_inversion
+import modules.textual_inversion.preprocess
+from modules import sd_hijack, shared
+
+
+def create_hypernetwork(name):
+ fn = os.path.join(shared.cmd_opts.hypernetwork_dir, f"{name}.pt")
+ assert not os.path.exists(fn), f"file {fn} already exists"
+
+ hypernetwork = modules.hypernetwork.hypernetwork.Hypernetwork(name=name)
+ hypernetwork.save(fn)
+
+ shared.reload_hypernetworks()
+ shared.hypernetwork = shared.hypernetworks.get(shared.opts.sd_hypernetwork, None)
+
+ return gr.Dropdown.update(choices=sorted([x for x in shared.hypernetworks.keys()])), f"Created: {fn}", ""
+
+
+def train_hypernetwork(*args):
+
+ initial_hypernetwork = shared.hypernetwork
+
+ try:
+ sd_hijack.undo_optimizations()
+
+ hypernetwork, filename = modules.hypernetwork.hypernetwork.train_hypernetwork(*args)
+
+ res = f"""
+Training {'interrupted' if shared.state.interrupted else 'finished'} at {hypernetwork.step} steps.
+Hypernetwork saved to {html.escape(filename)}
+"""
+ return res, ""
+ except Exception:
+ raise
+ finally:
+ shared.hypernetwork = initial_hypernetwork
+ sd_hijack.apply_optimizations()
+
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index d68f89cc2..ec8c9d4b2 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -8,7 +8,7 @@ from torch import einsum
from torch.nn.functional import silu
import modules.textual_inversion.textual_inversion
-from modules import prompt_parser, devices, sd_hijack_optimizations, shared, hypernetwork
+from modules import prompt_parser, devices, sd_hijack_optimizations, shared
from modules.shared import opts, device, cmd_opts
import ldm.modules.attention
@@ -32,6 +32,8 @@ def apply_optimizations():
def undo_optimizations():
+ from modules.hypernetwork import hypernetwork
+
ldm.modules.attention.CrossAttention.forward = hypernetwork.attention_CrossAttention_forward
ldm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity
ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index d9cca4851..3f32e0209 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -45,8 +45,7 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
q_in = self.to_q(x)
context = default(context, x)
- hypernetwork = shared.selected_hypernetwork()
- hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
+ hypernetwork_layers = (shared.hypernetwork.layers if shared.hypernetwork is not None else {}).get(context.shape[2], None)
if hypernetwork_layers is not None:
k_in = self.to_k(hypernetwork_layers[0](context))
diff --git a/modules/shared.py b/modules/shared.py
index 879d8424a..c5a893e8d 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -13,7 +13,7 @@ import modules.memmon
import modules.sd_models
import modules.styles
import modules.devices as devices
-from modules import sd_samplers, hypernetwork
+from modules import sd_samplers
from modules.paths import models_path, script_path, sd_path
sd_model_file = os.path.join(script_path, 'model.ckpt')
@@ -28,6 +28,7 @@ parser.add_argument("--no-half", action='store_true', help="do not switch the mo
parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)")
parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI")
parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)")
+parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory")
parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui")
parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage")
parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage")
@@ -76,11 +77,15 @@ parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
config_filename = cmd_opts.ui_settings_file
-hypernetworks = hypernetwork.load_hypernetworks(os.path.join(models_path, 'hypernetworks'))
+
+def reload_hypernetworks():
+ from modules.hypernetwork import hypernetwork
+ hypernetworks.clear()
+ hypernetworks.update(hypernetwork.load_hypernetworks(cmd_opts.hypernetwork_dir))
-def selected_hypernetwork():
- return hypernetworks.get(opts.sd_hypernetwork, None)
+hypernetworks = {}
+hypernetwork = None
class State:
diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py
index f19ac5e02..c57de1f94 100644
--- a/modules/textual_inversion/ui.py
+++ b/modules/textual_inversion/ui.py
@@ -22,7 +22,6 @@ def preprocess(*args):
def train_embedding(*args):
-
try:
sd_hijack.undo_optimizations()
diff --git a/modules/ui.py b/modules/ui.py
index 4f18126fb..051908c1c 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -37,6 +37,7 @@ import modules.generation_parameters_copypaste
from modules import prompt_parser
from modules.images import save_image
import modules.textual_inversion.ui
+import modules.hypernetwork.ui
# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
@@ -965,6 +966,18 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Column():
create_embedding = gr.Button(value="Create", variant='primary')
+ with gr.Group():
+ gr.HTML(value="Create a new hypernetwork
")
+
+ new_hypernetwork_name = gr.Textbox(label="Name")
+
+ with gr.Row():
+ with gr.Column(scale=3):
+ gr.HTML(value="")
+
+ with gr.Column():
+ create_hypernetwork = gr.Button(value="Create", variant='primary')
+
with gr.Group():
gr.HTML(value="Preprocess images
")
@@ -986,6 +999,7 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Group():
gr.HTML(value="Train an embedding; must specify a directory with a set of 512x512 images
")
train_embedding_name = gr.Dropdown(label='Embedding', choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys()))
+ train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', choices=[x for x in shared.hypernetworks.keys()])
learn_rate = gr.Number(label='Learning rate', value=5.0e-03)
dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images")
log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion")
@@ -993,15 +1007,12 @@ def create_ui(wrap_gradio_gpu_call):
steps = gr.Number(label='Max steps', value=100000, precision=0)
create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0)
save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0)
+ preview_image_prompt = gr.Textbox(label='Preview prompt', value="")
with gr.Row():
- with gr.Column(scale=2):
- gr.HTML(value="")
-
- with gr.Column():
- with gr.Row():
- interrupt_training = gr.Button(value="Interrupt")
- train_embedding = gr.Button(value="Train", variant='primary')
+ interrupt_training = gr.Button(value="Interrupt")
+ train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary')
+ train_embedding = gr.Button(value="Train Embedding", variant='primary')
with gr.Column():
progressbar = gr.HTML(elem_id="ti_progressbar")
@@ -1027,6 +1038,18 @@ def create_ui(wrap_gradio_gpu_call):
]
)
+ create_hypernetwork.click(
+ fn=modules.hypernetwork.ui.create_hypernetwork,
+ inputs=[
+ new_hypernetwork_name,
+ ],
+ outputs=[
+ train_hypernetwork_name,
+ ti_output,
+ ti_outcome,
+ ]
+ )
+
run_preprocess.click(
fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]),
_js="start_training_textual_inversion",
@@ -1062,12 +1085,33 @@ def create_ui(wrap_gradio_gpu_call):
]
)
+ train_hypernetwork.click(
+ fn=wrap_gradio_gpu_call(modules.hypernetwork.ui.train_hypernetwork, extra_outputs=[gr.update()]),
+ _js="start_training_textual_inversion",
+ inputs=[
+ train_hypernetwork_name,
+ learn_rate,
+ dataset_directory,
+ log_directory,
+ steps,
+ create_image_every,
+ save_embedding_every,
+ template_file,
+ preview_image_prompt,
+ ],
+ outputs=[
+ ti_output,
+ ti_outcome,
+ ]
+ )
+
interrupt_training.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
outputs=[],
)
+
def create_setting_component(key):
def fun():
return opts.data[key] if key in opts.data else opts.data_labels[key].default
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index c0c364df8..5b504de6b 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -78,8 +78,7 @@ def apply_checkpoint(p, x, xs):
def apply_hypernetwork(p, x, xs):
- hn = shared.hypernetworks.get(x, None)
- opts.data["sd_hypernetwork"] = hn.name if hn is not None else 'None'
+ shared.hypernetwork = shared.hypernetworks.get(x, None)
def format_value_add_label(p, opt, x):
@@ -199,7 +198,7 @@ class Script(scripts.Script):
modules.processing.fix_seed(p)
p.batch_size = 1
- initial_hn = opts.sd_hypernetwork
+ initial_hn = shared.hypernetwork
def process_axis(opt, vals):
if opt.label == 'Nothing':
@@ -308,6 +307,6 @@ class Script(scripts.Script):
# restore checkpoint in case it was changed by axes
modules.sd_models.reload_model_weights(shared.sd_model)
- opts.data["sd_hypernetwork"] = initial_hn
+ shared.hypernetwork = initial_hn
return processed
diff --git a/textual_inversion_templates/hypernetwork.txt b/textual_inversion_templates/hypernetwork.txt
new file mode 100644
index 000000000..91e068905
--- /dev/null
+++ b/textual_inversion_templates/hypernetwork.txt
@@ -0,0 +1,27 @@
+a photo of a [filewords]
+a rendering of a [filewords]
+a cropped photo of the [filewords]
+the photo of a [filewords]
+a photo of a clean [filewords]
+a photo of a dirty [filewords]
+a dark photo of the [filewords]
+a photo of my [filewords]
+a photo of the cool [filewords]
+a close-up photo of a [filewords]
+a bright photo of the [filewords]
+a cropped photo of a [filewords]
+a photo of the [filewords]
+a good photo of the [filewords]
+a photo of one [filewords]
+a close-up photo of the [filewords]
+a rendition of the [filewords]
+a photo of the clean [filewords]
+a rendition of a [filewords]
+a photo of a nice [filewords]
+a good photo of a [filewords]
+a photo of the nice [filewords]
+a photo of the small [filewords]
+a photo of the weird [filewords]
+a photo of the large [filewords]
+a photo of a cool [filewords]
+a photo of a small [filewords]
diff --git a/textual_inversion_templates/none.txt b/textual_inversion_templates/none.txt
new file mode 100644
index 000000000..f77af4612
--- /dev/null
+++ b/textual_inversion_templates/none.txt
@@ -0,0 +1 @@
+picture
diff --git a/webui.py b/webui.py
index 480360fe0..60f9061f9 100644
--- a/webui.py
+++ b/webui.py
@@ -74,6 +74,15 @@ def wrap_gradio_gpu_call(func, extra_outputs=None):
return modules.ui.wrap_gradio_call(f, extra_outputs=extra_outputs)
+def set_hypernetwork():
+ shared.hypernetwork = shared.hypernetworks.get(shared.opts.sd_hypernetwork, None)
+
+
+shared.reload_hypernetworks()
+shared.opts.onchange("sd_hypernetwork", set_hypernetwork)
+set_hypernetwork()
+
+
modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
shared.sd_model = modules.sd_models.load_model()
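
Stripped of the logging and checkpointing, the training loop in patch 137 is ordinary PyTorch: only the hypernetwork's linear weights get requires_grad=True, an AdamW optimizer steps on them, and the frozen diffusion model supplies the loss. The skeleton, with a toy model and data standing in for the real pipeline:

import torch

hn_layer = torch.nn.Linear(4, 4)   # stand-in for the hypernetwork's weights
for p in hn_layer.parameters():
    p.requires_grad = True         # everything else in the pipeline stays frozen

optimizer = torch.optim.AdamW(hn_layer.parameters(), lr=5e-3)
losses = torch.zeros((32,))        # rolling loss window, as in the patch

for step in range(100):
    x = torch.randn(8, 4)
    loss = (hn_layer(x) - x).pow(2).mean()  # stand-in for sd_model(x, c)[0]
    losses[step % losses.shape[0]] = loss.item()

    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
print(f"loss: {losses.mean():.7f}")
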
From c9cc65b201679ea43c763b0d85e749d40bbc5433 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 04:09:18 +0300
Subject: [PATCH 138/460] switch to the proper way of calling xformers
---
modules/sd_hijack_optimizations.py | 28 +++-------------------------
1 file changed, 3 insertions(+), 25 deletions(-)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index da1b76e1c..7fb4a45e3 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -94,39 +94,17 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
return self.to_out(r2)
-def _maybe_init(self, x):
- """
- Initialize the attention operator, if required We expect the head dimension to be exposed here, meaning that x
- : B, Head, Length
- """
- if self.attention_op is not None:
- return
- _, M, K = x.shape
- try:
- self.attention_op = xformers.ops.AttentionOpDispatch(
- dtype=x.dtype,
- device=x.device,
- k=K,
- attn_bias_type=type(None),
- has_dropout=False,
- kv_len=M,
- q_len=M,
- ).op
- except NotImplementedError as err:
- raise NotImplementedError(f"Please install xformers with the flash attention / cutlass components.\n{err}")
-
def xformers_attention_forward(self, x, context=None, mask=None):
h = self.heads
q_in = self.to_q(x)
context = default(context, x)
k_in = self.to_k(context)
v_in = self.to_v(context)
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
+ q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))
del q_in, k_in, v_in
- self._maybe_init(q)
- out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op)
+ out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
- out = rearrange(out, '(b h) n d -> b n (h d)', h=h)
+ out = rearrange(out, 'b n h d -> b n (h d)', h=h)
return self.to_out(out)
def cross_attention_attnblock_forward(self, x):
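
The "proper way" amounts to two things: keep the tensors in (batch, tokens, heads, head_dim) layout rather than folding heads into the batch, and let xformers dispatch the kernel itself instead of pre-building an AttentionOpDispatch. A sketch of the call (shapes illustrative; requires xformers, and its kernels generally expect CUDA tensors in practice):

import torch
from einops import rearrange
import xformers.ops

h = 8                                  # attention heads
q_in = torch.randn(2, 77, 8 * 64)      # (batch, tokens, heads * head_dim)
k_in, v_in = q_in.clone(), q_in.clone()

# 'b n (h d) -> b n h d': heads become their own axis instead of being
# folded into the batch, which is the layout memory_efficient_attention expects
q, k, v = (rearrange(t, 'b n (h d) -> b n h d', h=h) for t in (q_in, k_in, v_in))

out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
out = rearrange(out, 'b n h d -> b n (h d)', h=h)
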
From b70eaeb2005a5a9593119e7fd32b8072c2a208d5 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 04:10:35 +0300
Subject: [PATCH 139/460] delete broken and unnecessary aliases
---
modules/sd_hijack.py | 10 ++++------
1 file changed, 4 insertions(+), 6 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index cbdb9d3c7..0e99c3192 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -21,16 +21,14 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At
def apply_optimizations():
ldm.modules.diffusionmodules.model.nonlinearity = silu
- if cmd_opts.opt_split_attention_v1:
+ if not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip):
+ ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
+ ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
+ elif cmd_opts.opt_split_attention_v1:
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
elif cmd_opts.opt_split_attention:
ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
- elif not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip):
- ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
- ldm.modules.attention.CrossAttention._maybe_init = sd_hijack_optimizations._maybe_init
- ldm.modules.attention.CrossAttention.attention_op = None
- ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
def undo_optimizations():
From a958f9b3fdea95c01d360aba1b6fe0ce3ea6b349 Mon Sep 17 00:00:00 2001
From: Jairo Correa
Date: Fri, 7 Oct 2022 20:05:47 -0300
Subject: [PATCH 140/460] edit-attention browser compatibility and readme typo
---
README.md | 2 +-
javascript/edit-attention.js | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/README.md b/README.md
index a14a63306..0516c2cd8 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- Attention, specify parts of text that the model should pay more attention to
- a man in a ((tuxedo)) - will pay more attention to tuxedo
- a man in a (tuxedo:1.21) - alternative syntax
- - select text and press ctrl+up or ctrl+down to aduotmatically adjust attention to selected text
+ - select text and press ctrl+up or ctrl+down to automatically adjust attention to selected text
- Loopback, run img2img processing multiple times
- X/Y plot, a way to draw a 2 dimensional plot of images with different parameters
- Textual Inversion
diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js
index c67ed5794..0280c603f 100644
--- a/javascript/edit-attention.js
+++ b/javascript/edit-attention.js
@@ -1,5 +1,5 @@
addEventListener('keydown', (event) => {
- let target = event.originalTarget;
+ let target = event.originalTarget || event.composedPath()[0];
if (!target.hasAttribute("placeholder")) return;
if (!target.placeholder.toLowerCase().includes("prompt")) return;
From f2055cb1d4ce45d7aaacc49d8ab5bec7791a8f47 Mon Sep 17 00:00:00 2001
From: brkirch
Date: Sat, 8 Oct 2022 01:47:02 -0400
Subject: [PATCH 141/460] Add hypernetwork support to split cross attention v1
* Add hypernetwork support to split_cross_attention_forward_v1
* Fix device check in esrgan_model.py to use devices.device_esrgan instead of shared.device
---
modules/esrgan_model.py | 2 +-
modules/sd_hijack_optimizations.py | 18 ++++++++++++++----
2 files changed, 15 insertions(+), 5 deletions(-)
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index d17e730f9..285481242 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -111,7 +111,7 @@ class UpscalerESRGAN(Upscaler):
print("Unable to load %s from %s" % (self.model_path, filename))
return None
- pretrained_net = torch.load(filename, map_location='cpu' if shared.device.type == 'mps' else None)
+ pretrained_net = torch.load(filename, map_location='cpu' if devices.device_esrgan.type == 'mps' else None)
crt_model = arch.RRDBNet(3, 3, 64, 23, gc=32)
pretrained_net = fix_model_layers(crt_model, pretrained_net)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index d9cca4851..3351c7409 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -12,13 +12,22 @@ from modules import shared
def split_cross_attention_forward_v1(self, x, context=None, mask=None):
h = self.heads
- q = self.to_q(x)
+ q_in = self.to_q(x)
context = default(context, x)
- k = self.to_k(context)
- v = self.to_v(context)
+
+ hypernetwork = shared.selected_hypernetwork()
+ hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
+
+ if hypernetwork_layers is not None:
+ k_in = self.to_k(hypernetwork_layers[0](context))
+ v_in = self.to_v(hypernetwork_layers[1](context))
+ else:
+ k_in = self.to_k(context)
+ v_in = self.to_v(context)
del context, x
- q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v))
+ q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in))
+ del q_in, k_in, v_in
r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device)
for i in range(0, q.shape[0], 2):
@@ -31,6 +40,7 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None):
r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end])
del s2
+ del q, k, v
r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h)
del r1
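
split_cross_attention_forward_v1 bounds peak memory by iterating over the fused batch-head axis in slices, materializing the tokens-by-tokens similarity matrix for only two slices at a time, and this patch additionally frees q/k/v before reassembly. The core slicing idea in isolation:

import torch
from torch import einsum

q = torch.randn(16, 77, 64)            # (batch*heads, tokens, head_dim)
k, v = q.clone(), q.clone()
scale = q.shape[-1] ** -0.5

out = torch.zeros_like(q)
for i in range(0, q.shape[0], 2):      # two batch-head slices per step
    end = i + 2
    sim = einsum('b i d, b j d -> b i j', q[i:end], k[i:end]) * scale
    attn = sim.softmax(dim=-1)
    del sim                            # release the tokens x tokens matrix early
    out[i:end] = einsum('b i j, b j d -> b i d', attn, v[i:end])
    del attn
del q, k, v                            # mirrors the cleanup this patch adds
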
From e21e4732531299ef4895baccdb7a6493a3886924 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 8 Oct 2022 05:34:17 +0100
Subject: [PATCH 142/460] Context Menus
---
javascript/contextMenus.js | 165 +++++++++++++++++++++++++++++++++++++
1 file changed, 165 insertions(+)
create mode 100644 javascript/contextMenus.js
diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js
new file mode 100644
index 000000000..99d1d3f7d
--- /dev/null
+++ b/javascript/contextMenus.js
@@ -0,0 +1,165 @@
+
+contextMenuInit = function(){
+ let eventListenerApplied=false;
+ let menuSpecs = new Map();
+
+ const uid = function(){
+ return Date.now().toString(36) + Math.random().toString(36).substr(2);
+ }
+
+ function showContextMenu(event,element,menuEntries){
+ let posx = event.clientX + document.body.scrollLeft + document.documentElement.scrollLeft;
+ let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop;
+
+ let oldMenu = gradioApp().querySelector('#context-menu')
+ if(oldMenu){
+ oldMenu.remove()
+ }
+
+ let tabButton = gradioApp().querySelector('button')
+ let baseStyle = window.getComputedStyle(tabButton)
+
+ const contextMenu = document.createElement('nav')
+ contextMenu.id = "context-menu"
+ contextMenu.style.background = baseStyle.background
+ contextMenu.style.color = baseStyle.color
+ contextMenu.style.fontFamily = baseStyle.fontFamily
+ contextMenu.style.top = posy+'px'
+ contextMenu.style.left = posx+'px'
+
+
+
+ const contextMenuList = document.createElement('ul')
+ contextMenuList.className = 'context-menu-items';
+ contextMenu.append(contextMenuList);
+
+ menuEntries.forEach(function(entry){
+ let contextMenuEntry = document.createElement('a')
+ contextMenuEntry.innerHTML = entry['name']
+ contextMenuEntry.addEventListener("click", function(e) {
+ entry['func']();
+ })
+ contextMenuList.append(contextMenuEntry);
+
+ })
+
+ gradioApp().getRootNode().appendChild(contextMenu)
+
+ let menuWidth = contextMenu.offsetWidth + 4;
+ let menuHeight = contextMenu.offsetHeight + 4;
+
+ let windowWidth = window.innerWidth;
+ let windowHeight = window.innerHeight;
+
+ if ( (windowWidth - posx) < menuWidth ) {
+ contextMenu.style.left = windowWidth - menuWidth + "px";
+ }
+
+ if ( (windowHeight - posy) < menuHeight ) {
+ contextMenu.style.top = windowHeight - menuHeight + "px";
+ }
+
+ }
+
+ function appendContextMenuOption(targetElementSelector,entryName,entryFunction){
+
+ currentItems = menuSpecs.get(targetElementSelector)
+
+ if(!currentItems){
+ currentItems = []
+ menuSpecs.set(targetElementSelector,currentItems);
+ }
+ let newItem = {'id':targetElementSelector+'_'+uid(),
+ 'name':entryName,
+ 'func':entryFunction,
+ 'isNew':true}
+
+ currentItems.push(newItem)
+ return newItem['id']
+ }
+
+ function removeContextMenuOption(uid){
+
+ }
+
+ function addContextMenuEventListener(){
+ if(eventListenerApplied){
+ return;
+ }
+ gradioApp().addEventListener("click", function(e) {
+ let source = e.composedPath()[0]
+ if(source.id && source.indexOf('check_progress')>-1){
+ return
+ }
+
+ let oldMenu = gradioApp().querySelector('#context-menu')
+ if(oldMenu){
+ oldMenu.remove()
+ }
+ });
+ gradioApp().addEventListener("contextmenu", function(e) {
+ let oldMenu = gradioApp().querySelector('#context-menu')
+ if(oldMenu){
+ oldMenu.remove()
+ }
+ menuSpecs.forEach(function(v,k) {
+ if(e.composedPath()[0].matches(k)){
+ showContextMenu(e,e.composedPath()[0],v)
+ e.preventDefault()
+ return
+ }
+ })
+ });
+ eventListenerApplied=true
+
+ }
+
+ return [appendContextMenuOption, removeContextMenuOption, addContextMenuEventListener]
+}
+
+initResponse = contextMenuInit()
+appendContextMenuOption = initResponse[0]
+removeContextMenuOption = initResponse[1]
+addContextMenuEventListener = initResponse[2]
+
+
+//Start example Context Menu Items
+generateOnRepeatId = appendContextMenuOption('#txt2img_generate','Generate forever',function(){
+ let genbutton = gradioApp().querySelector('#txt2img_generate');
+ let interruptbutton = gradioApp().querySelector('#txt2img_interrupt');
+ if(!interruptbutton.offsetParent){
+ genbutton.click();
+ }
+ clearInterval(window.generateOnRepeatInterval)
+ window.generateOnRepeatInterval = setInterval(function(){
+ if(!interruptbutton.offsetParent){
+ genbutton.click();
+ }
+ },
+ 500)}
+)
+
+cancelGenerateForever = function(){
+ clearInterval(window.generateOnRepeatInterval)
+ let interruptbutton = gradioApp().querySelector('#txt2img_interrupt');
+ if(interruptbutton.offsetParent){
+ interruptbutton.click();
+ }
+}
+
+appendContextMenuOption('#txt2img_interrupt','Cancel generate forever',cancelGenerateForever)
+appendContextMenuOption('#txt2img_generate','Cancel generate forever',cancelGenerateForever)
+
+appendContextMenuOption('#roll','Roll three',
+ function(){
+ let rollbutton = gradioApp().querySelector('#roll');
+ setTimeout(function(){rollbutton.click()},100)
+ setTimeout(function(){rollbutton.click()},200)
+ setTimeout(function(){rollbutton.click()},300)
+ }
+)
+//End example Context Menu Items
+
+onUiUpdate(function(){
+ addContextMenuEventListener()
+});
\ No newline at end of file
From 83749bfc72923b946abb825ebf4fdcc8b6035c8e Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 8 Oct 2022 05:35:03 +0100
Subject: [PATCH 143/460] context menu styling
---
style.css | 29 ++++++++++++++++++++++++++++-
1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/style.css b/style.css
index da0729a25..50c5e557c 100644
--- a/style.css
+++ b/style.css
@@ -410,4 +410,31 @@ input[type="range"]{
#img2img_image div.h-60{
height: 480px;
-}
\ No newline at end of file
+}
+
+#context-menu{
+ z-index:9999;
+ position:absolute;
+ display:block;
+ padding:0px 0;
+ border:2px solid #a55000;
+ border-radius:8px;
+ box-shadow:1px 1px 2px #CE6400;
+ width: 200px;
+}
+
+.context-menu-items{
+ list-style: none;
+ margin: 0;
+ padding: 0;
+}
+
+.context-menu-items a{
+ display:block;
+ padding:5px;
+ cursor:pointer;
+}
+
+.context-menu-items a:hover{
+ background: #a55000;
+}
From 21679435e531e729a4aea494e6cb9b7152ecdf75 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 8 Oct 2022 05:46:42 +0100
Subject: [PATCH 144/460] implement removal
---
javascript/contextMenus.js | 13 ++++++++++---
1 file changed, 10 insertions(+), 3 deletions(-)
diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js
index 99d1d3f7d..2d82269fc 100644
--- a/javascript/contextMenus.js
+++ b/javascript/contextMenus.js
@@ -79,7 +79,13 @@ contextMenuInit = function(){
}
function removeContextMenuOption(uid){
-
+ menuSpecs.forEach(function(v,k) {
+ let index = -1
+ v.forEach(function(e,ei){if(e['id']==uid){index=ei}})
+ if(index>=0){
+ v.splice(index, 1);
+ }
+ })
}
function addContextMenuEventListener(){
@@ -148,7 +154,8 @@ cancelGenerateForever = function(){
}
appendContextMenuOption('#txt2img_interrupt','Cancel generate forever',cancelGenerateForever)
-appendContextMenuOption('#txt2img_generate','Cancel generate forever',cancelGenerateForever)
+appendContextMenuOption('#txt2img_generate', 'Cancel generate forever',cancelGenerateForever)
+
appendContextMenuOption('#roll','Roll three',
function(){
@@ -162,4 +169,4 @@ appendContextMenuOption('#roll','Roll three',
onUiUpdate(function(){
addContextMenuEventListener()
-});
\ No newline at end of file
+});
From 87db6f01cc6b118fe0c82c36c6686d72d060c417 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 8 Oct 2022 10:15:29 +0300
Subject: [PATCH 145/460] add info about cross attention javascript shortcut
code
---
README.md | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/README.md b/README.md
index 0516c2cd8..d6e1d50bd 100644
--- a/README.md
+++ b/README.md
@@ -16,7 +16,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- Attention, specify parts of text that the model should pay more attention to
- a man in a ((tuxedo)) - will pay more attention to tuxedo
- a man in a (tuxedo:1.21) - alternative syntax
- - select text and press ctrl+up or ctrl+down to automatically adjust attention to selected text
+ - select text and press ctrl+up or ctrl+down to automatically adjust attention to selected text (code contributed by anonymous user)
- Loopback, run img2img processing multiple times
- X/Y plot, a way to draw a 2 dimensional plot of images with different parameters
- Textual Inversion
From 5d54f35c583bd5a3b0ee271a862827f1ca81ef09 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 11:55:02 +0300
Subject: [PATCH 146/460] add xformers attnblock and hypernetwork support
---
modules/sd_hijack_optimizations.py | 20 ++++++++++++++++++--
1 file changed, 18 insertions(+), 2 deletions(-)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 7fb4a45e3..c78d58382 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -98,8 +98,14 @@ def xformers_attention_forward(self, x, context=None, mask=None):
h = self.heads
q_in = self.to_q(x)
context = default(context, x)
- k_in = self.to_k(context)
- v_in = self.to_v(context)
+ hypernetwork = shared.selected_hypernetwork()
+ hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
+ if hypernetwork_layers is not None:
+ k_in = self.to_k(hypernetwork_layers[0](context))
+ v_in = self.to_v(hypernetwork_layers[1](context))
+ else:
+ k_in = self.to_k(context)
+ v_in = self.to_v(context)
q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in))
del q_in, k_in, v_in
out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None)
@@ -169,3 +175,13 @@ def cross_attention_attnblock_forward(self, x):
h3 += x
return h3
+
+ def xformers_attnblock_forward(self, x):
+ h_ = x
+ h_ = self.norm(h_)
+ q1 = self.q(h_).contiguous()
+ k1 = self.k(h_).contiguous()
+ v = self.v(h_).contiguous()
+ out = xformers.ops.memory_efficient_attention(q1, k1, v)
+ out = self.proj_out(out)
+ return x+out
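
The .contiguous() calls matter: the rearranged outputs of the block's 1x1 convolutions are strided views that the fused kernel will not accept. Note that as committed, q/k/v keep their image shape; a shape-complete variant of the same call flattens the spatial grid into a token axis around the kernel, along the lines of the sketch below (illustrative, not the patch's final form; requires xformers and typically a CUDA device):

import torch
from einops import rearrange
import xformers.ops

b, c, hh, ww = 1, 512, 64, 64
q = torch.randn(b, c, hh, ww)          # 1x1-conv output, image-shaped
k, v = q.clone(), q.clone()

# flatten the spatial grid into a token axis and force a contiguous layout
q, k, v = (rearrange(t, 'b c h w -> b (h w) c').contiguous() for t in (q, k, v))
out = xformers.ops.memory_efficient_attention(q, k, v)
out = rearrange(out, 'b (h w) c -> b c h w', h=hh)
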
From 76a616fa6b814c681eaf6edc87eb3001b8c2b6be Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 11:55:38 +0300
Subject: [PATCH 147/460] Update sd_hijack_optimizations.py
---
modules/sd_hijack_optimizations.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index c78d58382..ee58c7e4e 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -176,7 +176,7 @@ def cross_attention_attnblock_forward(self, x):
return h3
- def xformers_attnblock_forward(self, x):
+def xformers_attnblock_forward(self, x):
h_ = x
h_ = self.norm(h_)
q1 = self.q(h_).contiguous()
From 91d66f5520df416db718103d460550ad495e952d Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 11:56:01 +0300
Subject: [PATCH 148/460] use new attnblock for xformers path
---
modules/sd_hijack.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 0e99c3192..3da8c8ce2 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -23,7 +23,7 @@ def apply_optimizations():
ldm.modules.diffusionmodules.model.nonlinearity = silu
if not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip):
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
- ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
+ ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
elif cmd_opts.opt_split_attention_v1:
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
elif cmd_opts.opt_split_attention:
From 616b7218f7c469d25c138634472017a7e18e742e Mon Sep 17 00:00:00 2001
From: leko
Date: Fri, 7 Oct 2022 23:09:21 +0800
Subject: [PATCH 149/460] fix: handles when state_dict does not exist
---
modules/sd_models.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 8f794b479..9409d0707 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -122,7 +122,11 @@ def load_model_weights(model, checkpoint_file, sd_model_hash):
pl_sd = torch.load(checkpoint_file, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
- sd = pl_sd["state_dict"]
+
+ if "state_dict" in pl_sd:
+ sd = pl_sd["state_dict"]
+ else:
+ sd = pl_sd
model.load_state_dict(sd, strict=False)
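
Some checkpoints are saved as a bare state_dict rather than a Lightning-style dict with a "state_dict" key; the fix falls back accordingly. The same defensive load in isolation:

import torch

def read_state_dict(checkpoint_file):
    pl_sd = torch.load(checkpoint_file, map_location="cpu")
    if "global_step" in pl_sd:
        print(f"Global Step: {pl_sd['global_step']}")
    # Lightning-style files wrap the weights; bare dumps are the weights
    return pl_sd["state_dict"] if "state_dict" in pl_sd else pl_sd
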
From 706d5944a075a6523ea7f00165d630efc085ca22 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 8 Oct 2022 13:38:57 +0300
Subject: [PATCH 150/460] let user choose his own prompt token count limit
---
modules/processing.py | 6 ++++++
modules/sd_hijack.py | 13 +++++++------
modules/shared.py | 5 +++--
3 files changed, 16 insertions(+), 8 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index f773a30ef..d814d5acd 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -123,6 +123,7 @@ class Processed:
self.index_of_first_image = index_of_first_image
self.styles = p.styles
self.job_timestamp = state.job_timestamp
+ self.max_prompt_tokens = opts.max_prompt_tokens
self.eta = p.eta
self.ddim_discretize = p.ddim_discretize
@@ -141,6 +142,7 @@ class Processed:
self.all_subseeds = all_subseeds or [self.subseed]
self.infotexts = infotexts or [info]
+
def js(self):
obj = {
"prompt": self.prompt,
@@ -169,6 +171,7 @@ class Processed:
"infotexts": self.infotexts,
"styles": self.styles,
"job_timestamp": self.job_timestamp,
+ "max_prompt_tokens": self.max_prompt_tokens,
}
return json.dumps(obj)
@@ -266,6 +269,8 @@ def fix_seed(p):
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0):
index = position_in_batch + iteration * p.batch_size
+ max_tokens = getattr(p, 'max_prompt_tokens', opts.max_prompt_tokens)
+
generation_params = {
"Steps": p.steps,
"Sampler": sd_samplers.samplers[p.sampler_index].name,
@@ -281,6 +286,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
"Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None),
"Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
+ "Max tokens": (None if max_tokens == shared.vanilla_max_prompt_tokens else max_tokens)
}
generation_params.update(p.extra_generation_params)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index d68f89cc2..340329c0b 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -18,7 +18,6 @@ attention_CrossAttention_forward = ldm.modules.attention.CrossAttention.forward
diffusionmodules_model_nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity
diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward
-
def apply_optimizations():
undo_optimizations()
@@ -83,7 +82,7 @@ class StableDiffusionModelHijack:
layer.padding_mode = 'circular' if enable else 'zeros'
def tokenize(self, text):
- max_length = self.clip.max_length - 2
+ max_length = opts.max_prompt_tokens - 2
_, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
return remade_batch_tokens[0], token_count, max_length
@@ -94,7 +93,6 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
self.wrapped = wrapped
self.hijack: StableDiffusionModelHijack = hijack
self.tokenizer = wrapped.tokenizer
- self.max_length = wrapped.max_length
self.token_mults = {}
tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k]
@@ -116,7 +114,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
def tokenize_line(self, line, used_custom_terms, hijack_comments):
id_start = self.wrapped.tokenizer.bos_token_id
id_end = self.wrapped.tokenizer.eos_token_id
- maxlen = self.wrapped.max_length
+ maxlen = opts.max_prompt_tokens
if opts.enable_emphasis:
parsed = prompt_parser.parse_prompt_attention(line)
@@ -191,7 +189,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
def process_text_old(self, text):
id_start = self.wrapped.tokenizer.bos_token_id
id_end = self.wrapped.tokenizer.eos_token_id
- maxlen = self.wrapped.max_length
+ maxlen = self.wrapped.max_length # you get to stay at 77
used_custom_terms = []
remade_batch_tokens = []
overflowing_words = []
@@ -268,8 +266,11 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
if len(used_custom_terms) > 0:
self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))
+ position_ids_array = [min(x, 75) for x in range(len(remade_batch_tokens[0])-1)] + [76]
+ position_ids = torch.asarray(position_ids_array, device=devices.device).expand((1, -1))
+
tokens = torch.asarray(remade_batch_tokens).to(device)
- outputs = self.wrapped.transformer(input_ids=tokens)
+ outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids)
z = outputs.last_hidden_state
# restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
diff --git a/modules/shared.py b/modules/shared.py
index 879d8424a..864e772cf 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -118,8 +118,8 @@ prompt_styles = modules.styles.StyleDatabase(styles_filename)
interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
-# This was moved to webui.py with the other model "setup" calls.
-# modules.sd_models.list_models()
+
+vanilla_max_prompt_tokens = 77
def realesrgan_models_names():
@@ -221,6 +221,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"filter_nsfw": OptionInfo(False, "Filter NSFW content"),
+ "max_prompt_tokens": OptionInfo(vanilla_max_prompt_tokens, f"Max prompt token count. Two tokens are reserved for for start and end. Default is {vanilla_max_prompt_tokens}. Setting this to a different value will result in different pictures for same seed.", gr.Number, {"precision": 0}),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
}))
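The interesting trick in this patch is the position-id clamp: CLIP only has 77 learned positions (indices 0 through 76), so tokens past the window reuse position 75 while the final token keeps the end position 76, which is what lets prompts exceed the vanilla limit at all. Isolated from the diff:

```python
# Mirrors the position_ids_array expression above; `length` is the padded
# prompt length including the start/end tokens.
def clamped_position_ids(length):
    return [min(x, 75) for x in range(length - 1)] + [76]

print(clamped_position_ids(10))   # [0, 1, ..., 8, 76]
print(clamped_position_ids(90))   # middle positions saturate at 75
```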
From 786d9f63aaa4515df82eb2cf357ea92f3dae1e29 Mon Sep 17 00:00:00 2001
From: Trung Ngo
Date: Tue, 4 Oct 2022 22:56:30 -0500
Subject: [PATCH 151/460] Add button to skip the current iteration
---
javascript/hints.js | 1 +
javascript/progressbar.js | 20 ++++++++++++++------
modules/img2img.py | 4 ++++
modules/processing.py | 4 ++++
modules/shared.py | 5 +++++
modules/ui.py | 8 ++++++++
style.css | 14 ++++++++++++--
webui.py | 1 +
8 files changed, 49 insertions(+), 8 deletions(-)
diff --git a/javascript/hints.js b/javascript/hints.js
index 8adcd983e..8e352e94a 100644
--- a/javascript/hints.js
+++ b/javascript/hints.js
@@ -35,6 +35,7 @@ titles = {
"Denoising strength": "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.",
"Denoising strength change factor": "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.",
+ "Skip": "Stop processing current image and continue processing.",
"Interrupt": "Stop processing images and return any results accumulated so far.",
"Save": "Write image to a directory (default - log/images) and generation parameters into csv file.",
diff --git a/javascript/progressbar.js b/javascript/progressbar.js
index f9e9290e2..4395a2159 100644
--- a/javascript/progressbar.js
+++ b/javascript/progressbar.js
@@ -1,8 +1,9 @@
// code related to showing and updating progressbar shown as the image is being made
global_progressbars = {}
-function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_interrupt, id_preview, id_gallery){
+function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip, id_interrupt, id_preview, id_gallery){
var progressbar = gradioApp().getElementById(id_progressbar)
+ var skip = id_skip ? gradioApp().getElementById(id_skip) : null
var interrupt = gradioApp().getElementById(id_interrupt)
if(opts.show_progress_in_title && progressbar && progressbar.offsetParent){
@@ -32,30 +33,37 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_inte
var progressDiv = gradioApp().querySelectorAll('#' + id_progressbar_span).length > 0;
if(!progressDiv){
+ if (skip) {
+ skip.style.display = "none"
+ }
interrupt.style.display = "none"
}
}
- window.setTimeout(function(){ requestMoreProgress(id_part, id_progressbar_span, id_interrupt) }, 500)
+ window.setTimeout(function() { requestMoreProgress(id_part, id_progressbar_span, id_skip, id_interrupt) }, 500)
});
mutationObserver.observe( progressbar, { childList:true, subtree:true })
}
}
onUiUpdate(function(){
- check_progressbar('txt2img', 'txt2img_progressbar', 'txt2img_progress_span', 'txt2img_interrupt', 'txt2img_preview', 'txt2img_gallery')
- check_progressbar('img2img', 'img2img_progressbar', 'img2img_progress_span', 'img2img_interrupt', 'img2img_preview', 'img2img_gallery')
- check_progressbar('ti', 'ti_progressbar', 'ti_progress_span', 'ti_interrupt', 'ti_preview', 'ti_gallery')
+ check_progressbar('txt2img', 'txt2img_progressbar', 'txt2img_progress_span', 'txt2img_skip', 'txt2img_interrupt', 'txt2img_preview', 'txt2img_gallery')
+ check_progressbar('img2img', 'img2img_progressbar', 'img2img_progress_span', 'img2img_skip', 'img2img_interrupt', 'img2img_preview', 'img2img_gallery')
+ check_progressbar('ti', 'ti_progressbar', 'ti_progress_span', '', 'ti_interrupt', 'ti_preview', 'ti_gallery')
})
-function requestMoreProgress(id_part, id_progressbar_span, id_interrupt){
+function requestMoreProgress(id_part, id_progressbar_span, id_skip, id_interrupt){
btn = gradioApp().getElementById(id_part+"_check_progress");
if(btn==null) return;
btn.click();
var progressDiv = gradioApp().querySelectorAll('#' + id_progressbar_span).length > 0;
+ var skip = id_skip ? gradioApp().getElementById(id_skip) : null
var interrupt = gradioApp().getElementById(id_interrupt)
if(progressDiv && interrupt){
+ if (skip) {
+ skip.style.display = "block"
+ }
interrupt.style.display = "block"
}
}
diff --git a/modules/img2img.py b/modules/img2img.py
index da212d72b..e60b7e0ff 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -32,6 +32,10 @@ def process_batch(p, input_dir, output_dir, args):
for i, image in enumerate(images):
state.job = f"{i+1} out of {len(images)}"
+ if state.skipped:
+ state.skipped = False
+ state.interrupted = False
+ continue
if state.interrupted:
break
diff --git a/modules/processing.py b/modules/processing.py
index d814d5acd..6805039c1 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -355,6 +355,10 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
state.job_count = p.n_iter
for n in range(p.n_iter):
+ if state.skipped:
+ state.skipped = False
+ state.interrupted = False
+
if state.interrupted:
break
diff --git a/modules/shared.py b/modules/shared.py
index 864e772cf..7f802bd97 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -84,6 +84,7 @@ def selected_hypernetwork():
class State:
+ skipped = False
interrupted = False
job = ""
job_no = 0
@@ -96,6 +97,10 @@ class State:
current_image_sampling_step = 0
textinfo = None
+ def skip(self):
+ self.skipped = True
+ self.interrupted = True
+
def interrupt(self):
self.interrupted = True
diff --git a/modules/ui.py b/modules/ui.py
index 4f18126fb..e3e62fdd5 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -191,6 +191,7 @@ def wrap_gradio_call(func, extra_outputs=None):
# last item is always HTML
res[-1] += f""
+ shared.state.skipped = False
shared.state.interrupted = False
shared.state.job_count = 0
@@ -411,9 +412,16 @@ def create_toprow(is_img2img):
with gr.Column(scale=1):
with gr.Row():
+ skip = gr.Button('Skip', elem_id=f"{id_part}_skip")
interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt")
submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary')
+ skip.click(
+ fn=lambda: shared.state.skip(),
+ inputs=[],
+ outputs=[],
+ )
+
interrupt.click(
fn=lambda: shared.state.interrupt(),
inputs=[],
diff --git a/style.css b/style.css
index 50c5e557c..6904fc50e 100644
--- a/style.css
+++ b/style.css
@@ -393,10 +393,20 @@ input[type="range"]{
#txt2img_interrupt, #img2img_interrupt{
position: absolute;
- width: 100%;
+ width: 50%;
height: 72px;
background: #b4c0cc;
- border-radius: 8px;
+ border-radius: 0px;
+ display: none;
+}
+
+#txt2img_skip, #img2img_skip{
+ position: absolute;
+ width: 50%;
+ right: 0px;
+ height: 72px;
+ background: #b4c0cc;
+ border-radius: 0px;
display: none;
}
diff --git a/webui.py b/webui.py
index 480360fe0..3b4cf5e9d 100644
--- a/webui.py
+++ b/webui.py
@@ -58,6 +58,7 @@ def wrap_gradio_gpu_call(func, extra_outputs=None):
shared.state.current_latent = None
shared.state.current_image = None
shared.state.current_image_sampling_step = 0
+ shared.state.skipped = False
shared.state.interrupted = False
shared.state.textinfo = None
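On the UI side the new button is ordinary Gradio wiring: a Button whose click handler flips a flag on the shared state object that the processing loops poll. A minimal standalone sketch, with a stand-in for modules.shared:

```python
import gradio as gr

class _State:                      # stand-in for shared.state
    skipped = False
    def skip(self):
        self.skipped = True

state = _State()

with gr.Blocks() as demo:
    skip = gr.Button('Skip', elem_id="txt2img_skip")
    skip.click(fn=lambda: state.skip(), inputs=[], outputs=[])
```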
From 00117a07efbbe8482add12262a179326541467de Mon Sep 17 00:00:00 2001
From: Trung Ngo
Date: Sat, 8 Oct 2022 05:33:21 -0500
Subject: [PATCH 152/460] check specifically for skipped
---
modules/img2img.py | 2 --
modules/processing.py | 3 +--
modules/sd_samplers.py | 4 ++--
modules/shared.py | 1 -
4 files changed, 3 insertions(+), 7 deletions(-)
diff --git a/modules/img2img.py b/modules/img2img.py
index e60b7e0ff..241267745 100644
--- a/modules/img2img.py
+++ b/modules/img2img.py
@@ -34,8 +34,6 @@ def process_batch(p, input_dir, output_dir, args):
state.job = f"{i+1} out of {len(images)}"
if state.skipped:
state.skipped = False
- state.interrupted = False
- continue
if state.interrupted:
break
diff --git a/modules/processing.py b/modules/processing.py
index 6805039c1..3657fe69b 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -357,7 +357,6 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
for n in range(p.n_iter):
if state.skipped:
state.skipped = False
- state.interrupted = False
if state.interrupted:
break
@@ -385,7 +384,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
with devices.autocast():
samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength)
- if state.interrupted:
+ if state.interrupted or state.skipped:
# if we are interrupted, sample returns just noise
# use the image collected previously in sampler loop
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index df17e93ca..13a8b3221 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -106,7 +106,7 @@ def extended_tdqm(sequence, *args, desc=None, **kwargs):
seq = sequence if cmd_opts.disable_console_progressbars else tqdm.tqdm(sequence, *args, desc=state.job, file=shared.progress_print_out, **kwargs)
for x in seq:
- if state.interrupted:
+ if state.interrupted or state.skipped:
break
yield x
@@ -254,7 +254,7 @@ def extended_trange(sampler, count, *args, **kwargs):
seq = range(count) if cmd_opts.disable_console_progressbars else tqdm.trange(count, *args, desc=state.job, file=shared.progress_print_out, **kwargs)
for x in seq:
- if state.interrupted:
+ if state.interrupted or state.skipped:
break
if sampler.stop_at is not None and x > sampler.stop_at:
diff --git a/modules/shared.py b/modules/shared.py
index 7f802bd97..ca4626282 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -99,7 +99,6 @@ class State:
def skip(self):
self.skipped = True
- self.interrupted = True
def interrupt(self):
self.interrupted = True
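After this refinement the two flags have cleanly separated meanings: either one makes the sampler abandon the current image early, but only `interrupted` ends the whole batch, while `skipped` is consumed and iteration continues. A compressed model of the resulting control flow:

```python
class State:
    skipped = False
    interrupted = False

state = State()

def sample_one_image():
    for step in range(20):                  # stand-in for the sampler steps
        if state.interrupted or state.skipped:
            break                           # abandon this image early

for n in range(4):                          # stand-in for the n_iter loop
    if state.skipped:
        state.skipped = False               # consume the skip, keep going
    if state.interrupted:
        break                               # interrupt ends the whole job
    sample_one_image()
```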
From 4999eb2ef9b30e8c42ca7e4a94d4bbffe4d1f015 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 8 Oct 2022 14:25:47 +0300
Subject: [PATCH 153/460] do not let user choose his own prompt token count
limit
---
README.md | 1 +
modules/processing.py | 5 -----
modules/sd_hijack.py | 25 ++++++++++++-------------
modules/shared.py | 3 ---
4 files changed, 13 insertions(+), 21 deletions(-)
diff --git a/README.md b/README.md
index d6e1d50bd..ef9b5e313 100644
--- a/README.md
+++ b/README.md
@@ -65,6 +65,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- [Composable-Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/), a way to use multiple prompts at once
- separate prompts using uppercase `AND`
- also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2`
+- No token limit for prompts (original stable diffusion lets you use up to 75 tokens)
## Installation and Running
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
diff --git a/modules/processing.py b/modules/processing.py
index 3657fe69b..d5162ddc0 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -123,7 +123,6 @@ class Processed:
self.index_of_first_image = index_of_first_image
self.styles = p.styles
self.job_timestamp = state.job_timestamp
- self.max_prompt_tokens = opts.max_prompt_tokens
self.eta = p.eta
self.ddim_discretize = p.ddim_discretize
@@ -171,7 +170,6 @@ class Processed:
"infotexts": self.infotexts,
"styles": self.styles,
"job_timestamp": self.job_timestamp,
- "max_prompt_tokens": self.max_prompt_tokens,
}
return json.dumps(obj)
@@ -269,8 +267,6 @@ def fix_seed(p):
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0):
index = position_in_batch + iteration * p.batch_size
- max_tokens = getattr(p, 'max_prompt_tokens', opts.max_prompt_tokens)
-
generation_params = {
"Steps": p.steps,
"Sampler": sd_samplers.samplers[p.sampler_index].name,
@@ -286,7 +282,6 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
"Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None),
"Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
- "Max tokens": (None if max_tokens == shared.vanilla_max_prompt_tokens else max_tokens)
}
generation_params.update(p.extra_generation_params)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 340329c0b..2c1332c9f 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -36,6 +36,13 @@ def undo_optimizations():
ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward
+def get_target_prompt_token_count(token_count):
+ if token_count < 75:
+ return 75
+
+ return math.ceil(token_count / 10) * 10
+
+
class StableDiffusionModelHijack:
fixes = None
comments = []
@@ -84,7 +91,7 @@ class StableDiffusionModelHijack:
def tokenize(self, text):
max_length = opts.max_prompt_tokens - 2
_, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
- return remade_batch_tokens[0], token_count, max_length
+ return remade_batch_tokens[0], token_count, get_target_prompt_token_count(token_count)
class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
@@ -114,7 +121,6 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
def tokenize_line(self, line, used_custom_terms, hijack_comments):
id_start = self.wrapped.tokenizer.bos_token_id
id_end = self.wrapped.tokenizer.eos_token_id
- maxlen = opts.max_prompt_tokens
if opts.enable_emphasis:
parsed = prompt_parser.parse_prompt_attention(line)
@@ -146,19 +152,12 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
used_custom_terms.append((embedding.name, embedding.checksum()))
i += embedding_length_in_tokens
- if len(remade_tokens) > maxlen - 2:
- vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()}
- ovf = remade_tokens[maxlen - 2:]
- overflowing_words = [vocab.get(int(x), "") for x in ovf]
- overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words))
- hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n")
-
token_count = len(remade_tokens)
- remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens))
- remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end]
+ prompt_target_length = get_target_prompt_token_count(token_count)
+ tokens_to_add = prompt_target_length - len(remade_tokens) + 1
- multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers))
- multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0]
+ remade_tokens = [id_start] + remade_tokens + [id_end] * tokens_to_add
+ multipliers = [1.0] + multipliers + [1.0] * tokens_to_add
return remade_tokens, fixes, multipliers, token_count
diff --git a/modules/shared.py b/modules/shared.py
index ca4626282..475d7e526 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -123,8 +123,6 @@ interrogator = modules.interrogate.InterrogateModels("interrogate")
face_restorers = []
-vanilla_max_prompt_tokens = 77
-
def realesrgan_models_names():
import modules.realesrgan_model
@@ -225,7 +223,6 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"filter_nsfw": OptionInfo(False, "Filter NSFW content"),
- "max_prompt_tokens": OptionInfo(vanilla_max_prompt_tokens, f"Max prompt token count. Two tokens are reserved for for start and end. Default is {vanilla_max_prompt_tokens}. Setting this to a different value will result in different pictures for same seed.", gr.Number, {"precision": 0}),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
}))
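The replacement for the user-facing option is the rounding helper above: prompts at or under 75 tokens keep the vanilla shape, longer ones are padded to the next multiple of 10 so similar prompts in a batch still share a tensor shape. Copied out for clarity:

```python
import math

def get_target_prompt_token_count(token_count):
    if token_count < 75:
        return 75
    return math.ceil(token_count / 10) * 10

assert get_target_prompt_token_count(40) == 75
assert get_target_prompt_token_count(83) == 90
```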
From 4201fd14f5769a4cf6723d2bc5495c3c84a2cd00 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 14:42:34 +0300
Subject: [PATCH 154/460] install xformers
---
launch.py | 3 +++
1 file changed, 3 insertions(+)
diff --git a/launch.py b/launch.py
index 75edb66a9..f3fbe16a5 100644
--- a/launch.py
+++ b/launch.py
@@ -124,6 +124,9 @@ if not is_installed("gfpgan"):
if not is_installed("clip"):
run_pip(f"install {clip_package}", "clip")
+if not is_installed("xformers"):
+ run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
+
os.makedirs(dir_repos, exist_ok=True)
git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
From 3f166be1b60ff2ab33a6d2646809ec3f48796303 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 14:42:50 +0300
Subject: [PATCH 155/460] Update requirements.txt
---
requirements.txt | 1 -
1 file changed, 1 deletion(-)
diff --git a/requirements.txt b/requirements.txt
index 304a066a3..81641d68f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -24,4 +24,3 @@ torchdiffeq
kornia
lark
functorch
-#xformers?
From 77f4237d1c3af1756e7dab2699e3dcebad5619d6 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 8 Oct 2022 15:25:59 +0300
Subject: [PATCH 156/460] fix bugs related to variable prompt lengths
---
modules/sd_hijack.py | 14 +++++++++-----
modules/sd_samplers.py | 35 ++++++++++++++++++++++++++++-------
2 files changed, 37 insertions(+), 12 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 2c1332c9f..7e7fde0f9 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -89,7 +89,6 @@ class StableDiffusionModelHijack:
layer.padding_mode = 'circular' if enable else 'zeros'
def tokenize(self, text):
- max_length = opts.max_prompt_tokens - 2
_, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
return remade_batch_tokens[0], token_count, get_target_prompt_token_count(token_count)
@@ -174,7 +173,8 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
if line in cache:
remade_tokens, fixes, multipliers = cache[line]
else:
- remade_tokens, fixes, multipliers, token_count = self.tokenize_line(line, used_custom_terms, hijack_comments)
+ remade_tokens, fixes, multipliers, current_token_count = self.tokenize_line(line, used_custom_terms, hijack_comments)
+ token_count = max(current_token_count, token_count)
cache[line] = (remade_tokens, fixes, multipliers)
@@ -265,15 +265,19 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
if len(used_custom_terms) > 0:
self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))
- position_ids_array = [min(x, 75) for x in range(len(remade_batch_tokens[0])-1)] + [76]
+ target_token_count = get_target_prompt_token_count(token_count) + 2
+
+ position_ids_array = [min(x, 75) for x in range(target_token_count-1)] + [76]
position_ids = torch.asarray(position_ids_array, device=devices.device).expand((1, -1))
- tokens = torch.asarray(remade_batch_tokens).to(device)
+ remade_batch_tokens_of_same_length = [x + [self.wrapped.tokenizer.eos_token_id] * (target_token_count - len(x)) for x in remade_batch_tokens]
+ tokens = torch.asarray(remade_batch_tokens_of_same_length).to(device)
outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids)
z = outputs.last_hidden_state
# restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
- batch_multipliers = torch.asarray(batch_multipliers).to(device)
+ batch_multipliers_of_same_length = [x + [1.0] * (target_token_count - len(x)) for x in batch_multipliers]
+ batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(device)
original_mean = z.mean()
z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape)
new_mean = z.mean()
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 13a8b3221..eade0dbbd 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -142,6 +142,16 @@ class VanillaStableDiffusionSampler:
assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers'
cond = tensor
+ # for DDIM, shapes must match, we can't just process cond and uncond independently;
+ # filling unconditional_conditioning with repeats of the last vector to match length is
+ # not 100% correct but should work well enough
+ if unconditional_conditioning.shape[1] < cond.shape[1]:
+ last_vector = unconditional_conditioning[:, -1:]
+ last_vector_repeated = last_vector.repeat([1, cond.shape[1] - unconditional_conditioning.shape[1], 1])
+ unconditional_conditioning = torch.hstack([unconditional_conditioning, last_vector_repeated])
+ elif unconditional_conditioning.shape[1] > cond.shape[1]:
+ unconditional_conditioning = unconditional_conditioning[:, :cond.shape[1]]
+
if self.mask is not None:
img_orig = self.sampler.model.q_sample(self.init_latent, ts)
x_dec = img_orig * self.mask + self.nmask * x_dec
@@ -221,18 +231,29 @@ class CFGDenoiser(torch.nn.Module):
x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x])
sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma])
- cond_in = torch.cat([tensor, uncond])
- if shared.batch_cond_uncond:
- x_out = self.inner_model(x_in, sigma_in, cond=cond_in)
+ if tensor.shape[1] == uncond.shape[1]:
+ cond_in = torch.cat([tensor, uncond])
+
+ if shared.batch_cond_uncond:
+ x_out = self.inner_model(x_in, sigma_in, cond=cond_in)
+ else:
+ x_out = torch.zeros_like(x_in)
+ for batch_offset in range(0, x_out.shape[0], batch_size):
+ a = batch_offset
+ b = a + batch_size
+ x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=cond_in[a:b])
else:
x_out = torch.zeros_like(x_in)
- for batch_offset in range(0, x_out.shape[0], batch_size):
+ batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size
+ for batch_offset in range(0, tensor.shape[0], batch_size):
a = batch_offset
- b = a + batch_size
- x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=cond_in[a:b])
+ b = min(a + batch_size, tensor.shape[0])
+ x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=tensor[a:b])
- denoised_uncond = x_out[-batch_size:]
+ x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=uncond)
+
+ denoised_uncond = x_out[-uncond.shape[0]:]
denoised = torch.clone(denoised_uncond)
for i, conds in enumerate(conds_list):
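The DDIM/PLMS half of this fix pads the shorter of cond/uncond by repeating its last vector, which the inline comment itself flags as approximate but good enough. Isolated as a standalone helper, with shapes assumed to be batch x tokens x channels:

```python
import torch

def match_uncond_to_cond(cond, uncond):
    if uncond.shape[1] < cond.shape[1]:
        last = uncond[:, -1:]
        pad = last.repeat([1, cond.shape[1] - uncond.shape[1], 1])
        uncond = torch.hstack([uncond, pad])       # concatenates along dim 1
    elif uncond.shape[1] > cond.shape[1]:
        uncond = uncond[:, :cond.shape[1]]
    return cond, uncond

c, u = match_uncond_to_cond(torch.rand(1, 90, 768), torch.rand(1, 77, 768))
assert u.shape == (1, 90, 768)
```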
From 7001bffe0247804793dfabb69ac96d832572ccd0 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 8 Oct 2022 15:43:25 +0300
Subject: [PATCH 157/460] fix AND broken for long prompts
---
modules/prompt_parser.py | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index f00256f28..156660736 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -239,6 +239,15 @@ def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current_step):
conds_list.append(conds_for_batch)
+ # if prompts have wildly different lengths above the limit we'll get tensors of different shapes
+ # and won't be able to torch.stack them. So this fixes that.
+ token_count = max([x.shape[0] for x in tensors])
+ for i in range(len(tensors)):
+ if tensors[i].shape[0] != token_count:
+ last_vector = tensors[i][-1:]
+ last_vector_repeated = last_vector.repeat([token_count - tensors[i].shape[0], 1])
+ tensors[i] = torch.vstack([tensors[i], last_vector_repeated])
+
return conds_list, torch.stack(tensors).to(device=param.device, dtype=param.dtype)
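Same repeat-the-last-vector idea, now applied across the subprompts of an AND composition so that torch.stack sees uniform shapes. As a standalone function over per-prompt tensors of shape tokens x channels:

```python
import torch

def stack_padded(tensors):
    token_count = max(x.shape[0] for x in tensors)
    for i in range(len(tensors)):
        if tensors[i].shape[0] != token_count:
            last = tensors[i][-1:]
            pad = last.repeat([token_count - tensors[i].shape[0], 1])
            tensors[i] = torch.vstack([tensors[i], pad])
    return torch.stack(tensors)

out = stack_padded([torch.rand(77, 768), torch.rand(90, 768)])
assert out.shape == (2, 90, 768)
```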
From 772db721a52da374d627b60994222051f26c27a7 Mon Sep 17 00:00:00 2001
From: ddPn08
Date: Fri, 7 Oct 2022 23:02:07 +0900
Subject: [PATCH 158/460] fix glob path in hypernetwork.py
---
modules/hypernetwork.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py
index c7b866829..7f0622428 100644
--- a/modules/hypernetwork.py
+++ b/modules/hypernetwork.py
@@ -43,7 +43,7 @@ class Hypernetwork:
def load_hypernetworks(path):
res = {}
- for filename in glob.iglob(path + '**/*.pt', recursive=True):
+ for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True):
try:
hn = Hypernetwork(filename)
res[hn.name] = hn
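Worth spelling out why the one-liner matters: without os.path.join, a configured path that lacks a trailing separator fuses into the pattern, the `**` stops being a standalone recursive component, and hypernetworks in subfolders are silently missed.

```python
import glob, os

path = "models/hypernetworks"                 # note: no trailing slash
fused = path + '**/*.pt'                      # "models/hypernetworks**/*.pt" - not recursive
joined = os.path.join(path, '**/*.pt')        # "models/hypernetworks/**/*.pt"
list(glob.iglob(joined, recursive=True))      # walks subfolders as intended
```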
From 32e428ff19c28c87bb2ed362316b928b372e3a70 Mon Sep 17 00:00:00 2001
From: guaneec
Date: Sat, 8 Oct 2022 16:01:34 +0800
Subject: [PATCH 159/460] Remove duplicate event listeners
---
javascript/imageviewer.js | 3 +++
1 file changed, 3 insertions(+)
diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index 3a0baac8c..4c0e8f4bb 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -86,6 +86,9 @@ function showGalleryImage(){
if(fullImg_preview != null){
fullImg_preview.forEach(function function_name(e) {
+ if (e.dataset.modded)
+ return;
+ e.dataset.modded = true;
if(e && e.parentElement.tagName == 'DIV'){
e.style.cursor='pointer'
From 5f85a74b00c0154bfd559dc67edfa7e30342b7c9 Mon Sep 17 00:00:00 2001
From: MrCheeze
Date: Fri, 7 Oct 2022 17:48:34 -0400
Subject: [PATCH 160/460] fix bug where, when using prompt composition,
hijack_comments generated before the final AND were dropped
---
modules/processing.py | 1 +
modules/sd_hijack.py | 5 ++++-
2 files changed, 5 insertions(+), 1 deletion(-)
diff --git a/modules/processing.py b/modules/processing.py
index d5162ddc0..8240ee270 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -313,6 +313,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
os.makedirs(p.outpath_grids, exist_ok=True)
modules.sd_hijack.model_hijack.apply_circular(p.tiling)
+ modules.sd_hijack.model_hijack.clear_comments()
comments = {}
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 7e7fde0f9..ba808a397 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -88,6 +88,9 @@ class StableDiffusionModelHijack:
for layer in [layer for layer in self.layers if type(layer) == torch.nn.Conv2d]:
layer.padding_mode = 'circular' if enable else 'zeros'
+ def clear_comments(self):
+ self.comments = []
+
def tokenize(self, text):
_, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text])
return remade_batch_tokens[0], token_count, get_target_prompt_token_count(token_count)
@@ -260,7 +263,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text)
self.hijack.fixes = hijack_fixes
- self.hijack.comments = hijack_comments
+ self.hijack.comments += hijack_comments
if len(used_custom_terms) > 0:
self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms]))
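The bug in miniature: each subprompt of an AND composition passes through forward() separately, and plain assignment kept only the last subprompt's warnings; clearing once per job and appending thereafter preserves them all. A toy model:

```python
comments = []

def clear_comments():
    comments.clear()                # once per job, in process_images

def forward(new_comments):
    comments.extend(new_comments)   # was: comments = new_comments

clear_comments()
forward(["warning from subprompt A"])
forward(["warning from subprompt B"])
assert comments == ["warning from subprompt A", "warning from subprompt B"]
```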
From d0e85873ac72416d32dee8720dc9e93ab3d3e236 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 16:13:26 +0300
Subject: [PATCH 161/460] check for OS and env variable
---
launch.py | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/launch.py b/launch.py
index f3fbe16a5..a2089b3bc 100644
--- a/launch.py
+++ b/launch.py
@@ -4,6 +4,7 @@ import os
import sys
import importlib.util
import shlex
+import platform
dir_repos = "repositories"
dir_tmp = "tmp"
@@ -31,6 +32,7 @@ def extract_arg(args, name):
args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test')
+args, xformers = extract_arg(args, '--xformers')
def repo_dir(name):
@@ -124,8 +126,11 @@ if not is_installed("gfpgan"):
if not is_installed("clip"):
run_pip(f"install {clip_package}", "clip")
-if not is_installed("xformers"):
- run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
+if not is_installed("xformers") and xformers:
+ if platform.system() == "Windows":
+ run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
+ elif:
+ run_pip("install xformers", "xformers")
os.makedirs(dir_repos, exist_ok=True)
From 26b459a3799c5cdf71ca8ed5315a99f69c69f02c Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 16:20:04 +0300
Subject: [PATCH 162/460] default to split attention if cuda is available and
xformers is not
---
modules/sd_hijack.py | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 3da8c8ce2..04adcf035 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -21,12 +21,12 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At
def apply_optimizations():
ldm.modules.diffusionmodules.model.nonlinearity = silu
- if not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip):
+ if not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip or shared.xformers_available):
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
elif cmd_opts.opt_split_attention_v1:
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
- elif cmd_opts.opt_split_attention:
+ elif cmd_opts.opt_split_attention or torch.cuda.is_available():
ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
From ddfa9a97865c732193023a71521c5b7b53d8571b Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 16:20:41 +0300
Subject: [PATCH 163/460] add xformers_available shared variable
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index 8cc3b2fe2..6ed4b8021 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -74,7 +74,7 @@ device = devices.device
batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram)
parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
-
+xformers_available = False
config_filename = cmd_opts.ui_settings_file
From 69d0053583757ce2942d62de81e8b89e6be07840 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 16:21:40 +0300
Subject: [PATCH 164/460] update sd_hijack_opt to respect new env variables
---
modules/sd_hijack_optimizations.py | 11 ++++++++---
1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index ee58c7e4e..be09ec8f4 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -1,9 +1,14 @@
import math
import torch
from torch import einsum
-import xformers.ops
-import functorch
-xformers._is_functorch_available=True
+try:
+ import xformers.ops
+ import functorch
+ xformers._is_functorch_available = True
+ shared.xformers_available = True
+except:
+ print('Cannot find xformers, defaulting to split attention. Try setting --xformers in your webui-user file if you wish to install it.')
+ continue
from ldm.util import default
from einops import rearrange
From ca5f0f149c29c344a6badd055b15b5e5fcd6e938 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 16:22:38 +0300
Subject: [PATCH 165/460] Update launch.py
---
launch.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/launch.py b/launch.py
index a2089b3bc..a592e1ba7 100644
--- a/launch.py
+++ b/launch.py
@@ -129,7 +129,7 @@ if not is_installed("clip"):
if not is_installed("xformers") and xformers:
if platform.system() == "Windows":
run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
- elif:
+ elif platform.system() == "Linux":
run_pip("install xformers", "xformers")
os.makedirs(dir_repos, exist_ok=True)
From 7ffea1507813540b8cd9e73feb7bf23de1ac4e27 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 16:24:06 +0300
Subject: [PATCH 166/460] Update requirements_versions.txt
---
requirements_versions.txt | 1 +
1 file changed, 1 insertion(+)
diff --git a/requirements_versions.txt b/requirements_versions.txt
index fdff26878..fec3e9d5b 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -22,3 +22,4 @@ resize-right==0.0.2
torchdiffeq==0.2.3
kornia==0.6.7
lark==1.1.2
+functorch==0.2.1
From 970de9ee6891ff586821d0d80dde01c2f6c681b3 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 16:29:43 +0300
Subject: [PATCH 167/460] Update sd_hijack.py
---
modules/sd_hijack.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 04adcf035..5b30539fe 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -21,7 +21,7 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At
def apply_optimizations():
ldm.modules.diffusionmodules.model.nonlinearity = silu
- if not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip or shared.xformers_available):
+ if not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip) and shared.xformers_available:
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
elif cmd_opts.opt_split_attention_v1:
From 7ff1170a2e11b6f00f587407326db0b9f8f51adf Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 8 Oct 2022 16:33:39 +0300
Subject: [PATCH 168/460] emergency fix for xformers (continue + shared)
---
modules/sd_hijack_optimizations.py | 16 ++++++++--------
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index e43e2c7a3..05023b6fd 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -1,19 +1,19 @@
import math
import torch
from torch import einsum
-try:
- import xformers.ops
- import functorch
- xformers._is_functorch_available = True
- shared.xformers_available = True
-except:
- print('Cannot find xformers, defaulting to split attention. Try setting --xformers in your webui-user file if you wish to install it.')
- continue
+
from ldm.util import default
from einops import rearrange
from modules import shared
+try:
+ import xformers.ops
+ import functorch
+ xformers._is_functorch_available = True
+ shared.xformers_available = True
+except Exception:
+ print('Cannot find xformers, defaulting to split attention. Try adding --xformers commandline argument to your webui-user file if you wish to install it.')
# see https://github.com/basujindal/stable-diffusion/pull/117 for discussion
def split_cross_attention_forward_v1(self, x, context=None, mask=None):
From dc1117233ef8f9b25ff1ac40b158f20b70ba2fcb Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 8 Oct 2022 17:02:18 +0300
Subject: [PATCH 169/460] simplify xformers options: --xformers to enable and
that's it
---
launch.py | 2 +-
modules/sd_hijack.py | 2 +-
modules/sd_hijack_optimizations.py | 20 +++++++++++++-------
modules/shared.py | 2 +-
4 files changed, 16 insertions(+), 10 deletions(-)
diff --git a/launch.py b/launch.py
index a592e1ba7..61f62096c 100644
--- a/launch.py
+++ b/launch.py
@@ -32,7 +32,7 @@ def extract_arg(args, name):
args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test')
-args, xformers = extract_arg(args, '--xformers')
+xformers = '--xformers' in args
def repo_dir(name):
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 5d93f7f6a..91e98c16b 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -22,7 +22,7 @@ def apply_optimizations():
undo_optimizations()
ldm.modules.diffusionmodules.model.nonlinearity = silu
- if not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip) and shared.xformers_available:
+ if cmd_opts.xformers and shared.xformers_available and not torch.version.hip:
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
elif cmd_opts.opt_split_attention_v1:
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 05023b6fd..d23d733b0 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -1,4 +1,7 @@
import math
+import sys
+import traceback
+
import torch
from torch import einsum
@@ -7,13 +10,16 @@ from einops import rearrange
from modules import shared
-try:
- import xformers.ops
- import functorch
- xformers._is_functorch_available = True
- shared.xformers_available = True
-except Exception:
- print('Cannot find xformers, defaulting to split attention. Try adding --xformers commandline argument to your webui-user file if you wish to install it.')
+if shared.cmd_opts.xformers:
+ try:
+ import xformers.ops
+ import functorch
+ xformers._is_functorch_available = True
+ shared.xformers_available = True
+ except Exception:
+ print("Cannot import xformers", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+
# see https://github.com/basujindal/stable-diffusion/pull/117 for discussion
def split_cross_attention_forward_v1(self, x, context=None, mask=None):
diff --git a/modules/shared.py b/modules/shared.py
index d68df7511..02cb27228 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -43,7 +43,7 @@ parser.add_argument("--realesrgan-models-path", type=str, help="Path to director
parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET'))
parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
-parser.add_argument("--disable-opt-xformers-attention", action='store_true', help="force-disables xformers attention optimization")
+parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
From 27032c47df9c07ac21dd5b89fa7dc247bb8705b6 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 8 Oct 2022 17:10:05 +0300
Subject: [PATCH 170/460] restore old
opt_split_attention/disable_opt_split_attention logic
---
modules/sd_hijack.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 91e98c16b..335a2bcfb 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -27,7 +27,7 @@ def apply_optimizations():
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
elif cmd_opts.opt_split_attention_v1:
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
- elif cmd_opts.opt_split_attention or torch.cuda.is_available():
+ elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
From 4f33289d0fc5aa3a197f4a4c926d03d44f0d597e Mon Sep 17 00:00:00 2001
From: Milly
Date: Sat, 8 Oct 2022 22:48:15 +0900
Subject: [PATCH 171/460] Fixed typo
---
modules/ui.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/ui.py b/modules/ui.py
index e3e62fdd5..ffd75f6ac 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -946,7 +946,7 @@ def create_ui(wrap_gradio_gpu_call):
custom_name = gr.Textbox(label="Custom Name (Optional)")
interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Interpolation Amount', value=0.3)
interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method")
- save_as_half = gr.Checkbox(value=False, label="Safe as float16")
+ save_as_half = gr.Checkbox(value=False, label="Save as float16")
modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary')
with gr.Column(variant='panel'):
From cfc33f99d47d1f45af15499e5965834089d11858 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 8 Oct 2022 17:28:58 +0300
Subject: [PATCH 172/460] why did you do this
---
modules/sd_hijack.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 335a2bcfb..ed271976b 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -28,7 +28,7 @@ def apply_optimizations():
elif cmd_opts.opt_split_attention_v1:
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
- ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward
+ ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
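With the attribute typo fixed, the selection logic of apply_optimizations at this point in the series reads, in order: xformers when explicitly enabled, available, and not on ROCm; then the v1 split attention; then regular split attention by default on CUDA unless force-disabled. Restated as a pure function (a paraphrase for clarity, not the repo's API):

```python
def choose_optimization(opts, xformers_available, is_hip, cuda_available):
    if opts["xformers"] and xformers_available and not is_hip:
        return "xformers"
    if opts["opt_split_attention_v1"]:
        return "split_v1"
    if not opts["disable_opt_split_attention"] and (opts["opt_split_attention"] or cuda_available):
        return "split"
    return "none"
```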
From 7e639cd49855ef59e087ae9a9122756a937007eb Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 17:22:20 +0300
Subject: [PATCH 173/460] check for 3.10
---
launch.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/launch.py b/launch.py
index 61f62096c..1d65a779c 100644
--- a/launch.py
+++ b/launch.py
@@ -126,7 +126,7 @@ if not is_installed("gfpgan"):
if not is_installed("clip"):
run_pip(f"install {clip_package}", "clip")
-if not is_installed("xformers") and xformers:
+if not is_installed("xformers") and xformers and platform.python_version().startswith("3.10"):
if platform.system() == "Windows":
run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
elif platform.system() == "Linux":
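Pulling the launcher changes together, the install gate at this point requires all three conditions, since the prebuilt Windows wheel is cp310-only. A sketch with the launch.py helpers stubbed out (the real wheel URL is the one in the diff above):

```python
import platform

def is_installed(package):            # stub for launch.py's helper
    return False

def run_pip(args, desc):              # stub for launch.py's helper
    print("pip", args)

xformers = True                       # stand-in for the --xformers flag
if not is_installed("xformers") and xformers and platform.python_version().startswith("3.10"):
    if platform.system() == "Windows":
        run_pip("install <cp310 win_amd64 wheel from the diff above>", "xformers")
    elif platform.system() == "Linux":
        run_pip("install xformers", "xformers")
```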
From 017b6b8744f0771e498656ec043e12d5cc6969a7 Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 17:27:21 +0300
Subject: [PATCH 174/460] check for ampere
---
modules/sd_hijack.py | 7 ++++---
1 file changed, 4 insertions(+), 3 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index ed271976b..5e266d5e5 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -22,9 +22,10 @@ def apply_optimizations():
undo_optimizations()
ldm.modules.diffusionmodules.model.nonlinearity = silu
- if cmd_opts.xformers and shared.xformers_available and not torch.version.hip:
- ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
- ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
+ if cmd_opts.xformers and shared.xformers_available and torch.version.cuda:
+ if torch.cuda.get_device_capability(shared.device) == (8, 6):
+ ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
+ ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
elif cmd_opts.opt_split_attention_v1:
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
From cc0258aea7b6605be3648900063cfa96ed7c5ffa Mon Sep 17 00:00:00 2001
From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com>
Date: Sat, 8 Oct 2022 17:44:53 +0300
Subject: [PATCH 175/460] check for ampere without destroying the
optimizations. again.
---
modules/sd_hijack.py | 7 +++----
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 5e266d5e5..a3e374f09 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -22,10 +22,9 @@ def apply_optimizations():
undo_optimizations()
ldm.modules.diffusionmodules.model.nonlinearity = silu
- if cmd_opts.xformers and shared.xformers_available and torch.version.cuda:
- if torch.cuda.get_device_capability(shared.device) == (8, 6):
- ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
- ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
+ if cmd_opts.xformers and shared.xformers_available and torch.version.cuda and torch.cuda.get_device_capability(shared.device) == (8, 6):
+ ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
+ ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
elif cmd_opts.opt_split_attention_v1:
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
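The capability test is the whole Ampere check: (8, 6) is the compute capability of the consumer RTX 30-series cards the prebuilt wheel targets. Isolated, and guarded so it short-circuits on non-CUDA builds:

```python
import torch

def xformers_device_ok(device):
    # torch.version.cuda is None on CPU/ROCm builds, so the capability
    # query is only reached on CUDA
    return torch.version.cuda is not None and torch.cuda.get_device_capability(device) == (8, 6)
```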
From 34acad1628e98a5e0cbd459fa69ded915864f53d Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Fri, 7 Oct 2022 22:56:00 +0100
Subject: [PATCH 176/460] Add GZipMiddleware to root demo
---
webui.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/webui.py b/webui.py
index 3b4cf5e9d..18de8e165 100644
--- a/webui.py
+++ b/webui.py
@@ -5,6 +5,8 @@ import importlib
import signal
import threading
+from fastapi.middleware.gzip import GZipMiddleware
+
from modules.paths import script_path
from modules import devices, sd_samplers
@@ -93,7 +95,7 @@ def webui():
demo = modules.ui.create_ui(wrap_gradio_gpu_call=wrap_gradio_gpu_call)
- demo.launch(
+ app,local_url,share_url = demo.launch(
share=cmd_opts.share,
server_name="0.0.0.0" if cmd_opts.listen else None,
server_port=cmd_opts.port,
@@ -102,6 +104,8 @@ def webui():
inbrowser=cmd_opts.autolaunch,
prevent_thread_lock=True
)
+
+ app.add_middleware(GZipMiddleware,minimum_size=1000)
while 1:
time.sleep(0.5)
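The launch() call with prevent_thread_lock=True returns the underlying FastAPI app (plus the two URLs), which is what makes it possible to bolt middleware on after the fact; here gzip kicks in for responses over 1000 bytes. As a standalone sketch:

```python
import gradio as gr
from fastapi.middleware.gzip import GZipMiddleware

with gr.Blocks() as demo:
    gr.Markdown("hello")

app, local_url, share_url = demo.launch(prevent_thread_lock=True)
app.add_middleware(GZipMiddleware, minimum_size=1000)   # compress bodies >= 1000 bytes
```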
From a5550f0213c3f145b1c984816ebcef92c48853ee Mon Sep 17 00:00:00 2001
From: Artem Zagidulin
Date: Wed, 5 Oct 2022 19:10:39 +0300
Subject: [PATCH 177/460] alternate prompt
---
modules/prompt_parser.py | 9 +++++++--
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py
index 156660736..919d5d31a 100644
--- a/modules/prompt_parser.py
+++ b/modules/prompt_parser.py
@@ -13,13 +13,14 @@ import lark
schedule_parser = lark.Lark(r"""
!start: (prompt | /[][():]/+)*
-prompt: (emphasized | scheduled | plain | WHITESPACE)*
+prompt: (emphasized | scheduled | alternate | plain | WHITESPACE)*
!emphasized: "(" prompt ")"
| "(" prompt ":" prompt ")"
| "[" prompt "]"
scheduled: "[" [prompt ":"] prompt ":" [WHITESPACE] NUMBER "]"
+alternate: "[" prompt ("|" prompt)+ "]"
WHITESPACE: /\s+/
-plain: /([^\\\[\]():]|\\.)+/
+plain: /([^\\\[\]():|]|\\.)+/
%import common.SIGNED_NUMBER -> NUMBER
""")
@@ -59,6 +60,8 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
tree.children[-1] *= steps
tree.children[-1] = min(steps, int(tree.children[-1]))
l.append(tree.children[-1])
+ def alternate(self, tree):
+ l.extend(range(1, steps+1))
CollectSteps().visit(tree)
return sorted(set(l))
@@ -67,6 +70,8 @@ def get_learned_conditioning_prompt_schedules(prompts, steps):
def scheduled(self, args):
before, after, _, when = args
yield before or () if step <= when else after
+ def alternate(self, args):
+ yield next(args[(step - 1)%len(args)])
def start(self, args):
def flatten(x):
if type(x) == str:
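The grammar addition means a prompt like `[cow|horse]` re-selects its branch on every sampling step, cycling through the alternatives one-based, which is exactly what the `args[(step - 1) % len(args)]` indexing does. A toy model of the selection:

```python
def alternate_choice(options, step):
    return options[(step - 1) % len(options)]

print([alternate_choice(["cow", "horse"], s) for s in range(1, 7)])
# -> ['cow', 'horse', 'cow', 'horse', 'cow', 'horse']
```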
From 01f8cb44474e454903c11718e6a4f33dbde34bb8 Mon Sep 17 00:00:00 2001
From: Greendayle
Date: Sat, 8 Oct 2022 18:02:56 +0200
Subject: [PATCH 178/460] made deepdanbooru optional, added to readme,
automatic download of deepbooru model
---
README.md | 2 ++
launch.py | 4 ++++
modules/deepbooru.py | 20 ++++++++++----------
modules/shared.py | 1 +
modules/ui.py | 19 ++++++++++++-------
requirements.txt | 3 ---
requirements_versions.txt | 3 ---
7 files changed, 29 insertions(+), 23 deletions(-)
diff --git a/README.md b/README.md
index ef9b5e313..6cd7a1f9d 100644
--- a/README.md
+++ b/README.md
@@ -66,6 +66,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- separate prompts using uppercase `AND`
- also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2`
- No token limit for prompts (original stable diffusion lets you use up to 75 tokens)
+- DeepDanbooru integration, creates danbooru style tags for anime prompts (add --deepdanbooru to commandline args)
## Installation and Running
Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
@@ -123,4 +124,5 @@ The documentation was moved from this README over to the project's [wiki](https:
- Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot
- CLIP interrogator idea and borrowing some code - https://github.com/pharmapsychotic/clip-interrogator
- Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user.
+- DeepDanbooru - interrogator for anime diffusers https://github.com/KichangKim/DeepDanbooru
- (You)
diff --git a/launch.py b/launch.py
index 61f62096c..d46426eb3 100644
--- a/launch.py
+++ b/launch.py
@@ -33,6 +33,7 @@ def extract_arg(args, name):
args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test')
xformers = '--xformers' in args
+deepdanbooru = '--deepdanbooru' in args
def repo_dir(name):
@@ -132,6 +133,9 @@ if not is_installed("xformers") and xformers:
elif platform.system() == "Linux":
run_pip("install xformers", "xformers")
+if not is_installed("deepdanbooru") and deepdanbooru:
+ run_pip("install git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru")
+
os.makedirs(dir_repos, exist_ok=True)
git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
diff --git a/modules/deepbooru.py b/modules/deepbooru.py
index 781b22492..7e3c06182 100644
--- a/modules/deepbooru.py
+++ b/modules/deepbooru.py
@@ -9,16 +9,16 @@ def _load_tf_and_return_tags(pil_image, threshold):
import numpy as np
this_folder = os.path.dirname(__file__)
- model_path = os.path.join(this_folder, '..', 'models', 'deepbooru', 'deepdanbooru-v3-20211112-sgd-e28')
-
- model_good = False
- for path_candidate in [model_path, os.path.dirname(model_path)]:
- if os.path.exists(os.path.join(path_candidate, 'project.json')):
- model_path = path_candidate
- model_good = True
- if not model_good:
- return ("Download https://github.com/KichangKim/DeepDanbooru/releases/download/v3-20211112-sgd-e28/"
- "deepdanbooru-v3-20211112-sgd-e28.zip unpack and put into models/deepbooru")
+ model_path = os.path.abspath(os.path.join(this_folder, '..', 'models', 'deepbooru'))
+ if not os.path.exists(os.path.join(model_path, 'project.json')):
+ # there is no point importing these every time
+ import zipfile
+ from basicsr.utils.download_util import load_file_from_url
+ load_file_from_url(r"https://github.com/KichangKim/DeepDanbooru/releases/download/v3-20211112-sgd-e28/deepdanbooru-v3-20211112-sgd-e28.zip",
+ model_path)
+ with zipfile.ZipFile(os.path.join(model_path, "deepdanbooru-v3-20211112-sgd-e28.zip"), "r") as zip_ref:
+ zip_ref.extractall(model_path)
+ os.remove(os.path.join(model_path, "deepdanbooru-v3-20211112-sgd-e28.zip"))
tags = dd.project.load_tags_from_project(model_path)
model = dd.project.load_model_from_project(
diff --git a/modules/shared.py b/modules/shared.py
index 02cb27228..c87b726e7 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -44,6 +44,7 @@ parser.add_argument("--scunet-models-path", type=str, help="Path to directory wi
parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
+parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
diff --git a/modules/ui.py b/modules/ui.py
index 30583fe93..c5c11c3c9 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -23,9 +23,10 @@ import gradio.utils
import gradio.routes
from modules import sd_hijack
-from modules.deepbooru import get_deepbooru_tags
from modules.paths import script_path
from modules.shared import opts, cmd_opts
+if cmd_opts.deepdanbooru:
+ from modules.deepbooru import get_deepbooru_tags
import modules.shared as shared
from modules.sd_samplers import samplers, samplers_for_img2img
from modules.sd_hijack import model_hijack
@@ -437,7 +438,10 @@ def create_toprow(is_img2img):
with gr.Row(scale=1):
if is_img2img:
interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate")
- deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru")
+ if cmd_opts.deepdanbooru:
+ deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru")
+ else:
+ deepbooru = None
else:
interrogate = None
deepbooru = None
@@ -782,11 +786,12 @@ def create_ui(wrap_gradio_gpu_call):
outputs=[img2img_prompt],
)
- img2img_deepbooru.click(
- fn=interrogate_deepbooru,
- inputs=[init_img],
- outputs=[img2img_prompt],
- )
+ if cmd_opts.deepdanbooru:
+ img2img_deepbooru.click(
+ fn=interrogate_deepbooru,
+ inputs=[init_img],
+ outputs=[img2img_prompt],
+ )
save.click(
fn=wrap_gradio_call(save_files),
diff --git a/requirements.txt b/requirements.txt
index cd3953c6c..81641d68f 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -23,7 +23,4 @@ resize-right
torchdiffeq
kornia
lark
-deepdanbooru
-tensorflow
-tensorflow-io
functorch
diff --git a/requirements_versions.txt b/requirements_versions.txt
index 2d256a54f..fec3e9d5b 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -22,7 +22,4 @@ resize-right==0.0.2
torchdiffeq==0.2.3
kornia==0.6.7
lark==1.1.2
-git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26#egg=deepdanbooru[tensorflow]
-tensorflow==2.10.0
-tensorflow-io==0.27.0
functorch==0.2.1
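The deepbooru change above makes the model self-installing: if project.json is missing from the model directory, the release zip is fetched, unpacked in place, and the archive deleted, so the check passes on every later run. A rough standalone equivalent using only the standard library (the patch itself uses basicsr's load_file_from_url; MODEL_URL is a placeholder, not the real release URL):

import os
import zipfile
import urllib.request

MODEL_URL = "https://example.com/model.zip"  # placeholder, not the real release URL

def ensure_model(model_path):
    if os.path.exists(os.path.join(model_path, "project.json")):
        return model_path  # already unpacked, nothing to download
    os.makedirs(model_path, exist_ok=True)
    archive = os.path.join(model_path, "model.zip")
    urllib.request.urlretrieve(MODEL_URL, archive)  # one-time download
    with zipfile.ZipFile(archive, "r") as zip_ref:
        zip_ref.extractall(model_path)
    os.remove(archive)  # keep only the extracted files
    return model_path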
From f9c5da159245bb1e7603b3c8b9e0703bcb1c2ff5 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 8 Oct 2022 19:05:19 +0300
Subject: [PATCH 179/460] add fallback for xformers_attnblock_forward
---
modules/sd_hijack_optimizations.py | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index d23d733b0..dba21192b 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -211,6 +211,7 @@ def cross_attention_attnblock_forward(self, x):
return h3
def xformers_attnblock_forward(self, x):
+ try:
h_ = x
h_ = self.norm(h_)
q1 = self.q(h_).contiguous()
@@ -218,4 +219,6 @@ def xformers_attnblock_forward(self, x):
v = self.v(h_).contiguous()
out = xformers.ops.memory_efficient_attention(q1, k1, v)
out = self.proj_out(out)
- return x+out
+ return x + out
+ except NotImplementedError:
+ return cross_attention_attnblock_forward(self, x)
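The fallback works because xformers raises NotImplementedError for inputs its kernels cannot handle, so the wrapper can catch that one exception and rerun the reference implementation. The same pattern in miniature, with toy functions standing in for the two attention paths:

def with_fallback(fast, reference):
    """Prefer the optimized path; degrade gracefully when it refuses the input."""
    def wrapped(*args, **kwargs):
        try:
            return fast(*args, **kwargs)
        except NotImplementedError:
            return reference(*args, **kwargs)
    return wrapped

def picky_double(x):  # toy "fast" path that rejects odd inputs
    if x % 2:
        raise NotImplementedError("unsupported input")
    return x * 2

safe_double = with_fallback(picky_double, reference=lambda x: x * 2)
print(safe_double(4), safe_double(3))  # 8 6 -- both succeed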
From 3061cdb7b610d4ba7f1ea695d9d6364b591e5bc7 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 8 Oct 2022 19:22:15 +0300
Subject: [PATCH 180/460] add --force-enable-xformers option and also add
messages to console regarding cross attention optimizations
---
modules/sd_hijack.py | 6 +++++-
modules/shared.py | 1 +
2 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index a3e374f09..307cc67dd 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -22,12 +22,16 @@ def apply_optimizations():
undo_optimizations()
ldm.modules.diffusionmodules.model.nonlinearity = silu
- if cmd_opts.xformers and shared.xformers_available and torch.version.cuda and torch.cuda.get_device_capability(shared.device) == (8, 6):
+
+ if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and torch.cuda.get_device_capability(shared.device) == (8, 6)):
+ print("Applying xformers cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward
elif cmd_opts.opt_split_attention_v1:
+ print("Applying v1 cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1
elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()):
+ print("Applying cross attention optimization.")
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward
ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward
diff --git a/modules/shared.py b/modules/shared.py
index 02cb27228..8f9412262 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -44,6 +44,7 @@ parser.add_argument("--scunet-models-path", type=str, help="Path to directory wi
parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR'))
parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR'))
parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers")
+parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work")
parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.")
parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization")
parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find")
From 15c4278f1a18b8104e135dd82690d10cff39a2e7 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 8 Oct 2022 17:50:01 +0100
Subject: [PATCH 181/460] TI preprocess wording
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
I had to check the code to work out what splitting was 🤷🏿
---
modules/ui.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index ffd75f6ac..d52d74c6d 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -980,9 +980,9 @@ def create_ui(wrap_gradio_gpu_call):
process_dst = gr.Textbox(label='Destination directory')
with gr.Row():
- process_flip = gr.Checkbox(label='Flip')
- process_split = gr.Checkbox(label='Split into two')
- process_caption = gr.Checkbox(label='Add caption')
+ process_flip = gr.Checkbox(label='Create flipped copies')
+ process_split = gr.Checkbox(label='Split oversized images into two')
+ process_caption = gr.Checkbox(label='Use CLIP caption as filename')
with gr.Row():
with gr.Column(scale=3):
From b458fa48fe5734a872bca83061d702609cb52940 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sat, 8 Oct 2022 17:56:28 +0100
Subject: [PATCH 182/460] Update ui.py
---
modules/ui.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/ui.py b/modules/ui.py
index d52d74c6d..b09359aae 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -982,7 +982,7 @@ def create_ui(wrap_gradio_gpu_call):
with gr.Row():
process_flip = gr.Checkbox(label='Create flipped copies')
process_split = gr.Checkbox(label='Split oversized images into two')
- process_caption = gr.Checkbox(label='Use CLIP caption as filename')
+ process_caption = gr.Checkbox(label='Use BLIP caption as filename')
with gr.Row():
with gr.Column(scale=3):
From 1371d7608b402d6f15c200ec2f5fde4579836a05 Mon Sep 17 00:00:00 2001
From: Fampai
Date: Sat, 8 Oct 2022 14:28:22 -0400
Subject: [PATCH 183/460] Added ability to ignore last n layers in
FrozenCLIPEmbedder
---
modules/sd_hijack.py | 11 +++++++++--
modules/shared.py | 1 +
2 files changed, 10 insertions(+), 2 deletions(-)
diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py
index 307cc67dd..f12a9696f 100644
--- a/modules/sd_hijack.py
+++ b/modules/sd_hijack.py
@@ -281,8 +281,15 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module):
remade_batch_tokens_of_same_length = [x + [self.wrapped.tokenizer.eos_token_id] * (target_token_count - len(x)) for x in remade_batch_tokens]
tokens = torch.asarray(remade_batch_tokens_of_same_length).to(device)
- outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids)
- z = outputs.last_hidden_state
+
+ tmp = -opts.CLIP_ignore_last_layers
+ if (opts.CLIP_ignore_last_layers == 0):
+ outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids)
+ z = outputs.last_hidden_state
+ else:
+ outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids, output_hidden_states=tmp)
+ z = outputs.hidden_states[tmp]
+ z = self.wrapped.transformer.text_model.final_layer_norm(z)
# restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise
batch_multipliers_of_same_length = [x + [1.0] * (target_token_count - len(x)) for x in batch_multipliers]
diff --git a/modules/shared.py b/modules/shared.py
index 8f9412262..af8dc7447 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -225,6 +225,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), {
"use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."),
"enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"),
"filter_nsfw": OptionInfo(False, "Filter NSFW content"),
+ 'CLIP_ignore_last_layers': OptionInfo(0, "Ignore last layers of CLIP model", gr.Slider, {"minimum": 0, "maximum": 5, "step": 1}),
"random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}),
}))
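What "ignore last layers" (surfaced as "Clip skip" in the next patch) does: instead of the text encoder's last_hidden_state, it takes the hidden state n layers earlier and re-applies the final layer norm so the statistics stay sane. (Passing output_hidden_states=tmp, a negative int, works because any nonzero value is truthy.) A hedged sketch of the same idea using the Hugging Face transformers CLIP text model; the model name and function are illustrative, not the webui's code:

import torch
from transformers import CLIPTokenizer, CLIPTextModel

tokenizer = CLIPTokenizer.from_pretrained("openai/clip-vit-large-patch14")
model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")

def encode(text, skip=0):
    tokens = tokenizer(text, return_tensors="pt")
    with torch.no_grad():
        out = model(**tokens, output_hidden_states=bool(skip))
    if skip == 0:
        return out.last_hidden_state
    z = out.hidden_states[-skip]                 # n layers before the end
    return model.text_model.final_layer_norm(z)  # renormalize, as the patch does

cond = encode("a photo of a cat", skip=2)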
From e6e42f98df2c928c4f49351ad6b466387ce87d42 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 8 Oct 2022 19:25:10 +0300
Subject: [PATCH 184/460] make --force-enable-xformers work without needing
--xformers
---
modules/sd_hijack_optimizations.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index dba21192b..c4396bb9b 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -10,7 +10,7 @@ from einops import rearrange
from modules import shared
-if shared.cmd_opts.xformers:
+if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers:
try:
import xformers.ops
import functorch
From 3b2141c5fb6a3c2b8ab4b1e759a97ead77260129 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 8 Oct 2022 22:21:15 +0300
Subject: [PATCH 185/460] add 'Ignore last layers of CLIP model' option as a
parameter to the infotext
---
modules/processing.py | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/modules/processing.py b/modules/processing.py
index 8240ee270..515fc91a3 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -123,6 +123,7 @@ class Processed:
self.index_of_first_image = index_of_first_image
self.styles = p.styles
self.job_timestamp = state.job_timestamp
+ self.clip_skip = opts.CLIP_ignore_last_layers
self.eta = p.eta
self.ddim_discretize = p.ddim_discretize
@@ -141,7 +142,6 @@ class Processed:
self.all_subseeds = all_subseeds or [self.subseed]
self.infotexts = infotexts or [info]
-
def js(self):
obj = {
"prompt": self.prompt,
@@ -170,6 +170,7 @@ class Processed:
"infotexts": self.infotexts,
"styles": self.styles,
"job_timestamp": self.job_timestamp,
+ "clip_skip": self.clip_skip,
}
return json.dumps(obj)
@@ -267,6 +268,8 @@ def fix_seed(p):
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0):
index = position_in_batch + iteration * p.batch_size
+ clip_skip = getattr(p, 'clip_skip', opts.CLIP_ignore_last_layers)
+
generation_params = {
"Steps": p.steps,
"Sampler": sd_samplers.samplers[p.sampler_index].name,
@@ -282,6 +285,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
"Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None),
"Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta),
+ "Clip skip": None if clip_skip==0 else clip_skip,
}
generation_params.update(p.extra_generation_params)
From 610a7f4e1480c0ffeedb2a07dc27ae86bf03c3a8 Mon Sep 17 00:00:00 2001
From: Edouard Leurent
Date: Sat, 8 Oct 2022 16:49:43 +0100
Subject: [PATCH 186/460] Break after finding the local directory of stable
diffusion
Otherwise, we may override it with one of the next two paths (. or ..) if it is present there, and then the local paths of other modules (taming transformers, codeformers, etc.) won't be found in sd_path/../.
Fix https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/1085
---
modules/paths.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/modules/paths.py b/modules/paths.py
index 606f7d666..0519caa0a 100644
--- a/modules/paths.py
+++ b/modules/paths.py
@@ -12,6 +12,7 @@ possible_sd_paths = [os.path.join(script_path, 'repositories/stable-diffusion'),
for possible_sd_path in possible_sd_paths:
if os.path.exists(os.path.join(possible_sd_path, 'ldm/models/diffusion/ddpm.py')):
sd_path = os.path.abspath(possible_sd_path)
+ break
assert sd_path is not None, "Couldn't find Stable Diffusion in any of: " + str(possible_sd_paths)
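Without the break, the loop keeps scanning, and a later candidate (. or ..) that also contains ldm/models/diffusion/ddpm.py silently overwrites the already-found repository path. The bug in miniature:

def find_first(candidates, exists):
    found = None
    for c in candidates:
        if exists(c):
            found = c
            break  # keep the first hit; without this, the last match wins
    return found

hits = {"repositories/stable-diffusion", "."}
print(find_first(["repositories/stable-diffusion", ".", ".."], hits.__contains__))
# -> repositories/stable-diffusion; with no break it would end up as "."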
From 432782163ae53e605470bcefc9a6f796c4556912 Mon Sep 17 00:00:00 2001
From: Aidan Holland
Date: Sat, 8 Oct 2022 15:12:24 -0400
Subject: [PATCH 187/460] chore: Fix typos
---
README.md | 2 +-
javascript/imageviewer.js | 2 +-
modules/interrogate.py | 4 ++--
modules/processing.py | 2 +-
modules/scunet_model_arch.py | 4 ++--
modules/sd_models.py | 4 ++--
modules/sd_samplers.py | 4 ++--
modules/shared.py | 6 +++---
modules/swinir_model_arch.py | 2 +-
modules/ui.py | 4 ++--
10 files changed, 17 insertions(+), 17 deletions(-)
diff --git a/README.md b/README.md
index ef9b5e313..63dd0c187 100644
--- a/README.md
+++ b/README.md
@@ -34,7 +34,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
- Sampling method selection
- Interrupt processing at any time
- 4GB video card support (also reports of 2GB working)
-- Correct seeds for batches
+- Correct seeds for batches
- Prompt length validation
- get length of prompt in tokens as you type
- get a warning after generation if some text was truncated
diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index 4c0e8f4bb..6a00c0da4 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -95,7 +95,7 @@ function showGalleryImage(){
e.addEventListener('click', function (evt) {
if(!opts.js_modal_lightbox) return;
- modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initialy_zoomed)
+ modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed)
showModal(evt)
},true);
}
diff --git a/modules/interrogate.py b/modules/interrogate.py
index eed87144f..635e266e7 100644
--- a/modules/interrogate.py
+++ b/modules/interrogate.py
@@ -140,11 +140,11 @@ class InterrogateModels:
res = caption
- cilp_image = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(shared.device)
+ clip_image = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(shared.device)
precision_scope = torch.autocast if shared.cmd_opts.precision == "autocast" else contextlib.nullcontext
with torch.no_grad(), precision_scope("cuda"):
- image_features = self.clip_model.encode_image(cilp_image).type(self.dtype)
+ image_features = self.clip_model.encode_image(clip_image).type(self.dtype)
image_features /= image_features.norm(dim=-1, keepdim=True)
diff --git a/modules/processing.py b/modules/processing.py
index 515fc91a3..31220881e 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -386,7 +386,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if state.interrupted or state.skipped:
- # if we are interruped, sample returns just noise
+ # if we are interrupted, sample returns just noise
# use the image collected previously in sampler loop
samples_ddim = shared.state.current_latent
diff --git a/modules/scunet_model_arch.py b/modules/scunet_model_arch.py
index 972a2639a..43ca8d36f 100644
--- a/modules/scunet_model_arch.py
+++ b/modules/scunet_model_arch.py
@@ -40,7 +40,7 @@ class WMSA(nn.Module):
Returns:
attn_mask: should be (1 1 w p p),
"""
- # supporting sqaure.
+ # supporting square.
attn_mask = torch.zeros(h, w, p, p, p, p, dtype=torch.bool, device=self.relative_position_params.device)
if self.type == 'W':
return attn_mask
@@ -65,7 +65,7 @@ class WMSA(nn.Module):
x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size)
h_windows = x.size(1)
w_windows = x.size(2)
- # sqaure validation
+ # square validation
# assert h_windows == w_windows
x = rearrange(x, 'b w1 w2 p1 p2 c -> b (w1 w2) (p1 p2) c', p1=self.window_size, p2=self.window_size)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 9409d0707..a09866ce6 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -147,7 +147,7 @@ def load_model_weights(model, checkpoint_file, sd_model_hash):
model.first_stage_model.load_state_dict(vae_dict)
model.sd_model_hash = sd_model_hash
- model.sd_model_checkpint = checkpoint_file
+ model.sd_model_checkpoint = checkpoint_file
def load_model():
@@ -175,7 +175,7 @@ def reload_model_weights(sd_model, info=None):
from modules import lowvram, devices, sd_hijack
checkpoint_info = info or select_checkpoint()
- if sd_model.sd_model_checkpint == checkpoint_info.filename:
+ if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index eade0dbbd..6e743f7e9 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -181,7 +181,7 @@ class VanillaStableDiffusionSampler:
self.initialize(p)
- # existing code fails with cetain step counts, like 9
+ # existing code fails with certain step counts, like 9
try:
self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False)
except Exception:
@@ -204,7 +204,7 @@ class VanillaStableDiffusionSampler:
steps = steps or p.steps
- # existing code fails with cetin step counts, like 9
+ # existing code fails with certain step counts, like 9
try:
samples_ddim, _ = self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta)
except Exception:
diff --git a/modules/shared.py b/modules/shared.py
index af8dc7447..2dc092d68 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -141,9 +141,9 @@ class OptionInfo:
self.section = None
-def options_section(section_identifer, options_dict):
+def options_section(section_identifier, options_dict):
for k, v in options_dict.items():
- v.section = section_identifer
+ v.section = section_identifier
return options_dict
@@ -246,7 +246,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
- "js_modal_lightbox_initialy_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
+ "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
"show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
}))
diff --git a/modules/swinir_model_arch.py b/modules/swinir_model_arch.py
index 461fb354c..863f42db6 100644
--- a/modules/swinir_model_arch.py
+++ b/modules/swinir_model_arch.py
@@ -166,7 +166,7 @@ class SwinTransformerBlock(nn.Module):
Args:
dim (int): Number of input channels.
- input_resolution (tuple[int]): Input resulotion.
+ input_resolution (tuple[int]): Input resolution.
num_heads (int): Number of attention heads.
window_size (int): Window size.
shift_size (int): Shift size for SW-MSA.
diff --git a/modules/ui.py b/modules/ui.py
index b09359aae..b51af1214 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -38,7 +38,7 @@ from modules import prompt_parser
from modules.images import save_image
import modules.textual_inversion.ui
-# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the bowser will not show any UI
+# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
mimetypes.init()
mimetypes.add_type('application/javascript', '.js')
@@ -102,7 +102,7 @@ def save_files(js_data, images, index):
import csv
filenames = []
- #quick dictionary to class object conversion. Its neccesary due apply_filename_pattern requiring it
+    #quick dictionary to class object conversion. It's necessary because apply_filename_pattern requires it
class MyObject:
def __init__(self, d=None):
if d is not None:
From 050a6a798cec90ae2f881c2ddd3f0221e69907dc Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sat, 8 Oct 2022 23:26:48 +0300
Subject: [PATCH 188/460] support loading .yaml config with same name as model
support EMA weights in processing (????)
---
modules/processing.py | 2 +-
modules/sd_models.py | 30 +++++++++++++++++++++++-------
2 files changed, 24 insertions(+), 8 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 31220881e..4fea6d567 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -347,7 +347,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
infotexts = []
output_images = []
- with torch.no_grad():
+ with torch.no_grad(), p.sd_model.ema_scope():
with devices.autocast():
p.init(all_prompts, all_seeds, all_subseeds)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index a09866ce6..cb3982b16 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -14,7 +14,7 @@ from modules.paths import models_path
model_dir = "Stable-diffusion"
model_path = os.path.abspath(os.path.join(models_path, model_dir))
-CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name'])
+CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config'])
checkpoints_list = {}
try:
@@ -63,14 +63,20 @@ def list_models():
if os.path.exists(cmd_ckpt):
h = model_hash(cmd_ckpt)
title, short_model_name = modeltitle(cmd_ckpt, h)
- checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name)
+ checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name, shared.cmd_opts.config)
shared.opts.data['sd_model_checkpoint'] = title
elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)
for filename in model_list:
h = model_hash(filename)
title, short_model_name = modeltitle(filename, h)
- checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name)
+
+ basename, _ = os.path.splitext(filename)
+ config = basename + ".yaml"
+ if not os.path.exists(config):
+ config = shared.cmd_opts.config
+
+ checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name, config)
def get_closet_checkpoint_match(searchString):
@@ -116,7 +122,10 @@ def select_checkpoint():
return checkpoint_info
-def load_model_weights(model, checkpoint_file, sd_model_hash):
+def load_model_weights(model, checkpoint_info):
+ checkpoint_file = checkpoint_info.filename
+ sd_model_hash = checkpoint_info.hash
+
print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
pl_sd = torch.load(checkpoint_file, map_location="cpu")
@@ -148,15 +157,19 @@ def load_model_weights(model, checkpoint_file, sd_model_hash):
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_file
+ model.sd_checkpoint_info = checkpoint_info
def load_model():
from modules import lowvram, sd_hijack
checkpoint_info = select_checkpoint()
- sd_config = OmegaConf.load(shared.cmd_opts.config)
+ if checkpoint_info.config != shared.cmd_opts.config:
+ print(f"Loading config from: {shared.cmd_opts.config}")
+
+ sd_config = OmegaConf.load(checkpoint_info.config)
sd_model = instantiate_from_config(sd_config.model)
- load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash)
+ load_model_weights(sd_model, checkpoint_info)
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram)
@@ -178,6 +191,9 @@ def reload_model_weights(sd_model, info=None):
if sd_model.sd_model_checkpoint == checkpoint_info.filename:
return
+ if sd_model.sd_checkpoint_info.config != checkpoint_info.config:
+ return load_model()
+
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
else:
@@ -185,7 +201,7 @@ def reload_model_weights(sd_model, info=None):
sd_hijack.model_hijack.undo_hijack(sd_model)
- load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash)
+ load_model_weights(sd_model, checkpoint_info)
sd_hijack.model_hijack.hijack(sd_model)
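Config resolution is now per checkpoint: a .yaml sitting next to the model file takes precedence, and only when it is absent does the global --config default apply; reload_model_weights also restarts a full load_model() whenever the two checkpoints disagree on config. A sketch of the lookup rule (paths are illustrative):

import os

def resolve_config(checkpoint_filename, default_config):
    basename, _ = os.path.splitext(checkpoint_filename)
    candidate = basename + ".yaml"  # e.g. model.ckpt -> model.yaml
    return candidate if os.path.exists(candidate) else default_config

print(resolve_config("models/Stable-diffusion/model.ckpt", "configs/v1-inference.yaml"))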
From 5841990b0df04906da7321beef6f7f7902b7d57b Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sun, 9 Oct 2022 05:38:38 +0100
Subject: [PATCH 189/460] Update textual_inversion.py
---
.../textual_inversion/textual_inversion.py | 25 ++++++++++++++++---
1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py
index cd9f34984..f63160208 100644
--- a/modules/textual_inversion/textual_inversion.py
+++ b/modules/textual_inversion/textual_inversion.py
@@ -7,6 +7,9 @@ import tqdm
import html
import datetime
+from PIL import Image, PngImagePlugin
+import base64
+from io import BytesIO
from modules import shared, devices, sd_hijack, processing, sd_models
import modules.textual_inversion.dataset
@@ -80,7 +83,15 @@ class EmbeddingDatabase:
def process_file(path, filename):
name = os.path.splitext(filename)[0]
- data = torch.load(path, map_location="cpu")
+ data = []
+
+ if filename.upper().endswith('.PNG'):
+ embed_image = Image.open(path)
+ if 'sd-embedding' in embed_image.text:
+ embeddingData = base64.b64decode(embed_image.text['sd-embedding'])
+ data = torch.load(BytesIO(embeddingData), map_location="cpu")
+ else:
+ data = torch.load(path, map_location="cpu")
# textual inversion embeddings
if 'string_to_param' in data:
@@ -156,7 +167,7 @@ def create_embedding(name, num_vectors_per_token, init_text='*'):
return fn
-def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps, create_image_every, save_embedding_every, template_file):
+def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding):
assert embedding_name, 'embedding not selected'
shared.state.textinfo = "Initializing textual inversion training..."
@@ -244,7 +255,15 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps,
image = processed.images[0]
shared.state.current_image = image
- image.save(last_saved_image)
+
+ if save_image_with_stored_embedding:
+ info = PngImagePlugin.PngInfo()
+ info.add_text("sd-embedding", base64.b64encode(open(last_saved_file,'rb').read()))
+ image.save(last_saved_image, "PNG", pnginfo=info)
+ else:
+ image.save(last_saved_image)
+
+
last_saved_image += f", prompt: {text}"
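The embedding-in-PNG trick stores the torch-saved embedding file base64-encoded in a text chunk named sd-embedding, so the preview image doubles as a shareable embedding; the loader checks Image.text for that key before falling back to a plain torch.load. A self-contained round trip with dummy payload bytes standing in for the embedding file:

import base64
from io import BytesIO
from PIL import Image, PngImagePlugin

payload = b"pretend this is a torch-saved embedding"

info = PngImagePlugin.PngInfo()
info.add_text("sd-embedding", base64.b64encode(payload).decode("ascii"))
buf = BytesIO()
Image.new("RGB", (8, 8)).save(buf, "PNG", pnginfo=info)

reloaded = Image.open(BytesIO(buf.getvalue()))
assert base64.b64decode(reloaded.text["sd-embedding"]) == payload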
From cd8673bd9b2e59bddefee8d307340d643695fe11 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sun, 9 Oct 2022 05:40:57 +0100
Subject: [PATCH 190/460] add embed embedding to ui
---
modules/ui.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/modules/ui.py b/modules/ui.py
index b51af1214..a59832041 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1001,7 +1001,8 @@ def create_ui(wrap_gradio_gpu_call):
steps = gr.Number(label='Max steps', value=100000, precision=0)
create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0)
save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0)
-
+ save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True)
+
with gr.Row():
with gr.Column(scale=2):
gr.HTML(value="")
@@ -1063,6 +1064,7 @@ def create_ui(wrap_gradio_gpu_call):
create_image_every,
save_embedding_every,
template_file,
+ save_image_with_stored_embedding,
],
outputs=[
ti_output,
From c77c89cc83c618472ad352cf8a28fde28c3a1377 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 10:23:31 +0300
Subject: [PATCH 191/460] make main model loading and model merger use the same
code
---
modules/extras.py | 6 +++---
modules/sd_models.py | 14 +++++++++-----
2 files changed, 12 insertions(+), 8 deletions(-)
diff --git a/modules/extras.py b/modules/extras.py
index 1d9e64e55..ef6e6de7a 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -169,9 +169,9 @@ def run_modelmerger(primary_model_name, secondary_model_name, interp_method, int
print(f"Loading {secondary_model_info.filename}...")
secondary_model = torch.load(secondary_model_info.filename, map_location='cpu')
-
- theta_0 = primary_model['state_dict']
- theta_1 = secondary_model['state_dict']
+
+ theta_0 = sd_models.get_state_dict_from_checkpoint(primary_model)
+ theta_1 = sd_models.get_state_dict_from_checkpoint(secondary_model)
theta_funcs = {
"Weighted Sum": weighted_sum,
diff --git a/modules/sd_models.py b/modules/sd_models.py
index cb3982b16..18fb8c2ed 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -122,6 +122,13 @@ def select_checkpoint():
return checkpoint_info
+def get_state_dict_from_checkpoint(pl_sd):
+ if "state_dict" in pl_sd:
+ return pl_sd["state_dict"]
+
+ return pl_sd
+
+
def load_model_weights(model, checkpoint_info):
checkpoint_file = checkpoint_info.filename
sd_model_hash = checkpoint_info.hash
@@ -131,11 +138,8 @@ def load_model_weights(model, checkpoint_info):
pl_sd = torch.load(checkpoint_file, map_location="cpu")
if "global_step" in pl_sd:
print(f"Global Step: {pl_sd['global_step']}")
-
- if "state_dict" in pl_sd:
- sd = pl_sd["state_dict"]
- else:
- sd = pl_sd
+
+ sd = get_state_dict_from_checkpoint(pl_sd)
model.load_state_dict(sd, strict=False)
From 4e569fd888f8e3c5632a072d51abbb6e4d17abd6 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 10:31:47 +0300
Subject: [PATCH 192/460] fixed incorrect message about loading config; thanks
anon!
---
modules/sd_models.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 18fb8c2ed..2101b18da 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -169,7 +169,7 @@ def load_model():
checkpoint_info = select_checkpoint()
if checkpoint_info.config != shared.cmd_opts.config:
- print(f"Loading config from: {shared.cmd_opts.config}")
+ print(f"Loading config from: {checkpoint_info.config}")
sd_config = OmegaConf.load(checkpoint_info.config)
sd_model = instantiate_from_config(sd_config.model)
From 5ab7e88d9b0bb0125af9f7237242a00a93360ce5 Mon Sep 17 00:00:00 2001
From: aoirusann <82883326+aoirusann@users.noreply.github.com>
Date: Sat, 8 Oct 2022 13:09:29 +0800
Subject: [PATCH 193/460] Add `Download` & `Download as zip`
---
modules/ui.py | 39 ++++++++++++++++++++++++++++++++++-----
1 file changed, 34 insertions(+), 5 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index b51af1214..fe7f10a73 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -98,9 +98,10 @@ def send_gradio_gallery_to_image(x):
return image_from_url_text(x[0])
-def save_files(js_data, images, index):
+def save_files(js_data, images, do_make_zip, index):
import csv
filenames = []
+ fullfns = []
    #quick dictionary to class object conversion. It's necessary because apply_filename_pattern requires it
class MyObject:
@@ -141,10 +142,22 @@ def save_files(js_data, images, index):
filename = os.path.relpath(fullfn, path)
filenames.append(filename)
+ fullfns.append(fullfn)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
- return '', '', plaintext_to_html(f"Saved: {filenames[0]}")
+ # Make Zip
+ if do_make_zip:
+ zip_filepath = os.path.join(path, "images.zip")
+
+ from zipfile import ZipFile
+ with ZipFile(zip_filepath, "w") as zip_file:
+ for i in range(len(fullfns)):
+ with open(fullfns[i], mode="rb") as f:
+ zip_file.writestr(filenames[i], f.read())
+ fullfns.insert(0, zip_filepath)
+
+ return fullfns, '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func, extra_outputs=None):
@@ -521,6 +534,12 @@ def create_ui(wrap_gradio_gpu_call):
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_txt2img_folder = gr.Button(folder_symbol, elem_id=button_id)
+ with gr.Row():
+ do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
+
+ with gr.Row():
+ download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False)
+
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
@@ -570,13 +589,15 @@ def create_ui(wrap_gradio_gpu_call):
save.click(
fn=wrap_gradio_call(save_files),
- _js="(x, y, z) => [x, y, selected_gallery_index()]",
+ _js="(x, y, z, w) => [x, y, z, selected_gallery_index()]",
inputs=[
generation_info,
txt2img_gallery,
+ do_make_zip,
html_info,
],
outputs=[
+ download_files,
html_info,
html_info,
html_info,
@@ -701,6 +722,12 @@ def create_ui(wrap_gradio_gpu_call):
button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder'
open_img2img_folder = gr.Button(folder_symbol, elem_id=button_id)
+ with gr.Row():
+ do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
+
+ with gr.Row():
+ download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False)
+
with gr.Group():
html_info = gr.HTML()
generation_info = gr.Textbox(visible=False)
@@ -776,13 +803,15 @@ def create_ui(wrap_gradio_gpu_call):
save.click(
fn=wrap_gradio_call(save_files),
- _js="(x, y, z) => [x, y, selected_gallery_index()]",
+ _js="(x, y, z, w) => [x, y, z, selected_gallery_index()]",
inputs=[
generation_info,
img2img_gallery,
- html_info
+ do_make_zip,
+ html_info,
],
outputs=[
+ download_files,
html_info,
html_info,
html_info,
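When "Make Zip when Save?" is checked, every file that was just written is copied into a single images.zip under its short name, and the zip is prepended to the list handed to the gr.File component so it appears first. The core of that flow, with throwaway files standing in for saved images:

import os
import tempfile
from zipfile import ZipFile

with tempfile.TemporaryDirectory() as path:
    fullfns, filenames = [], []
    for i in range(2):  # stand-ins for saved images
        fn = f"img{i}.png"
        full = os.path.join(path, fn)
        with open(full, "wb") as f:
            f.write(b"fake image bytes")
        fullfns.append(full)
        filenames.append(fn)

    zip_filepath = os.path.join(path, "images.zip")
    with ZipFile(zip_filepath, "w") as zip_file:
        for full, short in zip(fullfns, filenames):
            with open(full, "rb") as f:
                zip_file.writestr(short, f.read())  # store under the short name
    fullfns.insert(0, zip_filepath)  # zip first, as the UI expects

    print(ZipFile(zip_filepath).namelist())  # ['img0.png', 'img1.png']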
From 14192c5b207b16b1ec7a4c9c4ea538d1a6811a4d Mon Sep 17 00:00:00 2001
From: aoirusann
Date: Sun, 9 Oct 2022 13:01:10 +0800
Subject: [PATCH 194/460] Support `Download` for txt files.
---
modules/images.py | 39 +++++++++++++++++++++++++++++++++++++--
modules/ui.py | 5 ++++-
2 files changed, 41 insertions(+), 3 deletions(-)
diff --git a/modules/images.py b/modules/images.py
index 29c5ee249..c0a906762 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -349,6 +349,38 @@ def get_next_sequence_number(path, basename):
def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None):
+ '''Save an image.
+
+ Args:
+ image (`PIL.Image`):
+ The image to be saved.
+ path (`str`):
+            The directory to save the image in. Note: the option `save_to_dirs` will cause the image to be saved into a subdirectory.
+ basename (`str`):
+ The base filename which will be applied to `filename pattern`.
+ seed, prompt, short_filename,
+ extension (`str`):
+ Image file extension, default is `png`.
+        pnginfo_section_name (`str`):
+            Specify the name of the section in which `info` will be saved.
+ info (`str` or `PngImagePlugin.iTXt`):
+ PNG info chunks.
+ existing_info (`dict`):
+            Additional PNG info. `existing_info == {pnginfo_section_name: info, ...}`
+ no_prompt:
+ TODO I don't know its meaning.
+ p (`StableDiffusionProcessing`)
+ forced_filename (`str`):
+ If specified, `basename` and filename pattern will be ignored.
+ save_to_dirs (bool):
+ If true, the image will be saved into a subdirectory of `path`.
+
+ Returns: (fullfn, txt_fullfn)
+ fullfn (`str`):
+            The full path of the saved image.
+ txt_fullfn (`str` or None):
+ If a text file is saved for this image, this will be its full path. Otherwise None.
+ '''
if short_filename or prompt is None or seed is None:
file_decoration = ""
elif opts.save_to_dirs:
@@ -424,7 +456,10 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
piexif.insert(exif_bytes(), fullfn_without_extension + ".jpg")
if opts.save_txt and info is not None:
- with open(f"{fullfn_without_extension}.txt", "w", encoding="utf8") as file:
+ txt_fullfn = f"{fullfn_without_extension}.txt"
+ with open(txt_fullfn, "w", encoding="utf8") as file:
file.write(info + "\n")
+ else:
+ txt_fullfn = None
- return fullfn
+ return fullfn, txt_fullfn
diff --git a/modules/ui.py b/modules/ui.py
index fe7f10a73..debd8873b 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -138,11 +138,14 @@ def save_files(js_data, images, do_make_zip, index):
is_grid = image_index < p.index_of_first_image
i = 0 if is_grid else (image_index - p.index_of_first_image)
- fullfn = save_image(image, path, "", seed=p.all_seeds[i], prompt=p.all_prompts[i], extension=extension, info=p.infotexts[image_index], grid=is_grid, p=p, save_to_dirs=save_to_dirs)
+ fullfn, txt_fullfn = save_image(image, path, "", seed=p.all_seeds[i], prompt=p.all_prompts[i], extension=extension, info=p.infotexts[image_index], grid=is_grid, p=p, save_to_dirs=save_to_dirs)
filename = os.path.relpath(fullfn, path)
filenames.append(filename)
fullfns.append(fullfn)
+ if txt_fullfn:
+ filenames.append(os.path.basename(txt_fullfn))
+ fullfns.append(txt_fullfn)
writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]])
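save_image now reports the sidecar text file as well, so the download list can include it alongside the image. The sidecar logic in isolation (info is the generation-parameters string; the function writes next to the image path it is given):

def write_sidecar(fullfn_without_extension, info, save_txt=True):
    txt_fullfn = None
    if save_txt and info is not None:
        txt_fullfn = f"{fullfn_without_extension}.txt"
        with open(txt_fullfn, "w", encoding="utf8") as file:
            file.write(info + "\n")
    return txt_fullfn  # None when no text file was written

print(write_sidecar("00001-sample", "Steps: 20, Sampler: Euler a", save_txt=False))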
From 122d42687b97ec4df4c2a8c335d2de385cd1f1a1 Mon Sep 17 00:00:00 2001
From: Fampai
Date: Sat, 8 Oct 2022 22:37:35 -0400
Subject: [PATCH 195/460] Fix VRAM Issue by only loading in hypernetwork when
selected in settings
---
modules/hypernetwork.py | 27 +++++++++++++++++----------
modules/sd_hijack_optimizations.py | 6 +++---
modules/shared.py | 7 ++-----
webui.py | 3 +++
4 files changed, 25 insertions(+), 18 deletions(-)
diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py
index 7f0622428..19f1c2270 100644
--- a/modules/hypernetwork.py
+++ b/modules/hypernetwork.py
@@ -40,27 +40,34 @@ class Hypernetwork:
self.layers[size] = (HypernetworkModule(size, sd[0]), HypernetworkModule(size, sd[1]))
-def load_hypernetworks(path):
+def list_hypernetworks(path):
res = {}
-
for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True):
- try:
- hn = Hypernetwork(filename)
- res[hn.name] = hn
- except Exception:
- print(f"Error loading hypernetwork {filename}", file=sys.stderr)
- print(traceback.format_exc(), file=sys.stderr)
-
+ name = os.path.splitext(os.path.basename(filename))[0]
+ res[name] = filename
return res
+def load_hypernetwork(filename):
+ print(f"Loading hypernetwork {filename}")
+ path = shared.hypernetworks.get(filename, None)
+ if (path is not None):
+ try:
+ shared.loaded_hypernetwork = Hypernetwork(path)
+ except Exception:
+ print(f"Error loading hypernetwork {path}", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+ else:
+ shared.loaded_hypernetwork = None
+
+
def attention_CrossAttention_forward(self, x, context=None, mask=None):
h = self.heads
q = self.to_q(x)
context = default(context, x)
- hypernetwork = shared.selected_hypernetwork()
+ hypernetwork = shared.loaded_hypernetwork
hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
if hypernetwork_layers is not None:
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index c4396bb9b..634fb4b24 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -28,7 +28,7 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None):
q_in = self.to_q(x)
context = default(context, x)
- hypernetwork = shared.selected_hypernetwork()
+ hypernetwork = shared.loaded_hypernetwork
hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
if hypernetwork_layers is not None:
@@ -68,7 +68,7 @@ def split_cross_attention_forward(self, x, context=None, mask=None):
q_in = self.to_q(x)
context = default(context, x)
- hypernetwork = shared.selected_hypernetwork()
+ hypernetwork = shared.loaded_hypernetwork
hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
if hypernetwork_layers is not None:
@@ -132,7 +132,7 @@ def xformers_attention_forward(self, x, context=None, mask=None):
h = self.heads
q_in = self.to_q(x)
context = default(context, x)
- hypernetwork = shared.selected_hypernetwork()
+ hypernetwork = shared.loaded_hypernetwork
hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None)
if hypernetwork_layers is not None:
k_in = self.to_k(hypernetwork_layers[0](context))
diff --git a/modules/shared.py b/modules/shared.py
index b2c76a323..9dce6cb7b 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -79,11 +79,8 @@ parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram
xformers_available = False
config_filename = cmd_opts.ui_settings_file
-hypernetworks = hypernetwork.load_hypernetworks(os.path.join(models_path, 'hypernetworks'))
-
-
-def selected_hypernetwork():
- return hypernetworks.get(opts.sd_hypernetwork, None)
+hypernetworks = hypernetwork.list_hypernetworks(os.path.join(models_path, 'hypernetworks'))
+loaded_hypernetwork = None
class State:
diff --git a/webui.py b/webui.py
index 18de8e165..270584f77 100644
--- a/webui.py
+++ b/webui.py
@@ -82,6 +82,9 @@ modules.scripts.load_scripts(os.path.join(script_path, "scripts"))
shared.sd_model = modules.sd_models.load_model()
shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model)))
+loaded_hypernetwork = modules.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)
+shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork)))
+
def webui():
# make the program just exit at ctrl+c without waiting for anything
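The VRAM fix splits "what exists" from "what is loaded": startup only globs filenames into a name-to-path dict, and the heavy Hypernetwork object is constructed once, when the settings dropdown selects it (and dropped by assigning None). A sketch of that split, with a stand-in class rather than the project's loader:

import glob
import os

def list_hypernetworks(path):
    # cheap: name -> filename, nothing is loaded yet
    return {os.path.splitext(os.path.basename(fn))[0]: fn
            for fn in glob.iglob(os.path.join(path, "**/*.pt"), recursive=True)}

class Hypernetwork:  # stand-in for the real loader
    def __init__(self, filename):
        self.filename = filename

def load_hypernetwork(available, name):
    path = available.get(name)
    return Hypernetwork(path) if path is not None else None

available = list_hypernetworks("models/hypernetworks")
loaded = load_hypernetwork(available, "anime")  # None unless the file exists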
From 03e570886f430f39020e504aba057a95f2e62484 Mon Sep 17 00:00:00 2001
From: frostydad <64224601+Cyberes@users.noreply.github.com>
Date: Sat, 8 Oct 2022 18:13:13 -0600
Subject: [PATCH 196/460] Fix incorrect sampler name in output
---
modules/processing.py | 9 ++++++++-
scripts/xy_grid.py | 16 +++++++++-------
2 files changed, 17 insertions(+), 8 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 4fea6d567..6b8664a07 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1,3 +1,4 @@
+
import json
import math
import os
@@ -46,6 +47,12 @@ def apply_color_correction(correction, image):
return image
+def get_correct_sampler(p):
+ if isinstance(p, modules.processing.StableDiffusionProcessingTxt2Img):
+ return sd_samplers.samplers
+ elif isinstance(p, modules.processing.StableDiffusionProcessingImg2Img):
+ return sd_samplers.samplers_for_img2img
+
class StableDiffusionProcessing:
def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt="", styles=None, seed=-1, subseed=-1, subseed_strength=0, seed_resize_from_h=-1, seed_resize_from_w=-1, seed_enable_extras=True, sampler_index=0, batch_size=1, n_iter=1, steps=50, cfg_scale=7.0, width=512, height=512, restore_faces=False, tiling=False, do_not_save_samples=False, do_not_save_grid=False, extra_generation_params=None, overlay_images=None, negative_prompt=None, eta=None):
self.sd_model = sd_model
@@ -272,7 +279,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
generation_params = {
"Steps": p.steps,
- "Sampler": sd_samplers.samplers[p.sampler_index].name,
+ "Sampler": get_correct_sampler(p)[p.sampler_index].name,
"CFG scale": p.cfg_scale,
"Seed": all_seeds[index],
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index c0c364df8..26ae2199d 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -11,7 +11,7 @@ import modules.scripts as scripts
import gradio as gr
from modules import images
-from modules.processing import process_images, Processed
+from modules.processing import process_images, Processed, get_correct_sampler
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
import modules.sd_samplers
@@ -56,15 +56,17 @@ def apply_order(p, x, xs):
p.prompt = prompt_tmp + p.prompt
-samplers_dict = {}
-for i, sampler in enumerate(modules.sd_samplers.samplers):
- samplers_dict[sampler.name.lower()] = i
- for alias in sampler.aliases:
- samplers_dict[alias.lower()] = i
+def build_samplers_dict(p):
+ samplers_dict = {}
+ for i, sampler in enumerate(get_correct_sampler(p)):
+ samplers_dict[sampler.name.lower()] = i
+ for alias in sampler.aliases:
+ samplers_dict[alias.lower()] = i
+ return samplers_dict
def apply_sampler(p, x, xs):
- sampler_index = samplers_dict.get(x.lower(), None)
+ sampler_index = build_samplers_dict(p).get(x.lower(), None)
if sampler_index is None:
raise RuntimeError(f"Unknown sampler: {x}")
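The sampler-name fix exists because txt2img and img2img expose different sampler lists, so one global name-to-index table could point at the wrong entry; building the dict from the processing object's own list keeps names, aliases, and indices aligned. The mapping in isolation (SamplerData is a stand-in record for the project's sampler entries):

from collections import namedtuple

SamplerData = namedtuple("SamplerData", ["name", "aliases"])
samplers = [SamplerData("Euler a", ["k_euler_a"]), SamplerData("DDIM", [])]

def build_samplers_dict(samplers):
    d = {}
    for i, sampler in enumerate(samplers):
        d[sampler.name.lower()] = i
        for alias in sampler.aliases:
            d[alias.lower()] = i
    return d

print(build_samplers_dict(samplers)["k_euler_a"])  # 0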
From ef93acdc731b7a2b3c13651b6de1bce58af989d4 Mon Sep 17 00:00:00 2001
From: frostydad <64224601+Cyberes@users.noreply.github.com>
Date: Sat, 8 Oct 2022 18:15:35 -0600
Subject: [PATCH 197/460] remove line break
---
modules/processing.py | 1 -
1 file changed, 1 deletion(-)
diff --git a/modules/processing.py b/modules/processing.py
index 6b8664a07..7fa1144e6 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -1,4 +1,3 @@
-
import json
import math
import os
From 1ffeb42d38d9276dc28918189d32f60d593a162c Mon Sep 17 00:00:00 2001
From: Nicolas Noullet
Date: Sun, 9 Oct 2022 00:18:45 +0200
Subject: [PATCH 198/460] Fix typo
---
modules/shared.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/shared.py b/modules/shared.py
index 9dce6cb7b..dffa0094b 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -238,7 +238,7 @@ options_templates.update(options_section(('interrogate', "Interrogate Options"),
options_templates.update(options_section(('ui', "User interface"), {
"show_progressbar": OptionInfo(True, "Show progressbar"),
- "show_progress_every_n_steps": OptionInfo(0, "Show show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
+ "show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}),
"return_grid": OptionInfo(True, "Show grid in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
From e2930f9821c197da94e208b5ae73711002844efc Mon Sep 17 00:00:00 2001
From: Tony Beeman
Date: Fri, 7 Oct 2022 17:46:39 -0700
Subject: [PATCH 199/460] Fix for Prompts_from_file showing extra textbox.
---
modules/scripts.py | 30 ++++++++++++++++++++++++++----
scripts/prompts_from_file.py | 4 ++++
2 files changed, 30 insertions(+), 4 deletions(-)
diff --git a/modules/scripts.py b/modules/scripts.py
index 45230f9a1..d8f87927e 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -1,4 +1,5 @@
import os
+from pydoc import visiblename
import sys
import traceback
@@ -31,6 +32,15 @@ class Script:
def show(self, is_img2img):
return True
+
+ # Called when the ui for this script has been shown.
+    # Useful for hiding some controls, since the scripts module sets the visibility of
+    # everything to true. The parameters will be the parameters returned by the ui method
+ # The return value should be gradio updates, similar to what you would return
+ # from a Gradio event handler.
+ def on_show(self, *args):
+ return [ui.gr_show(True)] * len(args)
+
# This is where the additional processing is implemented. The parameters include
# self, the model object "p" (a StableDiffusionProcessing class, see
# processing.py), and the parameters returned by the ui method.
@@ -125,20 +135,32 @@ class ScriptRunner:
inputs += controls
script.args_to = len(inputs)
- def select_script(script_index):
+ def select_script(*args):
+ script_index = args[0]
+ on_show_updates = []
if 0 < script_index <= len(self.scripts):
script = self.scripts[script_index-1]
args_from = script.args_from
args_to = script.args_to
+ script_args = args[args_from:args_to]
+ on_show_updates = wrap_call(script.on_show, script.filename, "on_show", *script_args)
else:
args_from = 0
args_to = 0
- return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))]
+ ret = [ ui.gr_show(True)] # always show the dropdown
+ for i in range(1, len(inputs)):
+ if (args_from <= i < args_to):
+ ret.append( on_show_updates[i - args_from] )
+ else:
+ ret.append(ui.gr_show(False))
+ return ret
+
+ # return [ui.gr_show(True if (i == 0) else on_show_updates[i - args_from] if args_from <= i < args_to else False) for i in range(len(inputs))]
dropdown.change(
fn=select_script,
- inputs=[dropdown],
+ inputs=inputs,
outputs=inputs
)
@@ -198,4 +220,4 @@ def reload_scripts(basedir):
load_scripts(basedir)
scripts_txt2img = ScriptRunner()
- scripts_img2img = ScriptRunner()
+ scripts_img2img = ScriptRunner()
\ No newline at end of file
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 513d9a1c5..110889a66 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -10,6 +10,7 @@ from modules.processing import Processed, process_images
from PIL import Image
from modules.shared import opts, cmd_opts, state
+g_txt_mode = False
class Script(scripts.Script):
def title(self):
@@ -29,6 +30,9 @@ class Script(scripts.Script):
checkbox_txt.change(fn=lambda x: [gr.File.update(visible = not x), gr.TextArea.update(visible = x)], inputs=[checkbox_txt], outputs=[file, prompt_txt])
return [checkbox_txt, file, prompt_txt]
+ def on_show(self, checkbox_txt, file, prompt_txt):
+ return [ gr.Checkbox.update(visible = True), gr.File.update(visible = not checkbox_txt), gr.TextArea.update(visible = checkbox_txt) ]
+
def run(self, p, checkbox_txt, data: bytes, prompt_txt: str):
if (checkbox_txt):
lines = [x.strip() for x in prompt_txt.splitlines()]
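select_script builds one visibility update per registered input: index 0 (the dropdown) is always shown, the chosen script's own controls get whatever its on_show hook returned, and every other script's controls are hidden. The list construction without any gradio objects ("show"/"hide" stand in for gr_show(True/False)):

def visibility(n_inputs, args_from, args_to, on_show_updates):
    ret = ["show"]  # the dropdown itself is always visible
    for i in range(1, n_inputs):
        if args_from <= i < args_to:
            ret.append(on_show_updates[i - args_from])
        else:
            ret.append("hide")
    return ret

print(visibility(6, 1, 3, ["show", "hide"]))
# ['show', 'show', 'hide', 'hide', 'hide', 'hide']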
From 86cb16886f8f48169cee4658ad0c5e5443beed2a Mon Sep 17 00:00:00 2001
From: Tony Beeman
Date: Fri, 7 Oct 2022 23:51:50 -0700
Subject: [PATCH 200/460] Pull Request Code Review Fixes
---
modules/scripts.py | 1 -
scripts/prompts_from_file.py | 2 --
2 files changed, 3 deletions(-)
diff --git a/modules/scripts.py b/modules/scripts.py
index d8f87927e..8dfd4de94 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -1,5 +1,4 @@
import os
-from pydoc import visiblename
import sys
import traceback
diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py
index 110889a66..b24f1a806 100644
--- a/scripts/prompts_from_file.py
+++ b/scripts/prompts_from_file.py
@@ -10,8 +10,6 @@ from modules.processing import Processed, process_images
from PIL import Image
from modules.shared import opts, cmd_opts, state
-g_txt_mode = False
-
class Script(scripts.Script):
def title(self):
return "Prompts from file or textbox"
From cbf6dad02d04d98e5a2d5e870777ab99b5796b2d Mon Sep 17 00:00:00 2001
From: Tony Beeman
Date: Sat, 8 Oct 2022 10:40:30 -0700
Subject: [PATCH 201/460] Handle case where on_show returns the wrong number of
arguments
---
modules/scripts.py | 9 ++++++---
1 file changed, 6 insertions(+), 3 deletions(-)
diff --git a/modules/scripts.py b/modules/scripts.py
index 8dfd4de94..7d89979d7 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -143,6 +143,8 @@ class ScriptRunner:
args_to = script.args_to
script_args = args[args_from:args_to]
on_show_updates = wrap_call(script.on_show, script.filename, "on_show", *script_args)
+ if (len(on_show_updates) != (args_to - args_from)):
+ print("Error in custom script (" + script.filename + "): on_show() method should return the same number of arguments as ui().", file=sys.stderr)
else:
args_from = 0
args_to = 0
@@ -150,13 +152,14 @@ class ScriptRunner:
ret = [ ui.gr_show(True)] # always show the dropdown
for i in range(1, len(inputs)):
if (args_from <= i < args_to):
- ret.append( on_show_updates[i - args_from] )
+ if (i - args_from) < len(on_show_updates):
+ ret.append( on_show_updates[i - args_from] )
+ else:
+ ret.append(ui.gr_show(True))
else:
ret.append(ui.gr_show(False))
return ret
- # return [ui.gr_show(True if (i == 0) else on_show_updates[i - args_from] if args_from <= i < args_to else False) for i in range(len(inputs))]
-
dropdown.change(
fn=select_script,
inputs=inputs,
From ab4fe4f44c3d2675a351269fe2ff1ddeac557aa6 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 11:59:41 +0300
Subject: [PATCH 202/460] hide filenames for save button by default
---
modules/ui.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/modules/ui.py b/modules/ui.py
index 8071b1cb6..e1ab26658 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -162,7 +162,7 @@ def save_files(js_data, images, do_make_zip, index):
zip_file.writestr(filenames[i], f.read())
fullfns.insert(0, zip_filepath)
- return fullfns, '', '', plaintext_to_html(f"Saved: {filenames[0]}")
+ return gr.File.update(value=fullfns, visible=True), '', '', plaintext_to_html(f"Saved: {filenames[0]}")
def wrap_gradio_call(func, extra_outputs=None):
@@ -553,7 +553,7 @@ def create_ui(wrap_gradio_gpu_call):
do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
with gr.Row():
- download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False)
+ download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
with gr.Group():
html_info = gr.HTML()
@@ -741,7 +741,7 @@ def create_ui(wrap_gradio_gpu_call):
do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False)
with gr.Row():
- download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False)
+ download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False)
with gr.Group():
html_info = gr.HTML()
From 0241d811d23427b99f6b1eda1540bdf8d87963d5 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 12:04:44 +0300
Subject: [PATCH 203/460] Revert "Fix for Prompts_from_file showing extra
textbox."
This reverts commit e2930f9821c197da94e208b5ae73711002844efc.
---
modules/scripts.py | 32 ++++----------------------------
1 file changed, 4 insertions(+), 28 deletions(-)
diff --git a/modules/scripts.py b/modules/scripts.py
index 7d89979d7..45230f9a1 100644
--- a/modules/scripts.py
+++ b/modules/scripts.py
@@ -31,15 +31,6 @@ class Script:
def show(self, is_img2img):
return True
-
- # Called when the ui for this script has been shown.
- # Useful for hiding some controls, since the scripts module sets visibility to
- # everything to true. The parameters will be the parameters returned by the ui method
- # The return value should be gradio updates, similar to what you would return
- # from a Gradio event handler.
- def on_show(self, *args):
- return [ui.gr_show(True)] * len(args)
-
# This is where the additional processing is implemented. The parameters include
# self, the model object "p" (a StableDiffusionProcessing class, see
# processing.py), and the parameters returned by the ui method.
@@ -134,35 +125,20 @@ class ScriptRunner:
inputs += controls
script.args_to = len(inputs)
- def select_script(*args):
- script_index = args[0]
- on_show_updates = []
+ def select_script(script_index):
if 0 < script_index <= len(self.scripts):
script = self.scripts[script_index-1]
args_from = script.args_from
args_to = script.args_to
- script_args = args[args_from:args_to]
- on_show_updates = wrap_call(script.on_show, script.filename, "on_show", *script_args)
- if (len(on_show_updates) != (args_to - args_from)):
- print("Error in custom script (" + script.filename + "): on_show() method should return the same number of arguments as ui().", file=sys.stderr)
else:
args_from = 0
args_to = 0
- ret = [ ui.gr_show(True)] # always show the dropdown
- for i in range(1, len(inputs)):
- if (args_from <= i < args_to):
- if (i - args_from) < len(on_show_updates):
- ret.append( on_show_updates[i - args_from] )
- else:
- ret.append(ui.gr_show(True))
- else:
- ret.append(ui.gr_show(False))
- return ret
+ return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))]
dropdown.change(
fn=select_script,
- inputs=inputs,
+ inputs=[dropdown],
outputs=inputs
)
@@ -222,4 +198,4 @@ def reload_scripts(basedir):
load_scripts(basedir)
scripts_txt2img = ScriptRunner()
- scripts_img2img = ScriptRunner()
\ No newline at end of file
+ scripts_img2img = ScriptRunner()
From 6f6798ddabe10d320fe8ea05edf0fdcef0c51a8e Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 12:33:37 +0300
Subject: [PATCH 204/460] prevent a possible code execution error (thanks,
RyotaK)
---
modules/ui.py | 9 +++++++++
1 file changed, 9 insertions(+)
diff --git a/modules/ui.py b/modules/ui.py
index e1ab26658..dad509f3a 100644
--- a/modules/ui.py
+++ b/modules/ui.py
@@ -1153,6 +1153,15 @@ def create_ui(wrap_gradio_gpu_call):
component_dict = {}
def open_folder(f):
+ if not os.path.isdir(f):
+ print(f"""
+WARNING
+An open_folder request was made with an argument that is not a folder.
+This could be an error or a malicious attempt to run code on your computer.
+Requested path was: {f}
+""", file=sys.stderr)
+ return
+
if not shared.cmd_opts.hide_ui_dir_config:
path = os.path.normpath(f)
if platform.system() == "Windows":
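The same pattern in isolation: refuse anything that is not an existing directory before handing the path to the OS. A minimal sketch (standalone, not the webui function itself):

    import os
    import sys

    def open_folder_safely(path):
        if not os.path.isdir(path):
            print(f"Refusing open_folder request for non-folder path: {path}", file=sys.stderr)
            return
        # only a verified directory reaches the platform-specific open call
        print(f"would open: {os.path.normpath(path)}")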
From d74c38108f95e44d83a1706ee5ab218124972868 Mon Sep 17 00:00:00 2001
From: Jesse Williams <33797815+xram64@users.noreply.github.com>
Date: Sat, 8 Oct 2022 01:30:49 -0400
Subject: [PATCH 205/460] Confirm that options are valid before starting
When using the 'Sampler' or 'Checkpoint' options, if one of the entered
names has a typo, an error will only be thrown once the `draw_xy_grid`
loop reaches that name. This can waste a lot of time for large grids
with a typo near the end of a list, since the script needs to start over
and re-generate any earlier images to finish making the grid.
Also fixes a typo in a variable name in `draw_xy_grid`.
---
scripts/xy_grid.py | 21 +++++++++++++++------
1 file changed, 15 insertions(+), 6 deletions(-)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 26ae2199d..07040886a 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -145,7 +145,7 @@ def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend):
ver_texts = [[images.GridAnnotation(y)] for y in y_labels]
hor_texts = [[images.GridAnnotation(x)] for x in x_labels]
- first_pocessed = None
+ first_processed = None
state.job_count = len(xs) * len(ys) * p.n_iter
@@ -154,8 +154,8 @@ def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend):
state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}"
processed = cell(x, y)
- if first_pocessed is None:
- first_pocessed = processed
+ if first_processed is None:
+ first_processed = processed
try:
res.append(processed.images[0])
@@ -166,9 +166,9 @@ def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend):
if draw_legend:
grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts)
- first_pocessed.images = [grid]
+ first_processed.images = [grid]
- return first_pocessed
+ return first_processed
re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
@@ -216,7 +216,6 @@ class Script(scripts.Script):
m = re_range.fullmatch(val)
mc = re_range_count.fullmatch(val)
if m is not None:
-
start = int(m.group(1))
end = int(m.group(2))+1
step = int(m.group(3)) if m.group(3) is not None else 1
@@ -258,6 +257,16 @@ class Script(scripts.Script):
valslist = list(permutations(valslist))
valslist = [opt.type(x) for x in valslist]
+
+ # Confirm options are valid before starting
+ if opt.label == "Sampler":
+ for sampler_val in valslist:
+ if sampler_val.lower() not in samplers_dict.keys():
+ raise RuntimeError(f"Unknown sampler: {sampler_val}")
+ elif opt.label == "Checkpoint name":
+ for ckpt_val in valslist:
+ if modules.sd_models.get_closet_checkpoint_match(ckpt_val) is None:
+ raise RuntimeError(f"Checkpoint for {ckpt_val} not found")
return valslist
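The point of validating up front is to fail in milliseconds rather than after most of a grid has rendered. A condensed standalone sketch of the same fail-fast check (samplers_dict here is a stand-in for the webui's real lookup table):

    samplers_dict = {"euler a": 0, "euler": 1, "ddim": 2}  # stand-in table

    def confirm_samplers(values):
        for v in values:
            if v.lower() not in samplers_dict:
                raise RuntimeError(f"Unknown sampler: {v}")  # raised before any image is generated

    confirm_samplers(["Euler", "DDIM"])  # ok
    # confirm_samplers(["Eulr"])         # raises immediately instead of mid-grid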
From a65a45272e8f26ee3bc52a5300b396266508a9a5 Mon Sep 17 00:00:00 2001
From: Brendan Byrd
Date: Thu, 6 Oct 2022 19:31:36 -0400
Subject: [PATCH 206/460] Don't change the seed initially if "Keep -1 for
seeds" is checked
Fixes #1049
---
scripts/xy_grid.py | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 07040886a..a8f53befe 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -198,7 +198,9 @@ class Script(scripts.Script):
return [x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds]
def run(self, p, x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds):
- modules.processing.fix_seed(p)
+ if not no_fixed_seeds:
+ modules.processing.fix_seed(p)
+
p.batch_size = 1
initial_hn = opts.sd_hypernetwork
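For context, `modules.processing.fix_seed` replaces the -1 "random seed" sentinel with a concrete value; skipping it when "Keep -1 for seeds" is checked lets every grid cell draw its own seed. A rough sketch of the assumed semantics:

    import random

    def fix_seed_sketch(p):
        # assumed behavior: -1 (or None) means "pick a random seed now"
        if p.seed is None or int(p.seed) == -1:
            p.seed = random.randrange(4294967294)

    class P: seed = -1
    p = P(); fix_seed_sketch(p); print(p.seed)  # a concrete random seed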
From 0609ce06c0778536cb368ac3867292f87c6d9fc7 Mon Sep 17 00:00:00 2001
From: Milly
Date: Fri, 7 Oct 2022 03:36:08 +0900
Subject: [PATCH 207/460] Removed duplicate definition of model_path
---
modules/bsrgan_model.py | 2 --
modules/esrgan_model.py | 2 --
modules/ldsr_model.py | 2 --
modules/realesrgan_model.py | 2 --
modules/scunet_model.py | 2 --
modules/swinir_model.py | 2 --
modules/upscaler.py | 7 ++++---
7 files changed, 4 insertions(+), 15 deletions(-)
diff --git a/modules/bsrgan_model.py b/modules/bsrgan_model.py
index 3bd80791a..737e1a761 100644
--- a/modules/bsrgan_model.py
+++ b/modules/bsrgan_model.py
@@ -10,13 +10,11 @@ from basicsr.utils.download_util import load_file_from_url
import modules.upscaler
from modules import devices, modelloader
from modules.bsrgan_model_arch import RRDBNet
-from modules.paths import models_path
class UpscalerBSRGAN(modules.upscaler.Upscaler):
def __init__(self, dirname):
self.name = "BSRGAN"
- self.model_path = os.path.join(models_path, self.name)
self.model_name = "BSRGAN 4x"
self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/BSRGAN.pth"
self.user_path = dirname
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index 285481242..3970e6e47 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -7,7 +7,6 @@ from basicsr.utils.download_util import load_file_from_url
import modules.esrgam_model_arch as arch
from modules import shared, modelloader, images, devices
-from modules.paths import models_path
from modules.upscaler import Upscaler, UpscalerData
from modules.shared import opts
@@ -76,7 +75,6 @@ class UpscalerESRGAN(Upscaler):
self.model_name = "ESRGAN_4x"
self.scalers = []
self.user_path = dirname
- self.model_path = os.path.join(models_path, self.name)
super().__init__()
model_paths = self.find_models(ext_filter=[".pt", ".pth"])
scalers = []
diff --git a/modules/ldsr_model.py b/modules/ldsr_model.py
index 1c1070fc6..8c4db44ad 100644
--- a/modules/ldsr_model.py
+++ b/modules/ldsr_model.py
@@ -7,13 +7,11 @@ from basicsr.utils.download_util import load_file_from_url
from modules.upscaler import Upscaler, UpscalerData
from modules.ldsr_model_arch import LDSR
from modules import shared
-from modules.paths import models_path
class UpscalerLDSR(Upscaler):
def __init__(self, user_path):
self.name = "LDSR"
- self.model_path = os.path.join(models_path, self.name)
self.user_path = user_path
self.model_url = "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1"
self.yaml_url = "https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1"
diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py
index dc0123e02..3ac0b97ae 100644
--- a/modules/realesrgan_model.py
+++ b/modules/realesrgan_model.py
@@ -8,14 +8,12 @@ from basicsr.utils.download_util import load_file_from_url
from realesrgan import RealESRGANer
from modules.upscaler import Upscaler, UpscalerData
-from modules.paths import models_path
from modules.shared import cmd_opts, opts
class UpscalerRealESRGAN(Upscaler):
def __init__(self, path):
self.name = "RealESRGAN"
- self.model_path = os.path.join(models_path, self.name)
self.user_path = path
super().__init__()
try:
diff --git a/modules/scunet_model.py b/modules/scunet_model.py
index fb64b7409..36a996bf0 100644
--- a/modules/scunet_model.py
+++ b/modules/scunet_model.py
@@ -9,14 +9,12 @@ from basicsr.utils.download_util import load_file_from_url
import modules.upscaler
from modules import devices, modelloader
-from modules.paths import models_path
from modules.scunet_model_arch import SCUNet as net
class UpscalerScuNET(modules.upscaler.Upscaler):
def __init__(self, dirname):
self.name = "ScuNET"
- self.model_path = os.path.join(models_path, self.name)
self.model_name = "ScuNET GAN"
self.model_name2 = "ScuNET PSNR"
self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_gan.pth"
diff --git a/modules/swinir_model.py b/modules/swinir_model.py
index 9bd454c69..fbd11f843 100644
--- a/modules/swinir_model.py
+++ b/modules/swinir_model.py
@@ -8,7 +8,6 @@ from basicsr.utils.download_util import load_file_from_url
from tqdm import tqdm
from modules import modelloader
-from modules.paths import models_path
from modules.shared import cmd_opts, opts, device
from modules.swinir_model_arch import SwinIR as net
from modules.upscaler import Upscaler, UpscalerData
@@ -25,7 +24,6 @@ class UpscalerSwinIR(Upscaler):
"/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR" \
"-L_x4_GAN.pth "
self.model_name = "SwinIR 4x"
- self.model_path = os.path.join(models_path, self.name)
self.user_path = dirname
super().__init__()
scalers = []
diff --git a/modules/upscaler.py b/modules/upscaler.py
index d9d7c5e2a..34672be70 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -36,10 +36,11 @@ class Upscaler:
self.half = not modules.shared.cmd_opts.no_half
self.pre_pad = 0
self.mod_scale = None
- if self.name is not None and create_dirs:
+
+ if self.model_path is not None and self.name:
self.model_path = os.path.join(models_path, self.name)
- if not os.path.exists(self.model_path):
- os.makedirs(self.model_path)
+ if self.model_path and create_dirs:
+ os.makedirs(self.model_path, exist_ok=True)
try:
import cv2
From bd833409ac7b8337040d521f6b65ced51e1b2ea8 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 13:10:15 +0300
Subject: [PATCH 208/460] additional changes for saving pnginfo for #1803
---
modules/extras.py | 4 ++++
modules/processing.py | 6 ++++--
2 files changed, 8 insertions(+), 2 deletions(-)
diff --git a/modules/extras.py b/modules/extras.py
index ef6e6de7a..39dd38060 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -98,6 +98,10 @@ def run_extras(extras_mode, image, image_folder, gfpgan_visibility, codeformer_v
no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo,
forced_filename=image_name if opts.use_original_name_batch else None)
+ if opts.enable_pnginfo:
+ image.info = existing_pnginfo
+ image.info["extras"] = info
+
outputs.append(image)
devices.torch_gc()
diff --git a/modules/processing.py b/modules/processing.py
index 7fa1144e6..2c9913170 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -451,7 +451,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
text = infotext(n, i)
infotexts.append(text)
- image.info["parameters"] = text
+ if opts.enable_pnginfo:
+ image.info["parameters"] = text
output_images.append(image)
del x_samples_ddim
@@ -470,7 +471,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
if opts.return_grid:
text = infotext()
infotexts.insert(0, text)
- grid.info["parameters"] = text
+ if opts.enable_pnginfo:
+ grid.info["parameters"] = text
output_images.insert(0, grid)
index_of_first_image = 1
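The "parameters" text lands in the PNG's own metadata (tEXt chunks), which is what PNG info readers later parse back out. A minimal standalone example of that round trip with Pillow (not the webui save path itself):

    from PIL import Image
    from PIL.PngImagePlugin import PngInfo

    img = Image.new("RGB", (64, 64))
    pnginfo = PngInfo()
    pnginfo.add_text("parameters", "a prompt, Steps: 20, Sampler: Euler a")
    img.save("out.png", pnginfo=pnginfo)

    print(Image.open("out.png").info["parameters"])  # text survives the save/load round trip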
From f4578b343ded3b8ccd1879ea0c0b3cdadfcc3a5f Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 13:23:30 +0300
Subject: [PATCH 209/460] fix model switching not working properly if there is
a different yaml config
---
modules/sd_models.py | 3 ++-
1 file changed, 2 insertions(+), 1 deletion(-)
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 2101b18da..d0c74dd84 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -196,7 +196,8 @@ def reload_model_weights(sd_model, info=None):
return
if sd_model.sd_checkpoint_info.config != checkpoint_info.config:
- return load_model()
+ shared.sd_model = load_model()
+ return shared.sd_model
if shared.cmd_opts.lowvram or shared.cmd_opts.medvram:
lowvram.send_everything_to_cpu()
From 77a719648db515f10136e8b8483d5b16bda2eaeb Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 13:48:04 +0300
Subject: [PATCH 210/460] fix logic error in #1832
---
modules/upscaler.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/upscaler.py b/modules/upscaler.py
index 34672be70..6ab2fb408 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -37,7 +37,7 @@ class Upscaler:
self.pre_pad = 0
self.mod_scale = None
- if self.model_path is not None and self.name:
+ if self.model_path is None and self.name:
self.model_path = os.path.join(models_path, self.name)
if self.model_path and create_dirs:
os.makedirs(self.model_path, exist_ok=True)
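With the corrected condition, the base class only fills in a default when a subclass left `model_path` unset, and directory creation is idempotent. A condensed sketch of the resulting behavior:

    import os

    models_path = "models"  # stand-in for modules.paths.models_path

    class UpscalerSketch:
        def __init__(self, name, model_path=None, create_dirs=True):
            self.name = name
            self.model_path = model_path  # a subclass may have chosen its own location
            if self.model_path is None and self.name:
                self.model_path = os.path.join(models_path, self.name)  # default: models/<Name>
            if self.model_path and create_dirs:
                os.makedirs(self.model_path, exist_ok=True)  # no exists() check needed

    u = UpscalerSketch("ESRGAN", create_dirs=False)
    print(u.model_path)  # models/ESRGAN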
From 542a3d3a4a00c1383fbdaf938ceefef87cf834bb Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 14:33:22 +0300
Subject: [PATCH 211/460] fix broken hypernetworks in XY plot
---
modules/hypernetwork.py | 7 +++++--
scripts/xy_grid.py | 9 +++------
2 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py
index 19f1c2270..498bc9d8f 100644
--- a/modules/hypernetwork.py
+++ b/modules/hypernetwork.py
@@ -49,15 +49,18 @@ def list_hypernetworks(path):
def load_hypernetwork(filename):
- print(f"Loading hypernetwork {filename}")
path = shared.hypernetworks.get(filename, None)
- if (path is not None):
+ if path is not None:
+ print(f"Loading hypernetwork {filename}")
try:
shared.loaded_hypernetwork = Hypernetwork(path)
except Exception:
print(f"Error loading hypernetwork {path}", file=sys.stderr)
print(traceback.format_exc(), file=sys.stderr)
else:
+ if shared.loaded_hypernetwork is not None:
+ print(f"Unloading hypernetwork")
+
shared.loaded_hypernetwork = None
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index a8f53befe..fe9490673 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -10,7 +10,7 @@ import numpy as np
import modules.scripts as scripts
import gradio as gr
-from modules import images
+from modules import images, hypernetwork
from modules.processing import process_images, Processed, get_correct_sampler
from modules.shared import opts, cmd_opts, state
import modules.shared as shared
@@ -80,8 +80,7 @@ def apply_checkpoint(p, x, xs):
def apply_hypernetwork(p, x, xs):
- hn = shared.hypernetworks.get(x, None)
- opts.data["sd_hypernetwork"] = hn.name if hn is not None else 'None'
+ hypernetwork.load_hypernetwork(x)
def format_value_add_label(p, opt, x):
@@ -203,8 +202,6 @@ class Script(scripts.Script):
p.batch_size = 1
- initial_hn = opts.sd_hypernetwork
-
def process_axis(opt, vals):
if opt.label == 'Nothing':
return [0]
@@ -321,6 +318,6 @@ class Script(scripts.Script):
# restore checkpoint in case it was changed by axes
modules.sd_models.reload_model_weights(shared.sd_model)
- opts.data["sd_hypernetwork"] = initial_hn
+ hypernetwork.load_hypernetwork(opts.sd_hypernetwork)
return processed
From d6d10a37bfd21568e74efb46137f906da96d5fdb Mon Sep 17 00:00:00 2001
From: William Moorehouse
Date: Sun, 9 Oct 2022 04:58:40 -0400
Subject: [PATCH 212/460] Added extended model details to infotext
---
modules/processing.py | 3 +++
modules/sd_models.py | 3 ++-
modules/shared.py | 1 +
3 files changed, 6 insertions(+), 1 deletion(-)
diff --git a/modules/processing.py b/modules/processing.py
index 2c9913170..d1bcee4aa 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -284,6 +284,9 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
"Size": f"{p.width}x{p.height}",
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
+ "Model": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_model_name else shared.sd_model.sd_model_name),
+ "Model VAE": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_model_vae_name else shared.sd_model.sd_model_vae_name),
+ "Model hypernetwork": (None if not opts.add_extended_model_details_to_info or not opts.sd_hypernetwork else opts.sd_hypernetwork),
"Batch size": (None if p.batch_size < 2 else p.batch_size),
"Batch pos": (None if p.batch_size < 2 else position_in_batch),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
diff --git a/modules/sd_models.py b/modules/sd_models.py
index d0c74dd84..3fa42329c 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -4,7 +4,7 @@ import sys
from collections import namedtuple
import torch
from omegaconf import OmegaConf
-
+from pathlib import Path
from ldm.util import instantiate_from_config
@@ -158,6 +158,7 @@ def load_model_weights(model, checkpoint_info):
vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
model.first_stage_model.load_state_dict(vae_dict)
+ model.sd_model_vae_name = Path(vae_file).stem
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_file
diff --git a/modules/shared.py b/modules/shared.py
index dffa0094b..ca63f7d8e 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -242,6 +242,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"return_grid": OptionInfo(True, "Show grid in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
+ "add_extended_model_details_to_info": OptionInfo(False, "Add extended model details to generation information (model name, VAE, hypernetwork)"),
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
From 006791c13d70e582eee766b7d0499e9821a86bf9 Mon Sep 17 00:00:00 2001
From: William Moorehouse
Date: Sun, 9 Oct 2022 05:09:18 -0400
Subject: [PATCH 213/460] Fix grabbing the model name for infotext
---
modules/processing.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/processing.py b/modules/processing.py
index d1bcee4aa..c035c9902 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -284,7 +284,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
"Size": f"{p.width}x{p.height}",
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
- "Model": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_model_name else shared.sd_model.sd_model_name),
+ "Model": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name),
"Model VAE": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_model_vae_name else shared.sd_model.sd_model_vae_name),
"Model hypernetwork": (None if not opts.add_extended_model_details_to_info or not opts.sd_hypernetwork else opts.sd_hypernetwork),
"Batch size": (None if p.batch_size < 2 else p.batch_size),
From 594cbfd8fbe4078b43ceccf01509eeef3d6790c6 Mon Sep 17 00:00:00 2001
From: William Moorehouse
Date: Sun, 9 Oct 2022 07:27:11 -0400
Subject: [PATCH 214/460] Sanitize infotext output (for now)
---
modules/processing.py | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index c035c9902..049f37698 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -284,9 +284,9 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
"Size": f"{p.width}x{p.height}",
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
- "Model": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name),
- "Model VAE": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_model_vae_name else shared.sd_model.sd_model_vae_name),
- "Model hypernetwork": (None if not opts.add_extended_model_details_to_info or not opts.sd_hypernetwork else opts.sd_hypernetwork),
+ "Model": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
+ "Model VAE": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_model_vae_name else shared.sd_model.sd_model_vae_name.replace(',', '').replace(':', '')),
+ "Model hypernetwork": (None if not opts.add_extended_model_details_to_info or not opts.sd_hypernetwork else opts.sd_hypernetwork.replace(',', '').replace(':', '')),
"Batch size": (None if p.batch_size < 2 else p.batch_size),
"Batch pos": (None if p.batch_size < 2 else position_in_batch),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
From e6e8cabe0c9c335e0d72345602c069b198558b53 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 14:57:48 +0300
Subject: [PATCH 215/460] change up #2056 to make it work as intended, plus
make the XY plot write correct values to images
---
modules/processing.py | 5 ++---
modules/sd_models.py | 2 --
modules/shared.py | 2 +-
3 files changed, 3 insertions(+), 6 deletions(-)
diff --git a/modules/processing.py b/modules/processing.py
index 049f37698..04aed989d 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -284,9 +284,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
"Size": f"{p.width}x{p.height}",
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
- "Model": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
- "Model VAE": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_model_vae_name else shared.sd_model.sd_model_vae_name.replace(',', '').replace(':', '')),
- "Model hypernetwork": (None if not opts.add_extended_model_details_to_info or not opts.sd_hypernetwork else opts.sd_hypernetwork.replace(',', '').replace(':', '')),
+ "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
+ "Hypernet": (None if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name.replace(',', '').replace(':', '')),
"Batch size": (None if p.batch_size < 2 else p.batch_size),
"Batch pos": (None if p.batch_size < 2 else position_in_batch),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 3fa42329c..e63d3c292 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -4,7 +4,6 @@ import sys
from collections import namedtuple
import torch
from omegaconf import OmegaConf
-from pathlib import Path
from ldm.util import instantiate_from_config
@@ -158,7 +157,6 @@ def load_model_weights(model, checkpoint_info):
vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
model.first_stage_model.load_state_dict(vae_dict)
- model.sd_model_vae_name = Path(vae_file).stem
model.sd_model_hash = sd_model_hash
model.sd_model_checkpoint = checkpoint_file
diff --git a/modules/shared.py b/modules/shared.py
index ca63f7d8e..6ecc2503a 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -242,7 +242,7 @@ options_templates.update(options_section(('ui', "User interface"), {
"return_grid": OptionInfo(True, "Show grid in results for web"),
"do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
"add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
- "add_extended_model_details_to_info": OptionInfo(False, "Add extended model details to generation information (model name, VAE, hypernetwork)"),
+ "add_model_name_to_info": OptionInfo(False, "Add model name to generation information"),
"font": OptionInfo("", "Font for image grids that have text"),
"js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
"js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
From 2c52f4da7ff80a3ec277105f4db6146c6379898a Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 15:01:42 +0300
Subject: [PATCH 216/460] fix broken samplers in XY plot
---
scripts/xy_grid.py | 1 +
1 file changed, 1 insertion(+)
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index fe9490673..c89ca1a9b 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -259,6 +259,7 @@ class Script(scripts.Script):
# Confirm options are valid before starting
if opt.label == "Sampler":
+ samplers_dict = build_samplers_dict(p)
for sampler_val in valslist:
if sampler_val.lower() not in samplers_dict.keys():
raise RuntimeError(f"Unknown sampler: {sampler_val}")
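`build_samplers_dict(p)` needs the processing object because the valid sampler list depends on it (note `get_correct_sampler` imported earlier), and the grid code compares lower-cased names. A plausible shape for the helper, as an assumption for illustration rather than the actual source:

    from collections import namedtuple

    def build_samplers_dict_sketch(samplers):
        # map lower-cased sampler names to their index for case-insensitive lookup
        return {s.name.lower(): i for i, s in enumerate(samplers)}

    S = namedtuple("S", ["name"])
    print(build_samplers_dict_sketch([S("Euler a"), S("DDIM")]))  # {'euler a': 0, 'ddim': 1}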
From 9d1138e2940c4ddcd2685bcba12c7d407e9e0ec5 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 15:08:10 +0300
Subject: [PATCH 217/460] fix typo in filename for ESRGAN arch
---
modules/esrgan_model.py | 2 +-
modules/{esrgam_model_arch.py => esrgan_model_arch.py} | 0
2 files changed, 1 insertion(+), 1 deletion(-)
rename modules/{esrgam_model_arch.py => esrgan_model_arch.py} (100%)
diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index 3970e6e47..46ad0da3c 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -5,7 +5,7 @@ import torch
from PIL import Image
from basicsr.utils.download_util import load_file_from_url
-import modules.esrgam_model_arch as arch
+import modules.esrgan_model_arch as arch
from modules import shared, modelloader, images, devices
from modules.upscaler import Upscaler, UpscalerData
from modules.shared import opts
diff --git a/modules/esrgam_model_arch.py b/modules/esrgan_model_arch.py
similarity index 100%
rename from modules/esrgam_model_arch.py
rename to modules/esrgan_model_arch.py
From f8197976ef5f0523faffb2b237e9166fb2bedecd Mon Sep 17 00:00:00 2001
From: Greendayle
Date: Sun, 9 Oct 2022 13:44:13 +0200
Subject: [PATCH 218/460] Shielded launch environment creation from
multiprocessing
---
launch.py | 178 ++++++++++++++++++++++++++----------------------------
1 file changed, 87 insertions(+), 91 deletions(-)
diff --git a/launch.py b/launch.py
index b0a59b6a1..d1a4fd6ae 100644
--- a/launch.py
+++ b/launch.py
@@ -6,40 +6,11 @@ import importlib.util
import shlex
import platform
-dir_repos = "repositories"
-dir_tmp = "tmp"
-
-python = sys.executable
-git = os.environ.get('GIT', "git")
-torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
-requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
-commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
-
-gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
-clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
-
-stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
-taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
-k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "f4e99857772fc3a126ba886aadf795a332774878")
-codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
-blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
-
-args = shlex.split(commandline_args)
-
def extract_arg(args, name):
return [x for x in args if x != name], name in args
-args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test')
-xformers = '--xformers' in args
-deepdanbooru = '--deepdanbooru' in args
-
-
-def repo_dir(name):
- return os.path.join(dir_repos, name)
-
-
def run(command, desc=None, errdesc=None):
if desc is not None:
print(desc)
@@ -59,23 +30,11 @@ stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.st
return result.stdout.decode(encoding="utf8", errors="ignore")
-def run_python(code, desc=None, errdesc=None):
- return run(f'"{python}" -c "{code}"', desc, errdesc)
-
-
-def run_pip(args, desc=None):
- return run(f'"{python}" -m pip {args} --prefer-binary', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
-
-
def check_run(command):
result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True)
return result.returncode == 0
-def check_run_python(code):
- return check_run(f'"{python}" -c "{code}"')
-
-
def is_installed(package):
try:
spec = importlib.util.find_spec(package)
@@ -85,80 +44,117 @@ def is_installed(package):
return spec is not None
-def git_clone(url, dir, name, commithash=None):
- # TODO clone into temporary dir and move if successful
+def prepare_enviroment():
+ dir_repos = "repositories"
- if os.path.exists(dir):
- if commithash is None:
+ python = sys.executable
+ git = os.environ.get('GIT', "git")
+ torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
+ requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
+ commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
+
+ gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379")
+ clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1")
+
+ stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc")
+ taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6")
+ k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "f4e99857772fc3a126ba886aadf795a332774878")
+ codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af")
+ blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9")
+
+ args = shlex.split(commandline_args)
+
+ args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test')
+ xformers = '--xformers' in args
+ deepdanbooru = '--deepdanbooru' in args
+
+ def repo_dir(name):
+ return os.path.join(dir_repos, name)
+
+ def run_python(code, desc=None, errdesc=None):
+ return run(f'"{python}" -c "{code}"', desc, errdesc)
+
+ def run_pip(args, desc=None):
+ return run(f'"{python}" -m pip {args} --prefer-binary', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
+
+ def check_run_python(code):
+ return check_run(f'"{python}" -c "{code}"')
+
+ def git_clone(url, dir, name, commithash=None):
+ # TODO clone into temporary dir and move if successful
+
+ if os.path.exists(dir):
+ if commithash is None:
+ return
+
+ current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
+ if current_hash == commithash:
+ return
+
+ run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
+ run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
return
- current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
- if current_hash == commithash:
- return
+ run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
- run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
- run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
- return
+ if commithash is not None:
+ run(f'"{git}" -C {dir} checkout {commithash}', None, f"Couldn't checkout {name}'s hash: {commithash}")
- run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
+ try:
+ commit = run(f"{git} rev-parse HEAD").strip()
+ except Exception:
+ commit = ""
- if commithash is not None:
- run(f'"{git}" -C {dir} checkout {commithash}', None, f"Couldn't checkout {name}'s hash: {commithash}")
+ print(f"Python {sys.version}")
+ print(f"Commit hash: {commit}")
+ if not is_installed("torch") or not is_installed("torchvision"):
+ run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch")
-try:
- commit = run(f"{git} rev-parse HEAD").strip()
-except Exception:
- commit = ""
+ if not skip_torch_cuda_test:
+ run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'")
-print(f"Python {sys.version}")
-print(f"Commit hash: {commit}")
+ if not is_installed("gfpgan"):
+ run_pip(f"install {gfpgan_package}", "gfpgan")
+ if not is_installed("clip"):
+ run_pip(f"install {clip_package}", "clip")
-if not is_installed("torch") or not is_installed("torchvision"):
- run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch")
+ if not is_installed("xformers") and xformers and platform.python_version().startswith("3.10"):
+ if platform.system() == "Windows":
+ run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
+ elif platform.system() == "Linux":
+ run_pip("install xformers", "xformers")
-if not skip_torch_cuda_test:
- run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'")
+ if not is_installed("deepdanbooru") and deepdanbooru:
+ run_pip("install git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru")
-if not is_installed("gfpgan"):
- run_pip(f"install {gfpgan_package}", "gfpgan")
+ os.makedirs(dir_repos, exist_ok=True)
-if not is_installed("clip"):
- run_pip(f"install {clip_package}", "clip")
+ git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
+ git_clone("https://github.com/CompVis/taming-transformers.git", repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
+ git_clone("https://github.com/crowsonkb/k-diffusion.git", repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
+ git_clone("https://github.com/sczhou/CodeFormer.git", repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
+ git_clone("https://github.com/salesforce/BLIP.git", repo_dir('BLIP'), "BLIP", blip_commit_hash)
-if not is_installed("xformers") and xformers and platform.python_version().startswith("3.10"):
- if platform.system() == "Windows":
- run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers")
- elif platform.system() == "Linux":
- run_pip("install xformers", "xformers")
+ if not is_installed("lpips"):
+ run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
-if not is_installed("deepdanbooru") and deepdanbooru:
- run_pip("install git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru")
+ run_pip(f"install -r {requirements_file}", "requirements for Web UI")
-os.makedirs(dir_repos, exist_ok=True)
+ sys.argv += args
-git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash)
-git_clone("https://github.com/CompVis/taming-transformers.git", repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash)
-git_clone("https://github.com/crowsonkb/k-diffusion.git", repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash)
-git_clone("https://github.com/sczhou/CodeFormer.git", repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash)
-git_clone("https://github.com/salesforce/BLIP.git", repo_dir('BLIP'), "BLIP", blip_commit_hash)
+ if "--exit" in args:
+ print("Exiting because of --exit argument")
+ exit(0)
-if not is_installed("lpips"):
- run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer")
-
-run_pip(f"install -r {requirements_file}", "requirements for Web UI")
-
-sys.argv += args
-
-if "--exit" in args:
- print("Exiting because of --exit argument")
- exit(0)
def start_webui():
print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}")
import webui
webui.webui()
+
if __name__ == "__main__":
+ prepare_enviroment()
start_webui()
From bba2ac8324ccd1a67c78e5f59babae8323ec7dc6 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 15:22:51 +0300
Subject: [PATCH 219/460] reshuffle the code a bit in launcher to keep
functions in one place for #2069
---
launch.py | 77 +++++++++++++++++++++++++++++--------------------------
1 file changed, 41 insertions(+), 36 deletions(-)
diff --git a/launch.py b/launch.py
index d1a4fd6ae..f42f557de 100644
--- a/launch.py
+++ b/launch.py
@@ -6,6 +6,10 @@ import importlib.util
import shlex
import platform
+dir_repos = "repositories"
+python = sys.executable
+git = os.environ.get('GIT', "git")
+
def extract_arg(args, name):
return [x for x in args if x != name], name in args
@@ -44,11 +48,44 @@ def is_installed(package):
return spec is not None
-def prepare_enviroment():
- dir_repos = "repositories"
+def repo_dir(name):
+ return os.path.join(dir_repos, name)
- python = sys.executable
- git = os.environ.get('GIT', "git")
+
+def run_python(code, desc=None, errdesc=None):
+ return run(f'"{python}" -c "{code}"', desc, errdesc)
+
+
+def run_pip(args, desc=None):
+ return run(f'"{python}" -m pip {args} --prefer-binary', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
+
+
+def check_run_python(code):
+ return check_run(f'"{python}" -c "{code}"')
+
+
+def git_clone(url, dir, name, commithash=None):
+ # TODO clone into temporary dir and move if successful
+
+ if os.path.exists(dir):
+ if commithash is None:
+ return
+
+ current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
+ if current_hash == commithash:
+ return
+
+ run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
+ run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
+ return
+
+ run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
+
+ if commithash is not None:
+ run(f'"{git}" -C {dir} checkout {commithash}', None, f"Couldn't checkout {name}'s hash: {commithash}")
+
+
+def prepare_enviroment():
torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113")
requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt")
commandline_args = os.environ.get('COMMANDLINE_ARGS', "")
@@ -68,38 +105,6 @@ def prepare_enviroment():
xformers = '--xformers' in args
deepdanbooru = '--deepdanbooru' in args
- def repo_dir(name):
- return os.path.join(dir_repos, name)
-
- def run_python(code, desc=None, errdesc=None):
- return run(f'"{python}" -c "{code}"', desc, errdesc)
-
- def run_pip(args, desc=None):
- return run(f'"{python}" -m pip {args} --prefer-binary', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
-
- def check_run_python(code):
- return check_run(f'"{python}" -c "{code}"')
-
- def git_clone(url, dir, name, commithash=None):
- # TODO clone into temporary dir and move if successful
-
- if os.path.exists(dir):
- if commithash is None:
- return
-
- current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
- if current_hash == commithash:
- return
-
- run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
- run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commit for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
- return
-
- run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
-
- if commithash is not None:
- run(f'"{git}" -C {dir} checkout {commithash}', None, f"Couldn't checkout {name}'s hash: {commithash}")
-
try:
commit = run(f"{git} rev-parse HEAD").strip()
except Exception:
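`extract_arg` is the small helper this refactor keeps at module level: it strips a flag out of the argument list and reports whether it was present. Usage sketch:

    def extract_arg(args, name):
        return [x for x in args if x != name], name in args

    args, skip_test = extract_arg(["--xformers", "--skip-torch-cuda-test"], "--skip-torch-cuda-test")
    print(args, skip_test)  # ['--xformers'] True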
From 875ddfeecfaffad9eee24813301637cba310337d Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 17:58:43 +0300
Subject: [PATCH 220/460] added guard for torch.load to prevent loading pickles
with unknown content
---
modules/paths.py | 1 +
modules/safe.py | 89 +++++++++++++++++++++++++++++++++++++++++++++++
modules/shared.py | 1 +
3 files changed, 91 insertions(+)
create mode 100644 modules/safe.py
diff --git a/modules/paths.py b/modules/paths.py
index 0519caa0a..1e7a2fbcf 100644
--- a/modules/paths.py
+++ b/modules/paths.py
@@ -1,6 +1,7 @@
import argparse
import os
import sys
+import modules.safe
script_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
models_path = os.path.join(script_path, "models")
diff --git a/modules/safe.py b/modules/safe.py
new file mode 100644
index 000000000..2d2c13716
--- /dev/null
+++ b/modules/safe.py
@@ -0,0 +1,89 @@
+# this code is adapted from the script contributed by anon from /h/
+
+import io
+import pickle
+import collections
+import sys
+import traceback
+
+import torch
+import numpy
+import _codecs
+import zipfile
+
+
+def encode(*args):
+ out = _codecs.encode(*args)
+ return out
+
+
+class RestrictedUnpickler(pickle.Unpickler):
+ def persistent_load(self, saved_id):
+ assert saved_id[0] == 'storage'
+ return torch.storage._TypedStorage()
+
+ def find_class(self, module, name):
+ if module == 'collections' and name == 'OrderedDict':
+ return getattr(collections, name)
+ if module == 'torch._utils' and name in ['_rebuild_tensor_v2', '_rebuild_parameter']:
+ return getattr(torch._utils, name)
+ if module == 'torch' and name in ['FloatStorage', 'HalfStorage', 'IntStorage', 'LongStorage']:
+ return getattr(torch, name)
+ if module == 'torch.nn.modules.container' and name in ['ParameterDict']:
+ return getattr(torch.nn.modules.container, name)
+ if module == 'numpy.core.multiarray' and name == 'scalar':
+ return numpy.core.multiarray.scalar
+ if module == 'numpy' and name == 'dtype':
+ return numpy.dtype
+ if module == '_codecs' and name == 'encode':
+ return encode
+ if module == "pytorch_lightning.callbacks" and name == 'model_checkpoint':
+ import pytorch_lightning.callbacks
+ return pytorch_lightning.callbacks.model_checkpoint
+ if module == "pytorch_lightning.callbacks.model_checkpoint" and name == 'ModelCheckpoint':
+ import pytorch_lightning.callbacks.model_checkpoint
+ return pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint
+ if module == "__builtin__" and name == 'set':
+ return set
+
+ # Forbid everything else.
+ raise pickle.UnpicklingError(f"global '{module}/{name}' is forbidden")
+
+
+def check_pt(filename):
+ try:
+
+ # new pytorch format is a zip file
+ with zipfile.ZipFile(filename) as z:
+ with z.open('archive/data.pkl') as file:
+ unpickler = RestrictedUnpickler(file)
+ unpickler.load()
+
+ except zipfile.BadZipfile:
+
+ # if it's not a zip file, it's an old pytorch format, with five objects written to pickle
+ with open(filename, "rb") as file:
+ unpickler = RestrictedUnpickler(file)
+ for i in range(5):
+ unpickler.load()
+
+
+def load(filename, *args, **kwargs):
+ from modules import shared
+
+ try:
+ if not shared.cmd_opts.disable_safe_unpickle:
+ check_pt(filename)
+
+ except Exception:
+ print(f"Error verifying pickled file from {filename}:", file=sys.stderr)
+ print(traceback.format_exc(), file=sys.stderr)
+ print(f"\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr)
+ print(f"You can skip this check with --disable-safe-unpickle commandline argument.", file=sys.stderr)
+ return None
+
+ return unsafe_torch_load(filename, *args, **kwargs)
+
+
+unsafe_torch_load = torch.load
+torch.load = load
diff --git a/modules/shared.py b/modules/shared.py
index 6ecc2503a..3d7f08e14 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -65,6 +65,7 @@ parser.add_argument("--autolaunch", action='store_true', help="open the webui UR
parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False)
parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False)
parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False)
+parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False)
cmd_opts = parser.parse_args()
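The reason an allow-list is needed at all: unpickling can execute arbitrary code through `__reduce__`. A standalone demonstration of the attack and of the `find_class` veto (it does not import the new modules/safe.py):

    import io
    import pickle

    class Evil:
        def __reduce__(self):
            import os
            return (os.system, ("echo pwned",))  # would run on pickle.loads()

    payload = pickle.dumps(Evil())

    class NoGlobals(pickle.Unpickler):
        def find_class(self, module, name):
            # forbid everything, like the catch-all at the end of RestrictedUnpickler
            raise pickle.UnpicklingError(f"global '{module}/{name}' is forbidden")

    try:
        NoGlobals(io.BytesIO(payload)).load()
    except pickle.UnpicklingError as e:
        print(e)  # blocked before os.system is ever resolved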
From d3cd46b0388918128af203fda37fa63461c46611 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sun, 9 Oct 2022 16:19:33 +0100
Subject: [PATCH 221/460] Update lightbox to change displayed image as soon as
generation is complete (#1933)
* add updateOnBackgroundChange
* typo fixes.
* reindent to 4 spaces
---
javascript/imageviewer.js | 168 ++++++++++++++++++++++----------------
1 file changed, 96 insertions(+), 72 deletions(-)
diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index 6a00c0da4..65a33dd78 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -1,72 +1,97 @@
// A full size 'lightbox' preview modal shown when left clicking on gallery previews
-
function closeModal() {
- gradioApp().getElementById("lightboxModal").style.display = "none";
+ gradioApp().getElementById("lightboxModal").style.display = "none";
}
function showModal(event) {
- const source = event.target || event.srcElement;
- const modalImage = gradioApp().getElementById("modalImage")
- const lb = gradioApp().getElementById("lightboxModal")
- modalImage.src = source.src
- if (modalImage.style.display === 'none') {
- lb.style.setProperty('background-image', 'url(' + source.src + ')');
- }
- lb.style.display = "block";
- lb.focus()
- event.stopPropagation()
+ const source = event.target || event.srcElement;
+ const modalImage = gradioApp().getElementById("modalImage")
+ const lb = gradioApp().getElementById("lightboxModal")
+ modalImage.src = source.src
+ if (modalImage.style.display === 'none') {
+ lb.style.setProperty('background-image', 'url(' + source.src + ')');
+ }
+ lb.style.display = "block";
+ lb.focus()
+ event.stopPropagation()
}
function negmod(n, m) {
- return ((n % m) + m) % m;
+ return ((n % m) + m) % m;
}
-function modalImageSwitch(offset){
- var allgalleryButtons = gradioApp().querySelectorAll(".gallery-item.transition-all")
- var galleryButtons = []
- allgalleryButtons.forEach(function(elem){
- if(elem.parentElement.offsetParent){
- galleryButtons.push(elem);
+function updateOnBackgroundChange() {
+ const modalImage = gradioApp().getElementById("modalImage")
+ if (modalImage && modalImage.offsetParent) {
+ let allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
+ let currentButton = null
+ allcurrentButtons.forEach(function(elem) {
+ if (elem.parentElement.offsetParent) {
+ currentButton = elem;
+ }
+ })
+
+ if (modalImage.src != currentButton.children[0].src) {
+ modalImage.src = currentButton.children[0].src;
+ if (modalImage.style.display === 'none') {
+ modal.style.setProperty('background-image', `url(${modalImage.src})`)
+ }
+ }
}
- })
+}
- if(galleryButtons.length>1){
- var allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
- var currentButton = null
- allcurrentButtons.forEach(function(elem){
- if(elem.parentElement.offsetParent){
- currentButton = elem;
+function modalImageSwitch(offset) {
+ var allgalleryButtons = gradioApp().querySelectorAll(".gallery-item.transition-all")
+ var galleryButtons = []
+ allgalleryButtons.forEach(function(elem) {
+ if (elem.parentElement.offsetParent) {
+ galleryButtons.push(elem);
}
- })
+ })
- var result = -1
- galleryButtons.forEach(function(v, i){ if(v==currentButton) { result = i } })
+ if (galleryButtons.length > 1) {
+ var allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
+ var currentButton = null
+ allcurrentButtons.forEach(function(elem) {
+ if (elem.parentElement.offsetParent) {
+ currentButton = elem;
+ }
+ })
- if(result != -1){
- nextButton = galleryButtons[negmod((result+offset),galleryButtons.length)]
- nextButton.click()
- const modalImage = gradioApp().getElementById("modalImage");
- const modal = gradioApp().getElementById("lightboxModal");
- modalImage.src = nextButton.children[0].src;
- if (modalImage.style.display === 'none') {
- modal.style.setProperty('background-image', `url(${modalImage.src})`)
+ var result = -1
+ galleryButtons.forEach(function(v, i) {
+ if (v == currentButton) {
+ result = i
+ }
+ })
+
+ if (result != -1) {
+ nextButton = galleryButtons[negmod((result + offset), galleryButtons.length)]
+ nextButton.click()
+ const modalImage = gradioApp().getElementById("modalImage");
+ const modal = gradioApp().getElementById("lightboxModal");
+ modalImage.src = nextButton.children[0].src;
+ if (modalImage.style.display === 'none') {
+ modal.style.setProperty('background-image', `url(${modalImage.src})`)
+ }
+ setTimeout(function() {
+ modal.focus()
+ }, 10)
}
- setTimeout( function(){modal.focus()},10)
- }
- }
+ }
}
-function modalNextImage(event){
- modalImageSwitch(1)
- event.stopPropagation()
+function modalNextImage(event) {
+ modalImageSwitch(1)
+ event.stopPropagation()
}
-function modalPrevImage(event){
- modalImageSwitch(-1)
- event.stopPropagation()
+function modalPrevImage(event) {
+ modalImageSwitch(-1)
+ event.stopPropagation()
}
-function modalKeyHandler(event){
+function modalKeyHandler(event) {
switch (event.key) {
case "ArrowLeft":
modalPrevImage(event)
@@ -80,24 +105,22 @@ function modalKeyHandler(event){
}
}
-function showGalleryImage(){
+function showGalleryImage() {
setTimeout(function() {
fullImg_preview = gradioApp().querySelectorAll('img.w-full.object-contain')
-
- if(fullImg_preview != null){
+
+ if (fullImg_preview != null) {
fullImg_preview.forEach(function function_name(e) {
if (e.dataset.modded)
return;
e.dataset.modded = true;
if(e && e.parentElement.tagName == 'DIV'){
-
e.style.cursor='pointer'
-
e.addEventListener('click', function (evt) {
if(!opts.js_modal_lightbox) return;
modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed)
showModal(evt)
- },true);
+ }, true);
}
});
}
@@ -105,21 +128,21 @@ function showGalleryImage(){
}, 100);
}
-function modalZoomSet(modalImage, enable){
- if( enable ){
+function modalZoomSet(modalImage, enable) {
+ if (enable) {
modalImage.classList.add('modalImageFullscreen');
- } else{
+ } else {
modalImage.classList.remove('modalImageFullscreen');
}
}
-function modalZoomToggle(event){
+function modalZoomToggle(event) {
modalImage = gradioApp().getElementById("modalImage");
modalZoomSet(modalImage, !modalImage.classList.contains('modalImageFullscreen'))
event.stopPropagation()
}
-function modalTileImageToggle(event){
+function modalTileImageToggle(event) {
const modalImage = gradioApp().getElementById("modalImage");
const modal = gradioApp().getElementById("lightboxModal");
const isTiling = modalImage.style.display === 'none';
@@ -134,17 +157,18 @@ function modalTileImageToggle(event){
event.stopPropagation()
}
-function galleryImageHandler(e){
- if(e && e.parentElement.tagName == 'BUTTON'){
+function galleryImageHandler(e) {
+ if (e && e.parentElement.tagName == 'BUTTON') {
e.onclick = showGalleryImage;
}
}
-onUiUpdate(function(){
+onUiUpdate(function() {
fullImg_preview = gradioApp().querySelectorAll('img.w-full')
- if(fullImg_preview != null){
- fullImg_preview.forEach(galleryImageHandler);
+ if (fullImg_preview != null) {
+ fullImg_preview.forEach(galleryImageHandler);
}
+ updateOnBackgroundChange();
})
document.addEventListener("DOMContentLoaded", function() {
@@ -152,13 +176,13 @@ document.addEventListener("DOMContentLoaded", function() {
const modal = document.createElement('div')
modal.onclick = closeModal;
modal.id = "lightboxModal";
- modal.tabIndex=0
+ modal.tabIndex = 0
modal.addEventListener('keydown', modalKeyHandler, true)
const modalControls = document.createElement('div')
modalControls.className = 'modalControls gradio-container';
modal.append(modalControls);
-
+
const modalZoom = document.createElement('span')
modalZoom.className = 'modalZoom cursor';
modalZoom.innerHTML = '⤡'
@@ -183,30 +207,30 @@ document.addEventListener("DOMContentLoaded", function() {
const modalImage = document.createElement('img')
modalImage.id = 'modalImage';
modalImage.onclick = closeModal;
- modalImage.tabIndex=0
+ modalImage.tabIndex = 0
modalImage.addEventListener('keydown', modalKeyHandler, true)
modal.appendChild(modalImage)
const modalPrev = document.createElement('a')
modalPrev.className = 'modalPrev';
modalPrev.innerHTML = '❮'
- modalPrev.tabIndex=0
- modalPrev.addEventListener('click',modalPrevImage,true);
+ modalPrev.tabIndex = 0
+ modalPrev.addEventListener('click', modalPrevImage, true);
modalPrev.addEventListener('keydown', modalKeyHandler, true)
modal.appendChild(modalPrev)
const modalNext = document.createElement('a')
modalNext.className = 'modalNext';
modalNext.innerHTML = '❯'
- modalNext.tabIndex=0
- modalNext.addEventListener('click',modalNextImage,true);
+ modalNext.tabIndex = 0
+ modalNext.addEventListener('click', modalNextImage, true);
modalNext.addEventListener('keydown', modalKeyHandler, true)
modal.appendChild(modalNext)
gradioApp().getRootNode().appendChild(modal)
-
+
document.body.appendChild(modalFragment);
-
+
});
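The wrap-around gallery navigation in modalImageSwitch above depends on a negmod helper defined elsewhere in imageviewer.js; its definition is not part of this patch. Assuming the usual positive-modulo semantics, a minimal sketch of what it needs to do:

function negmod(n, m) {
    // JavaScript's % operator keeps the sign of n (e.g. -1 % 5 === -1),
    // so normalize the result into [0, m) to make a backward step wrap
    // from the first gallery image to the last.
    return ((n % m) + m) % m;
}

With this, negmod(-1, 5) returns 4, which is what lets modalPrevImage cycle past index 0 instead of indexing out of bounds.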
From 9ecea0a8d6bdc434755e11128487fd62f1ff130f Mon Sep 17 00:00:00 2001
From: Artem Zagidulin
Date: Sun, 9 Oct 2022 16:14:56 +0300
Subject: [PATCH 222/460] fix missing PNG info when using Extras Batch Process
---
modules/extras.py | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/modules/extras.py b/modules/extras.py
index 39dd38060..41e8612c7 100644
--- a/modules/extras.py
+++ b/modules/extras.py
@@ -29,7 +29,7 @@ def run_extras(extras_mode, image, image_folder, gfpgan_visibility, codeformer_v
if extras_mode == 1:
#convert file to pillow image
for img in image_folder:
- image = Image.fromarray(np.array(Image.open(img)))
+ image = Image.open(img)
imageArr.append(image)
imageNameArr.append(os.path.splitext(img.orig_name)[0])
else:
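The one-line change above matters because round-tripping through NumPy rebuilds the image from raw pixel data only: Image.fromarray returns a fresh PIL image whose info dict is empty, so the PNG text chunks that carry the generation parameters are dropped before the Extras pipeline can copy them to the output file. A minimal sketch of the difference (file name hypothetical):

from PIL import Image
import numpy as np

img = Image.open("example.png")            # hypothetical PNG with text chunks
print(img.info)                            # e.g. {'parameters': '...'} is preserved

stripped = Image.fromarray(np.array(img))  # rebuilt from the pixel array alone
print(stripped.info)                       # {} -- the metadata is gone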
From a2d70f25bf51264d8d68f4f36937b390f79334a7 Mon Sep 17 00:00:00 2001
From: supersteve3d <39339941+supersteve3d@users.noreply.github.com>
Date: Sun, 9 Oct 2022 23:40:18 +0800
Subject: [PATCH 223/460] Add files via upload
Updated txt2img screenshot (UI as of Oct 9th) for the GitHub webui / README.md
---
txt2img_Screenshot.png | Bin 539132 -> 337094 bytes
1 file changed, 0 insertions(+), 0 deletions(-)
diff --git a/txt2img_Screenshot.png b/txt2img_Screenshot.png
index fedd538e3cc7ea14ff5bed224dfbcd9765ec35f4..6e2759a4c8aa2d05e1334e871b2a451f1104ba60 100644
GIT binary patch
literal 337094
[base85-encoded image data elided: replacement txt2img_Screenshot.png]