From 9e737cbadcdc89c23b119701815275e7c209ff00 Mon Sep 17 00:00:00 2001 From: Alexandre Simard Date: Mon, 26 Sep 2022 17:18:57 -0400 Subject: [PATCH 001/460] Solve issue #962 Fix by @MrAcademy --- .gitignore | 3 ++- javascript/ui.js | 5 ++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index 9d78853af..fa1ab43e7 100644 --- a/.gitignore +++ b/.gitignore @@ -19,4 +19,5 @@ __pycache__ /webui-user.sh /interrogate /user.css -/.idea \ No newline at end of file +/.idea +/SwinIR diff --git a/javascript/ui.js b/javascript/ui.js index 076e9436c..7db4db48d 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -1,9 +1,8 @@ // various functions for interation with ui.py not large enough to warrant putting them in separate files function selected_gallery_index(){ - var gr = gradioApp() - var buttons = gradioApp().querySelectorAll(".gallery-item") - var button = gr.querySelector(".gallery-item.\\!ring-2") + var buttons = gradioApp().querySelectorAll('[style="display: block;"].tabitem .gallery-item') + var button = gradioApp().querySelector('[style="display: block;"].tabitem .gallery-item.\\!ring-2') var result = -1 buttons.forEach(function(v, i){ if(v==button) { result = i } }) From 03ee67bfd34b9e872b33eb05fef5db83410b16f3 Mon Sep 17 00:00:00 2001 From: WDevelopsWebApps <97454358+WDevelopsWebApps@users.noreply.github.com> Date: Wed, 28 Sep 2022 10:53:40 +0200 Subject: [PATCH 002/460] add advanced saving for save button --- modules/images.py | 5 ++++- modules/ui.py | 35 ++++++++++++++++++++++++++++------- 2 files changed, 32 insertions(+), 8 deletions(-) diff --git a/modules/images.py b/modules/images.py index 9458bf8d4..923f81dfb 100644 --- a/modules/images.py +++ b/modules/images.py @@ -290,7 +290,10 @@ def apply_filename_pattern(x, p, seed, prompt): x = x.replace("[cfg]", str(p.cfg_scale)) x = x.replace("[width]", str(p.width)) x = x.replace("[height]", str(p.height)) - x = x.replace("[styles]", sanitize_filename_part(", ".join(p.styles), replace_spaces=False)) + #currently disabled if using the save button, will work otherwise + # if enabled it will cause a bug because styles is not included in the save_files data dictionary + if hasattr(p, "styles"): + x = x.replace("[styles]", sanitize_filename_part(", ".join(p.styles), replace_spaces=False)) x = x.replace("[sampler]", sanitize_filename_part(sd_samplers.samplers[p.sampler_index].name, replace_spaces=False)) x = x.replace("[model_hash]", shared.sd_model.sd_model_hash) diff --git a/modules/ui.py b/modules/ui.py index 7db8edbd8..87a86a45d 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -28,6 +28,7 @@ import modules.gfpgan_model import modules.codeformer_model import modules.styles import modules.generation_parameters_copypaste +from modules.images import apply_filename_pattern, get_next_sequence_number # this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the bowser will not show any UI mimetypes.init() @@ -90,13 +91,26 @@ def send_gradio_gallery_to_image(x): def save_files(js_data, images, index): - import csv - - os.makedirs(opts.outdir_save, exist_ok=True) - + import csv filenames = [] + #quick dictionary to class object conversion. 
Its neccesary due apply_filename_pattern requiring it + class MyObject: + def __init__(self, d=None): + if d is not None: + for key, value in d.items(): + setattr(self, key, value) + data = json.loads(js_data) + p = MyObject(data) + path = opts.outdir_save + save_to_dirs = opts.save_to_dirs + + if save_to_dirs: + dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, p.seed, p.prompt) + path = os.path.join(opts.outdir_save, dirname) + + os.makedirs(path, exist_ok=True) if index > -1 and opts.save_selected_only and (index > 0 or not opts.return_grid): # ensures we are looking at a specific non-grid picture, and we have save_selected_only images = [images[index]] @@ -107,11 +121,18 @@ def save_files(js_data, images, index): writer = csv.writer(file) if at_start: writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"]) + file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]" + if file_decoration != "": + file_decoration = "-" + file_decoration.lower() + file_decoration = apply_filename_pattern(file_decoration, p, p.seed, p.prompt) + truncated = (file_decoration[:240] + '..') if len(file_decoration) > 240 else file_decoration + filename_base = truncated - filename_base = str(int(time.time() * 1000)) + basecount = get_next_sequence_number(path, "") for i, filedata in enumerate(images): - filename = filename_base + ("" if len(images) == 1 else "-" + str(i + 1)) + ".png" - filepath = os.path.join(opts.outdir_save, filename) + file_number = f"{basecount+i:05}" + filename = file_number + filename_base + ".png" + filepath = os.path.join(path, filename) if filedata.startswith("data:image/png;base64,"): filedata = filedata[len("data:image/png;base64,"):] From c938679de7b87b4f14894d9f57fe0f40dd6e3c06 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Wed, 28 Sep 2022 22:14:13 -0300 Subject: [PATCH 003/460] Fix memory leak and reduce memory usage --- modules/codeformer_model.py | 6 ++++-- modules/devices.py | 3 ++- modules/extras.py | 2 ++ modules/gfpgan_model.py | 11 +++++------ modules/processing.py | 33 ++++++++++++++++++++++++++------- webui.py | 3 +++ 6 files changed, 42 insertions(+), 16 deletions(-) diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index 8fbdea249..2177291a7 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -89,7 +89,7 @@ def setup_codeformer(): output = self.net(cropped_face_t, w=w if w is not None else shared.opts.code_former_weight, adain=True)[0] restored_face = tensor2img(output, rgb2bgr=True, min_max=(-1, 1)) del output - torch.cuda.empty_cache() + devices.torch_gc() except Exception as error: print(f'\tFailed inference for CodeFormer: {error}', file=sys.stderr) restored_face = tensor2img(cropped_face_t, rgb2bgr=True, min_max=(-1, 1)) @@ -106,7 +106,9 @@ def setup_codeformer(): restored_img = cv2.resize(restored_img, (0, 0), fx=original_resolution[1]/restored_img.shape[1], fy=original_resolution[0]/restored_img.shape[0], interpolation=cv2.INTER_LINEAR) if shared.opts.face_restoration_unload: - self.net.to(devices.cpu) + self.net = None + self.face_helper = None + devices.torch_gc() return restored_img diff --git a/modules/devices.py b/modules/devices.py index 07bb23397..df63dd88e 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -1,4 +1,5 @@ import torch +import gc # has_mps is only available in nightly pytorch (for now), `getattr` for compatibility from modules import errors @@ -17,8 +18,8 @@ def 
get_optimal_device(): return cpu - def torch_gc(): + gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.ipc_collect() diff --git a/modules/extras.py b/modules/extras.py index 9a825530f..38b861675 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -98,6 +98,8 @@ def run_extras(extras_mode, image, image_folder, gfpgan_visibility, codeformer_v outputs.append(image) + devices.torch_gc() + return outputs, plaintext_to_html(info), '' diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index 44c5dc6ca..b1288f0ca 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -49,6 +49,7 @@ def gfpgan(): def gfpgan_fix_faces(np_image): + global loaded_gfpgan_model model = gfpgan() np_image_bgr = np_image[:, :, ::-1] @@ -56,7 +57,9 @@ def gfpgan_fix_faces(np_image): np_image = gfpgan_output_bgr[:, :, ::-1] if shared.opts.face_restoration_unload: - model.gfpgan.to(devices.cpu) + del model + loaded_gfpgan_model = None + devices.torch_gc() return np_image @@ -83,11 +86,7 @@ def setup_gfpgan(): return "GFPGAN" def restore(self, np_image): - np_image_bgr = np_image[:, :, ::-1] - cropped_faces, restored_faces, gfpgan_output_bgr = gfpgan().enhance(np_image_bgr, has_aligned=False, only_center_face=False, paste_back=True) - np_image = gfpgan_output_bgr[:, :, ::-1] - - return np_image + return gfpgan_fix_faces(np_image) shared.face_restorers.append(FaceRestorerGFPGAN()) except Exception: diff --git a/modules/processing.py b/modules/processing.py index 4ecdfcd2d..de5cda793 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -12,7 +12,7 @@ import cv2 from skimage import exposure import modules.sd_hijack -from modules import devices, prompt_parser, masking +from modules import devices, prompt_parser, masking, lowvram from modules.sd_hijack import model_hijack from modules.sd_samplers import samplers, samplers_for_img2img from modules.shared import opts, cmd_opts, state @@ -335,7 +335,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if state.job_count == -1: state.job_count = p.n_iter - for n in range(p.n_iter): + for n in range(p.n_iter): + with torch.no_grad(), precision_scope("cuda"), ema_scope(): if state.interrupted: break @@ -368,22 +369,32 @@ def process_images(p: StableDiffusionProcessing) -> Processed: x_samples_ddim = p.sd_model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) + del samples_ddim + + if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: + lowvram.send_everything_to_cpu() + + devices.torch_gc() + if opts.filter_nsfw: import modules.safety as safety x_samples_ddim = modules.safety.censor_batch(x_samples_ddim) - for i, x_sample in enumerate(x_samples_ddim): + for i, x_sample in enumerate(x_samples_ddim): + with torch.no_grad(), precision_scope("cuda"), ema_scope(): x_sample = 255. 
* np.moveaxis(x_sample.cpu().numpy(), 0, 2) x_sample = x_sample.astype(np.uint8) - if p.restore_faces: + if p.restore_faces: + with torch.no_grad(), precision_scope("cuda"), ema_scope(): if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration: images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration") - devices.torch_gc() - x_sample = modules.face_restoration.restore_faces(x_sample) + devices.torch_gc() + + with torch.no_grad(), precision_scope("cuda"), ema_scope(): image = Image.fromarray(x_sample) if p.color_corrections is not None and i < len(p.color_corrections): @@ -411,8 +422,13 @@ def process_images(p: StableDiffusionProcessing) -> Processed: infotexts.append(infotext(n, i)) output_images.append(image) - state.nextjob() + del x_samples_ddim + devices.torch_gc() + + state.nextjob() + + with torch.no_grad(), precision_scope("cuda"), ema_scope(): p.color_corrections = None index_of_first_image = 0 @@ -648,4 +664,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): if self.mask is not None: samples = samples * self.nmask + self.init_latent * self.mask + del x + devices.torch_gc() + return samples diff --git a/webui.py b/webui.py index c70a11c7c..b61a318db 100644 --- a/webui.py +++ b/webui.py @@ -22,7 +22,10 @@ import modules.txt2img import modules.img2img import modules.swinir as swinir import modules.sd_models +from torch.nn.functional import silu +import ldm +ldm.modules.diffusionmodules.model.nonlinearity = silu modules.codeformer_model.setup_codeformer() modules.gfpgan_model.setup_gfpgan() From c2d5b29040132c171bc4d77f1f63da972306f22c Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Thu, 29 Sep 2022 01:14:54 -0300 Subject: [PATCH 004/460] Move silu to sd_hijack --- modules/sd_hijack.py | 12 +++--------- webui.py | 3 --- 2 files changed, 3 insertions(+), 12 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index bfbd07f9a..4bc58fa2b 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -12,6 +12,7 @@ from ldm.util import default from einops import rearrange import ldm.modules.attention import ldm.modules.diffusionmodules.model +from torch.nn.functional import silu # see https://github.com/basujindal/stable-diffusion/pull/117 for discussion @@ -100,14 +101,6 @@ def split_cross_attention_forward(self, x, context=None, mask=None): return self.to_out(r2) -def nonlinearity_hijack(x): - # swish - t = torch.sigmoid(x) - x *= t - del t - - return x - def cross_attention_attnblock_forward(self, x): h_ = x h_ = self.norm(h_) @@ -245,11 +238,12 @@ class StableDiffusionModelHijack: m.cond_stage_model = FrozenCLIPEmbedderWithCustomWords(m.cond_stage_model, self) self.clip = m.cond_stage_model + ldm.modules.diffusionmodules.model.nonlinearity = silu + if cmd_opts.opt_split_attention_v1: ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward_v1 elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()): ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward - ldm.modules.diffusionmodules.model.nonlinearity = nonlinearity_hijack ldm.modules.diffusionmodules.model.AttnBlock.forward = cross_attention_attnblock_forward def flatten(el): diff --git a/webui.py b/webui.py index b61a318db..c70a11c7c 100644 --- a/webui.py +++ b/webui.py @@ -22,10 +22,7 @@ import modules.txt2img import modules.img2img import modules.swinir 
as swinir import modules.sd_models -from torch.nn.functional import silu -import ldm -ldm.modules.diffusionmodules.model.nonlinearity = silu modules.codeformer_model.setup_codeformer() modules.gfpgan_model.setup_gfpgan() From e82ea202997cbcd2ab72891cd075d9ba270eb67d Mon Sep 17 00:00:00 2001 From: d8ahazard Date: Fri, 30 Sep 2022 15:26:18 -0500 Subject: [PATCH 005/460] Optimize model loader Child classes only get populated to __subclassess__ when they are imported. We don't actually need to import any of them to webui any more, so clean up webUI imports and make sure loader imports children. Also, fix command line paths not actually being passed to the scalers. --- modules/modelloader.py | 19 ++++++++++++++++--- webui.py | 13 +++---------- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/modules/modelloader.py b/modules/modelloader.py index 1106aeb7f..b1721671b 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -4,7 +4,6 @@ import importlib from urllib.parse import urlparse from basicsr.utils.download_util import load_file_from_url - from modules import shared from modules.upscaler import Upscaler from modules.paths import script_path, models_path @@ -120,16 +119,30 @@ def move_files(src_path: str, dest_path: str, ext_filter: str = None): def load_upscalers(): + sd = shared.script_path + # We can only do this 'magic' method to dynamically load upscalers if they are referenced, + # so we'll try to import any _model.py files before looking in __subclasses__ + modules_dir = os.path.join(sd, "modules") + for file in os.listdir(modules_dir): + if "_model.py" in file: + model_name = file.replace("_model.py", "") + full_model = f"modules.{model_name}_model" + try: + importlib.import_module(full_model) + except: + pass datas = [] + c_o = vars(shared.cmd_opts) for cls in Upscaler.__subclasses__(): name = cls.__name__ module_name = cls.__module__ module = importlib.import_module(module_name) class_ = getattr(module, name) - cmd_name = f"{name.lower().replace('upscaler', '')}-models-path" + cmd_name = f"{name.lower().replace('upscaler', '')}_models_path" opt_string = None try: - opt_string = shared.opts.__getattr__(cmd_name) + if cmd_name in c_o: + opt_string = c_o[cmd_name] except: pass scaler = class_(opt_string) diff --git a/webui.py b/webui.py index b8cccd546..ebe39a170 100644 --- a/webui.py +++ b/webui.py @@ -1,28 +1,21 @@ import os -import threading - -from modules import devices -from modules.paths import script_path import signal import threading -import modules.paths + import modules.codeformer_model as codeformer -import modules.esrgan_model as esrgan -import modules.bsrgan_model as bsrgan import modules.extras import modules.face_restoration import modules.gfpgan_model as gfpgan import modules.img2img -import modules.ldsr_model as ldsr import modules.lowvram -import modules.realesrgan_model as realesrgan +import modules.paths import modules.scripts import modules.sd_hijack import modules.sd_models import modules.shared as shared -import modules.swinir_model as swinir import modules.txt2img import modules.ui +from modules import devices from modules import modelloader from modules.paths import script_path from modules.shared import cmd_opts From 8deae077004f0332ca607fc3a5d568b1a4705bec Mon Sep 17 00:00:00 2001 From: d8ahazard Date: Fri, 30 Sep 2022 15:28:37 -0500 Subject: [PATCH 006/460] Add ScuNET DeNoiser/Upscaler Q&D Implementation of ScuNET, thanks to our handy model loader. 
:P https://github.com/cszn/SCUNet --- modules/scunet_model.py | 90 ++++++++++++ modules/scunet_model_arch.py | 265 +++++++++++++++++++++++++++++++++++ modules/shared.py | 1 + 3 files changed, 356 insertions(+) create mode 100644 modules/scunet_model.py create mode 100644 modules/scunet_model_arch.py diff --git a/modules/scunet_model.py b/modules/scunet_model.py new file mode 100644 index 000000000..7987ac145 --- /dev/null +++ b/modules/scunet_model.py @@ -0,0 +1,90 @@ +import os.path +import sys +import traceback + +import PIL.Image +import numpy as np +import torch +from basicsr.utils.download_util import load_file_from_url + +import modules.upscaler +from modules import shared, modelloader +from modules.paths import models_path +from modules.scunet_model_arch import SCUNet as net + + +class UpscalerScuNET(modules.upscaler.Upscaler): + def __init__(self, dirname): + self.name = "ScuNET" + self.model_path = os.path.join(models_path, self.name) + self.model_name = "ScuNET GAN" + self.model_name2 = "ScuNET PSNR" + self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_gan.pth" + self.model_url2 = "https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_psnr.pth" + self.user_path = dirname + super().__init__() + model_paths = self.find_models(ext_filter=[".pth"]) + scalers = [] + add_model2 = True + for file in model_paths: + if "http" in file: + name = self.model_name + else: + name = modelloader.friendly_name(file) + if name == self.model_name2 or file == self.model_url2: + add_model2 = False + try: + scaler_data = modules.upscaler.UpscalerData(name, file, self, 4) + scalers.append(scaler_data) + except Exception: + print(f"Error loading ScuNET model: {file}", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + if add_model2: + scaler_data2 = modules.upscaler.UpscalerData(self.model_name2, self.model_url2, self) + scalers.append(scaler_data2) + self.scalers = scalers + + def do_upscale(self, img: PIL.Image, selected_file): + torch.cuda.empty_cache() + + model = self.load_model(selected_file) + if model is None: + return img + + device = shared.device + img = np.array(img) + img = img[:, :, ::-1] + img = np.moveaxis(img, 2, 0) / 255 + img = torch.from_numpy(img).float() + img = img.unsqueeze(0).to(shared.device) + + img = img.to(device) + with torch.no_grad(): + output = model(img) + output = output.squeeze().float().cpu().clamp_(0, 1).numpy() + output = 255. 
* np.moveaxis(output, 0, 2) + output = output.astype(np.uint8) + output = output[:, :, ::-1] + torch.cuda.empty_cache() + return PIL.Image.fromarray(output, 'RGB') + + def load_model(self, path: str): + device = shared.device + if "http" in path: + filename = load_file_from_url(url=self.model_url, model_dir=self.model_path, file_name="%s.pth" % self.name, + progress=True) + else: + filename = path + if not os.path.exists(os.path.join(self.model_path, filename)) or filename is None: + print(f"ScuNET: Unable to load model from {filename}", file=sys.stderr) + return None + + model = net(in_nc=3, config=[4, 4, 4, 4, 4, 4, 4], dim=64) + model.load_state_dict(torch.load(filename), strict=True) + model.eval() + for k, v in model.named_parameters(): + v.requires_grad = False + model = model.to(device) + + return model + diff --git a/modules/scunet_model_arch.py b/modules/scunet_model_arch.py new file mode 100644 index 000000000..972a2639a --- /dev/null +++ b/modules/scunet_model_arch.py @@ -0,0 +1,265 @@ +# -*- coding: utf-8 -*- +import numpy as np +import torch +import torch.nn as nn +from einops import rearrange +from einops.layers.torch import Rearrange +from timm.models.layers import trunc_normal_, DropPath + + +class WMSA(nn.Module): + """ Self-attention module in Swin Transformer + """ + + def __init__(self, input_dim, output_dim, head_dim, window_size, type): + super(WMSA, self).__init__() + self.input_dim = input_dim + self.output_dim = output_dim + self.head_dim = head_dim + self.scale = self.head_dim ** -0.5 + self.n_heads = input_dim // head_dim + self.window_size = window_size + self.type = type + self.embedding_layer = nn.Linear(self.input_dim, 3 * self.input_dim, bias=True) + + self.relative_position_params = nn.Parameter( + torch.zeros((2 * window_size - 1) * (2 * window_size - 1), self.n_heads)) + + self.linear = nn.Linear(self.input_dim, self.output_dim) + + trunc_normal_(self.relative_position_params, std=.02) + self.relative_position_params = torch.nn.Parameter( + self.relative_position_params.view(2 * window_size - 1, 2 * window_size - 1, self.n_heads).transpose(1, + 2).transpose( + 0, 1)) + + def generate_mask(self, h, w, p, shift): + """ generating the mask of SW-MSA + Args: + shift: shift parameters in CyclicShift. + Returns: + attn_mask: should be (1 1 w p p), + """ + # supporting sqaure. + attn_mask = torch.zeros(h, w, p, p, p, p, dtype=torch.bool, device=self.relative_position_params.device) + if self.type == 'W': + return attn_mask + + s = p - shift + attn_mask[-1, :, :s, :, s:, :] = True + attn_mask[-1, :, s:, :, :s, :] = True + attn_mask[:, -1, :, :s, :, s:] = True + attn_mask[:, -1, :, s:, :, :s] = True + attn_mask = rearrange(attn_mask, 'w1 w2 p1 p2 p3 p4 -> 1 1 (w1 w2) (p1 p2) (p3 p4)') + return attn_mask + + def forward(self, x): + """ Forward pass of Window Multi-head Self-attention module. 
+ Args: + x: input tensor with shape of [b h w c]; + attn_mask: attention mask, fill -inf where the value is True; + Returns: + output: tensor shape [b h w c] + """ + if self.type != 'W': x = torch.roll(x, shifts=(-(self.window_size // 2), -(self.window_size // 2)), dims=(1, 2)) + x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size) + h_windows = x.size(1) + w_windows = x.size(2) + # sqaure validation + # assert h_windows == w_windows + + x = rearrange(x, 'b w1 w2 p1 p2 c -> b (w1 w2) (p1 p2) c', p1=self.window_size, p2=self.window_size) + qkv = self.embedding_layer(x) + q, k, v = rearrange(qkv, 'b nw np (threeh c) -> threeh b nw np c', c=self.head_dim).chunk(3, dim=0) + sim = torch.einsum('hbwpc,hbwqc->hbwpq', q, k) * self.scale + # Adding learnable relative embedding + sim = sim + rearrange(self.relative_embedding(), 'h p q -> h 1 1 p q') + # Using Attn Mask to distinguish different subwindows. + if self.type != 'W': + attn_mask = self.generate_mask(h_windows, w_windows, self.window_size, shift=self.window_size // 2) + sim = sim.masked_fill_(attn_mask, float("-inf")) + + probs = nn.functional.softmax(sim, dim=-1) + output = torch.einsum('hbwij,hbwjc->hbwic', probs, v) + output = rearrange(output, 'h b w p c -> b w p (h c)') + output = self.linear(output) + output = rearrange(output, 'b (w1 w2) (p1 p2) c -> b (w1 p1) (w2 p2) c', w1=h_windows, p1=self.window_size) + + if self.type != 'W': output = torch.roll(output, shifts=(self.window_size // 2, self.window_size // 2), + dims=(1, 2)) + return output + + def relative_embedding(self): + cord = torch.tensor(np.array([[i, j] for i in range(self.window_size) for j in range(self.window_size)])) + relation = cord[:, None, :] - cord[None, :, :] + self.window_size - 1 + # negative is allowed + return self.relative_position_params[:, relation[:, :, 0].long(), relation[:, :, 1].long()] + + +class Block(nn.Module): + def __init__(self, input_dim, output_dim, head_dim, window_size, drop_path, type='W', input_resolution=None): + """ SwinTransformer Block + """ + super(Block, self).__init__() + self.input_dim = input_dim + self.output_dim = output_dim + assert type in ['W', 'SW'] + self.type = type + if input_resolution <= window_size: + self.type = 'W' + + self.ln1 = nn.LayerNorm(input_dim) + self.msa = WMSA(input_dim, input_dim, head_dim, window_size, self.type) + self.drop_path = DropPath(drop_path) if drop_path > 0. 
else nn.Identity() + self.ln2 = nn.LayerNorm(input_dim) + self.mlp = nn.Sequential( + nn.Linear(input_dim, 4 * input_dim), + nn.GELU(), + nn.Linear(4 * input_dim, output_dim), + ) + + def forward(self, x): + x = x + self.drop_path(self.msa(self.ln1(x))) + x = x + self.drop_path(self.mlp(self.ln2(x))) + return x + + +class ConvTransBlock(nn.Module): + def __init__(self, conv_dim, trans_dim, head_dim, window_size, drop_path, type='W', input_resolution=None): + """ SwinTransformer and Conv Block + """ + super(ConvTransBlock, self).__init__() + self.conv_dim = conv_dim + self.trans_dim = trans_dim + self.head_dim = head_dim + self.window_size = window_size + self.drop_path = drop_path + self.type = type + self.input_resolution = input_resolution + + assert self.type in ['W', 'SW'] + if self.input_resolution <= self.window_size: + self.type = 'W' + + self.trans_block = Block(self.trans_dim, self.trans_dim, self.head_dim, self.window_size, self.drop_path, + self.type, self.input_resolution) + self.conv1_1 = nn.Conv2d(self.conv_dim + self.trans_dim, self.conv_dim + self.trans_dim, 1, 1, 0, bias=True) + self.conv1_2 = nn.Conv2d(self.conv_dim + self.trans_dim, self.conv_dim + self.trans_dim, 1, 1, 0, bias=True) + + self.conv_block = nn.Sequential( + nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False), + nn.ReLU(True), + nn.Conv2d(self.conv_dim, self.conv_dim, 3, 1, 1, bias=False) + ) + + def forward(self, x): + conv_x, trans_x = torch.split(self.conv1_1(x), (self.conv_dim, self.trans_dim), dim=1) + conv_x = self.conv_block(conv_x) + conv_x + trans_x = Rearrange('b c h w -> b h w c')(trans_x) + trans_x = self.trans_block(trans_x) + trans_x = Rearrange('b h w c -> b c h w')(trans_x) + res = self.conv1_2(torch.cat((conv_x, trans_x), dim=1)) + x = x + res + + return x + + +class SCUNet(nn.Module): + # def __init__(self, in_nc=3, config=[2, 2, 2, 2, 2, 2, 2], dim=64, drop_path_rate=0.0, input_resolution=256): + def __init__(self, in_nc=3, config=None, dim=64, drop_path_rate=0.0, input_resolution=256): + super(SCUNet, self).__init__() + if config is None: + config = [2, 2, 2, 2, 2, 2, 2] + self.config = config + self.dim = dim + self.head_dim = 32 + self.window_size = 8 + + # drop path rate for each layer + dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(config))] + + self.m_head = [nn.Conv2d(in_nc, dim, 3, 1, 1, bias=False)] + + begin = 0 + self.m_down1 = [ConvTransBlock(dim // 2, dim // 2, self.head_dim, self.window_size, dpr[i + begin], + 'W' if not i % 2 else 'SW', input_resolution) + for i in range(config[0])] + \ + [nn.Conv2d(dim, 2 * dim, 2, 2, 0, bias=False)] + + begin += config[0] + self.m_down2 = [ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i + begin], + 'W' if not i % 2 else 'SW', input_resolution // 2) + for i in range(config[1])] + \ + [nn.Conv2d(2 * dim, 4 * dim, 2, 2, 0, bias=False)] + + begin += config[1] + self.m_down3 = [ConvTransBlock(2 * dim, 2 * dim, self.head_dim, self.window_size, dpr[i + begin], + 'W' if not i % 2 else 'SW', input_resolution // 4) + for i in range(config[2])] + \ + [nn.Conv2d(4 * dim, 8 * dim, 2, 2, 0, bias=False)] + + begin += config[2] + self.m_body = [ConvTransBlock(4 * dim, 4 * dim, self.head_dim, self.window_size, dpr[i + begin], + 'W' if not i % 2 else 'SW', input_resolution // 8) + for i in range(config[3])] + + begin += config[3] + self.m_up3 = [nn.ConvTranspose2d(8 * dim, 4 * dim, 2, 2, 0, bias=False), ] + \ + [ConvTransBlock(2 * dim, 2 * dim, self.head_dim, self.window_size, dpr[i + begin], + 'W' if not i % 2 
else 'SW', input_resolution // 4) + for i in range(config[4])] + + begin += config[4] + self.m_up2 = [nn.ConvTranspose2d(4 * dim, 2 * dim, 2, 2, 0, bias=False), ] + \ + [ConvTransBlock(dim, dim, self.head_dim, self.window_size, dpr[i + begin], + 'W' if not i % 2 else 'SW', input_resolution // 2) + for i in range(config[5])] + + begin += config[5] + self.m_up1 = [nn.ConvTranspose2d(2 * dim, dim, 2, 2, 0, bias=False), ] + \ + [ConvTransBlock(dim // 2, dim // 2, self.head_dim, self.window_size, dpr[i + begin], + 'W' if not i % 2 else 'SW', input_resolution) + for i in range(config[6])] + + self.m_tail = [nn.Conv2d(dim, in_nc, 3, 1, 1, bias=False)] + + self.m_head = nn.Sequential(*self.m_head) + self.m_down1 = nn.Sequential(*self.m_down1) + self.m_down2 = nn.Sequential(*self.m_down2) + self.m_down3 = nn.Sequential(*self.m_down3) + self.m_body = nn.Sequential(*self.m_body) + self.m_up3 = nn.Sequential(*self.m_up3) + self.m_up2 = nn.Sequential(*self.m_up2) + self.m_up1 = nn.Sequential(*self.m_up1) + self.m_tail = nn.Sequential(*self.m_tail) + # self.apply(self._init_weights) + + def forward(self, x0): + + h, w = x0.size()[-2:] + paddingBottom = int(np.ceil(h / 64) * 64 - h) + paddingRight = int(np.ceil(w / 64) * 64 - w) + x0 = nn.ReplicationPad2d((0, paddingRight, 0, paddingBottom))(x0) + + x1 = self.m_head(x0) + x2 = self.m_down1(x1) + x3 = self.m_down2(x2) + x4 = self.m_down3(x3) + x = self.m_body(x4) + x = self.m_up3(x + x4) + x = self.m_up2(x + x3) + x = self.m_up1(x + x2) + x = self.m_tail(x + x1) + + x = x[..., :h, :w] + + return x + + def _init_weights(self, m): + if isinstance(m, nn.Linear): + trunc_normal_(m.weight, std=.02) + if m.bias is not None: + nn.init.constant_(m.bias, 0) + elif isinstance(m, nn.LayerNorm): + nn.init.constant_(m.bias, 0) + nn.init.constant_(m.weight, 1.0) \ No newline at end of file diff --git a/modules/shared.py b/modules/shared.py index 8428c7a38..a48b995ad 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -40,6 +40,7 @@ parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory wi parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(model_path, 'ESRGAN')) parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(model_path, 'BSRGAN')) parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(model_path, 'RealESRGAN')) +parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(model_path, 'ScuNET')) parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(model_path, 'SwinIR')) parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(model_path, 'LDSR')) parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. 
By default, it's on for torch.cuda and off for other torch devices.") From abdbf1de646f007b6d76cfb3f416fdfaadb57903 Mon Sep 17 00:00:00 2001 From: Liam Date: Thu, 29 Sep 2022 14:40:47 -0400 Subject: [PATCH 007/460] token counters now update when roll artist and style buttons are pressed https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/1194#issuecomment-1261203893 --- javascript/ui.js | 28 ++++++++++++++++++++++------ modules/ui.py | 6 +++++- 2 files changed, 27 insertions(+), 7 deletions(-) diff --git a/javascript/ui.js b/javascript/ui.js index bfe024108..88fd45ae9 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -199,12 +199,21 @@ let txt2img_textarea, img2img_textarea = undefined; let wait_time = 800 let token_timeout; -function submit_prompt(event, generate_button_id) { - if (event.altKey && event.keyCode === 13) { - event.preventDefault(); - gradioApp().getElementById(generate_button_id).click(); - return; - } +function roll_artist_txt2img(prompt_text) { + update_token_counter("txt2img_token_button") + return prompt_text; +} +function roll_artist_img2img(prompt_text) { + update_token_counter("img2img_token_button") + return prompt_text; +} +function update_style_txt2img(prompt_text, negative_prompt, style1, style2) { + update_token_counter("txt2img_token_button") + return [prompt_text, negative_prompt, style1, style2] +} +function update_style_img2img(prompt_text, negative_prompt, style1, style2) { + update_token_counter("img2img_token_button") + return [prompt_text, negative_prompt, style1, style2] } function update_token_counter(button_id) { @@ -212,3 +221,10 @@ function update_token_counter(button_id) { clearTimeout(token_timeout); token_timeout = setTimeout(() => gradioApp().getElementById(button_id)?.click(), wait_time); } +function submit_prompt(event, generate_button_id) { + if (event.altKey && event.keyCode === 13) { + event.preventDefault(); + gradioApp().getElementById(generate_button_id).click(); + return; + } +} \ No newline at end of file diff --git a/modules/ui.py b/modules/ui.py index 15572bb0a..5eea18606 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -539,6 +539,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): roll.click( fn=roll_artist, + _js="roll_artist_txt2img", inputs=[ txt2img_prompt, ], @@ -743,6 +744,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): roll.click( fn=roll_artist, + _js="roll_artist_img2img", inputs=[ img2img_prompt, ], @@ -753,6 +755,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)] + style_js_funcs = ["update_style_txt2img", "update_style_img2img"] for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): button.click( @@ -764,9 +767,10 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): outputs=[txt2img_prompt_style, img2img_prompt_style, txt2img_prompt_style2, img2img_prompt_style2], ) - for button, (prompt, negative_prompt), (style1, style2) in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns): + for button, (prompt, negative_prompt), (style1, style2), js_func in zip([txt2img_prompt_style_apply, img2img_prompt_style_apply], prompts, style_dropdowns, style_js_funcs): button.click( fn=apply_styles, + _js=js_func, inputs=[prompt, 
negative_prompt, style1, style2], outputs=[prompt, negative_prompt, style1, style2], ) From ff8dc1908af088d0ed43fb85baad662733c5ca9c Mon Sep 17 00:00:00 2001 From: Liam Date: Thu, 29 Sep 2022 15:47:06 -0400 Subject: [PATCH 008/460] fixed token counter for prompt editing --- modules/ui.py | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 5eea18606..6bf28562c 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -11,6 +11,7 @@ import time import traceback import platform import subprocess as sp +from functools import reduce import numpy as np import torch @@ -32,6 +33,7 @@ import modules.gfpgan_model import modules.codeformer_model import modules.styles import modules.generation_parameters_copypaste +from modules.prompt_parser import get_learned_conditioning_prompt_schedules # this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the bowser will not show any UI mimetypes.init() @@ -345,8 +347,11 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: outputs=[seed, dummy_component] ) -def update_token_counter(text): - tokens, token_count, max_length = model_hijack.tokenize(text) +def update_token_counter(text, steps): + prompt_schedules = get_learned_conditioning_prompt_schedules([text], steps) + flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) + prompts = [prompt_text for step,prompt_text in flat_prompts] + tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1]) style_class = ' class="red"' if (token_count > max_length) else "" return f"{token_count}/{max_length}" @@ -364,8 +369,7 @@ def create_toprow(is_img2img): roll = gr.Button(value=art_symbol, elem_id="roll", visible=len(shared.artist_db.artists) > 0) paste = gr.Button(value=paste_symbol, elem_id="paste") token_counter = gr.HTML(value="", elem_id=f"{id_part}_token_counter") - hidden_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") - hidden_button.click(fn=update_token_counter, inputs=[prompt], outputs=[token_counter]) + token_button = gr.Button(visible=False, elem_id=f"{id_part}_token_button") with gr.Column(scale=10, elem_id="style_pos_col"): prompt_style = gr.Dropdown(label="Style 1", elem_id=f"{id_part}_style_index", choices=[k for k, v in shared.prompt_styles.styles.items()], value=next(iter(shared.prompt_styles.styles.keys())), visible=len(shared.prompt_styles.styles) > 1) @@ -396,7 +400,7 @@ def create_toprow(is_img2img): prompt_style_apply = gr.Button('Apply style', elem_id="style_apply") save_style = gr.Button('Create style', elem_id="style_create") - return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste + return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste, token_counter, token_button def setup_progressbar(progressbar, preview, id_part): @@ -419,7 +423,7 @@ def setup_progressbar(progressbar, preview, id_part): def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste = create_toprow(is_img2img=False) + txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, 
submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) with gr.Row(elem_id='txt2img_progress_row'): @@ -568,9 +572,10 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): (hr_options, lambda d: gr.Row.update(visible="Denoising strength" in d)), ] modules.generation_parameters_copypaste.connect_paste(paste, txt2img_paste_fields, txt2img_prompt) + token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter]) with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste = create_toprow(is_img2img=True) + img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True) with gr.Row(elem_id='img2img_progress_row'): with gr.Column(scale=1): @@ -793,6 +798,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): (denoising_strength, "Denoising strength"), ] modules.generation_parameters_copypaste.connect_paste(paste, img2img_paste_fields, img2img_prompt) + token_button.click(fn=update_token_counter, inputs=[img2img_prompt, steps], outputs=[token_counter]) with gr.Blocks(analytics_enabled=False) as extras_interface: with gr.Row().style(equal_height=False): From 3c6a049fc3c6b54ada3736710a7e86663ea7f3d9 Mon Sep 17 00:00:00 2001 From: Liam Date: Fri, 30 Sep 2022 12:12:44 -0400 Subject: [PATCH 009/460] consolidated token counter functions --- javascript/ui.js | 21 +++++++++------------ modules/ui.py | 6 +++--- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/javascript/ui.js b/javascript/ui.js index 88fd45ae9..f94ed081d 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -199,21 +199,18 @@ let txt2img_textarea, img2img_textarea = undefined; let wait_time = 800 let token_timeout; -function roll_artist_txt2img(prompt_text) { +function update_txt2img_tokens(...args) { update_token_counter("txt2img_token_button") - return prompt_text; + if (args.length == 2) + return args[0] + return args; } -function roll_artist_img2img(prompt_text) { + +function update_img2img_tokens(...args) { update_token_counter("img2img_token_button") - return prompt_text; -} -function update_style_txt2img(prompt_text, negative_prompt, style1, style2) { - update_token_counter("txt2img_token_button") - return [prompt_text, negative_prompt, style1, style2] -} -function update_style_img2img(prompt_text, negative_prompt, style1, style2) { - update_token_counter("img2img_token_button") - return [prompt_text, negative_prompt, style1, style2] + if (args.length == 2) + return args[0] + return args; } function update_token_counter(button_id) { diff --git a/modules/ui.py b/modules/ui.py index 6bf28562c..40c089841 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -543,7 +543,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): roll.click( fn=roll_artist, - _js="roll_artist_txt2img", + _js="update_txt2img_tokens", inputs=[ txt2img_prompt, ], @@ -749,7 +749,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): roll.click( fn=roll_artist, - _js="roll_artist_img2img", + _js="update_img2img_tokens", inputs=[ img2img_prompt, ], @@ -760,7 
+760,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): prompts = [(txt2img_prompt, txt2img_negative_prompt), (img2img_prompt, img2img_negative_prompt)] style_dropdowns = [(txt2img_prompt_style, txt2img_prompt_style2), (img2img_prompt_style, img2img_prompt_style2)] - style_js_funcs = ["update_style_txt2img", "update_style_img2img"] + style_js_funcs = ["update_txt2img_tokens", "update_img2img_tokens"] for button, (prompt, negative_prompt) in zip([txt2img_save_style, img2img_save_style], prompts): button.click( From bdaa36c84470adbdce3e98c01a69af5e95adfb02 Mon Sep 17 00:00:00 2001 From: brkirch Date: Fri, 30 Sep 2022 23:53:25 -0400 Subject: [PATCH 010/460] When device is MPS, use CPU for GFPGAN instead GFPGAN will not work if the device is MPS, so default to CPU instead. --- modules/devices.py | 2 +- modules/gfpgan_model.py | 6 +++--- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/devices.py b/modules/devices.py index 07bb23397..08bb26d6f 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -34,7 +34,7 @@ errors.run(enable_tf32, "Enabling TF32") device = get_optimal_device() -device_codeformer = cpu if has_mps else device +device_gfpgan = device_codeformer = cpu if device.type == 'mps' else device def randn(seed, shape): diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index bb30d7330..fcd8544a5 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -21,7 +21,7 @@ def gfpgann(): global loaded_gfpgan_model global model_path if loaded_gfpgan_model is not None: - loaded_gfpgan_model.gfpgan.to(shared.device) + loaded_gfpgan_model.gfpgan.to(devices.device_gfpgan) return loaded_gfpgan_model if gfpgan_constructor is None: @@ -36,8 +36,8 @@ def gfpgann(): else: print("Unable to load gfpgan model!") return None - model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None) - model.gfpgan.to(shared.device) + model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None, device=devices.device_gfpgan) + model.gfpgan.to(devices.device_gfpgan) loaded_gfpgan_model = model return model From 4c2478a68a4f11959fe4887d38e0436eac19f97e Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 1 Oct 2022 18:30:53 +0100 Subject: [PATCH 011/460] add script reload method --- modules/scripts.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/modules/scripts.py b/modules/scripts.py index 7c3bd5e74..3c14b9e32 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -165,3 +165,12 @@ class ScriptRunner: scripts_txt2img = ScriptRunner() scripts_img2img = ScriptRunner() + +def reload_scripts(basedir): + global scripts_txt2img,scripts_img2img + + scripts_data.clear() + load_scripts(basedir) + + scripts_txt2img = ScriptRunner() + scripts_img2img = ScriptRunner() From 95f35d04ab1636e08f69ca9c0ae2446714870e80 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 1 Oct 2022 18:31:58 +0100 Subject: [PATCH 012/460] Host busy thread, check for reload --- webui.py | 46 +++++++++++++++++++++++++++++++--------------- 1 file changed, 31 insertions(+), 15 deletions(-) diff --git a/webui.py b/webui.py index b8cccd546..4948c394f 100644 --- a/webui.py +++ b/webui.py @@ -86,22 +86,38 @@ def webui(): signal.signal(signal.SIGINT, sigint_handler) - demo = modules.ui.create_ui( - txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img), - 
img2img=wrap_gradio_gpu_call(modules.img2img.img2img), - run_extras=wrap_gradio_gpu_call(modules.extras.run_extras), - run_pnginfo=modules.extras.run_pnginfo, - run_modelmerger=modules.extras.run_modelmerger - ) + while 1: - demo.launch( - share=cmd_opts.share, - server_name="0.0.0.0" if cmd_opts.listen else None, - server_port=cmd_opts.port, - debug=cmd_opts.gradio_debug, - auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None, - inbrowser=cmd_opts.autolaunch, - ) + demo = modules.ui.create_ui( + txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img), + img2img=wrap_gradio_gpu_call(modules.img2img.img2img), + run_extras=wrap_gradio_gpu_call(modules.extras.run_extras), + run_pnginfo=modules.extras.run_pnginfo, + run_modelmerger=modules.extras.run_modelmerger + ) + + + demo.launch( + share=cmd_opts.share, + server_name="0.0.0.0" if cmd_opts.listen else None, + server_port=cmd_opts.port, + debug=cmd_opts.gradio_debug, + auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None, + inbrowser=cmd_opts.autolaunch, + prevent_thread_lock=True + ) + + while 1: + time.sleep(0.5) + if getattr(demo,'do_restart',False): + time.sleep(0.5) + demo.close() + time.sleep(0.5) + break + + print('Reloading Scripts') + modules.scripts.reload_scripts(os.path.join(script_path, "scripts")) + print('Restarting Gradio') if __name__ == "__main__": From 4f8490cd5630823ac44de8b5c5e4325bdbbea7fa Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 1 Oct 2022 18:33:31 +0100 Subject: [PATCH 013/460] add restart button --- modules/ui.py | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index 15572bb0a..ec6aaa288 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1002,6 +1002,17 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): _js='function(){}' ) + def request_restart(): + settings_interface.gradio_ref.do_restart = True + + restart_gradio = gr.Button(value='Restart Gradio and Refresh Scripts') + restart_gradio.click( + fn=request_restart, + inputs=[], + outputs=[], + _js='function(){document.body.innerHTML=\'
<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading</h1>
\';setTimeout(function(){location.reload()},2000)}' + ) + if column is not None: column.__exit__() @@ -1026,7 +1037,9 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): css += css_hide_progressbar with gr.Blocks(css=css, analytics_enabled=False, title="Stable Diffusion") as demo: - + + settings_interface.gradio_ref = demo + with gr.Tabs() as tabs: for interface, label, ifid in interfaces: with gr.TabItem(label, id=ifid): From 121ed7d36febe94995774973b5edc1ba2ba84aad Mon Sep 17 00:00:00 2001 From: Alexandre Simard Date: Sat, 1 Oct 2022 14:04:20 -0400 Subject: [PATCH 014/460] Add progress bar for SwinIR in cmd I do not know how to add them to the UI... --- modules/swinir_model.py | 25 ++++++++++++++----------- webui-user.bat | 2 +- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/modules/swinir_model.py b/modules/swinir_model.py index 41fda5a7c..9bd454c69 100644 --- a/modules/swinir_model.py +++ b/modules/swinir_model.py @@ -5,6 +5,7 @@ import numpy as np import torch from PIL import Image from basicsr.utils.download_util import load_file_from_url +from tqdm import tqdm from modules import modelloader from modules.paths import models_path @@ -122,18 +123,20 @@ def inference(img, model, tile, tile_overlap, window_size, scale): E = torch.zeros(b, c, h * sf, w * sf, dtype=torch.half, device=device).type_as(img) W = torch.zeros_like(E, dtype=torch.half, device=device) - for h_idx in h_idx_list: - for w_idx in w_idx_list: - in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile] - out_patch = model(in_patch) - out_patch_mask = torch.ones_like(out_patch) + with tqdm(total=len(h_idx_list) * len(w_idx_list), desc="SwinIR tiles") as pbar: + for h_idx in h_idx_list: + for w_idx in w_idx_list: + in_patch = img[..., h_idx: h_idx + tile, w_idx: w_idx + tile] + out_patch = model(in_patch) + out_patch_mask = torch.ones_like(out_patch) - E[ - ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf - ].add_(out_patch) - W[ - ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf - ].add_(out_patch_mask) + E[ + ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf + ].add_(out_patch) + W[ + ..., h_idx * sf: (h_idx + tile) * sf, w_idx * sf: (w_idx + tile) * sf + ].add_(out_patch_mask) + pbar.update(1) output = E.div_(W) return output diff --git a/webui-user.bat b/webui-user.bat index e5a257bef..5c7789535 100644 --- a/webui-user.bat +++ b/webui-user.bat @@ -3,6 +3,6 @@ set PYTHON= set GIT= set VENV_DIR= -set COMMANDLINE_ARGS= +set COMMANDLINE_ARGS=--autolaunch call webui.bat From b8a2b0453b62e4e99d0e5c049313402bc79056b5 Mon Sep 17 00:00:00 2001 From: Alexandre Simard Date: Sat, 1 Oct 2022 14:07:20 -0400 Subject: [PATCH 015/460] Set launch options to default --- webui-user.bat | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/webui-user.bat b/webui-user.bat index 5c7789535..e5a257bef 100644 --- a/webui-user.bat +++ b/webui-user.bat @@ -3,6 +3,6 @@ set PYTHON= set GIT= set VENV_DIR= -set COMMANDLINE_ARGS=--autolaunch +set COMMANDLINE_ARGS= call webui.bat From a9044475c06204deb886d2a69467d0d3a9f5c9be Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 1 Oct 2022 21:47:42 +0100 Subject: [PATCH 016/460] add time import --- webui.py | 1 + 1 file changed, 1 insertion(+) diff --git a/webui.py b/webui.py index 4948c394f..e2c4c2baa 100644 --- a/webui.py +++ b/webui.py @@ -1,5 +1,6 @@ import os import threading +import time from modules import devices from modules.paths import 
script_path From afaa03c5fd05f48ed9c9f15558ea6f0bc4f61628 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 1 Oct 2022 22:43:45 +0100 Subject: [PATCH 017/460] add redefinition guard to gradio_routes_templates_response --- modules/ui.py | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index ec6aaa288..fd057916e 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1219,12 +1219,13 @@ for filename in sorted(os.listdir(jsdir)): javascript += f"\n" -def template_response(*args, **kwargs): - res = gradio_routes_templates_response(*args, **kwargs) - res.body = res.body.replace(b'', f'{javascript}'.encode("utf8")) - res.init_headers() - return res +if 'gradio_routes_templates_response' not in globals(): + def template_response(*args, **kwargs): + res = gradio_routes_templates_response(*args, **kwargs) + res.body = res.body.replace(b'', f'{javascript}'.encode("utf8")) + res.init_headers() + return res + gradio_routes_templates_response = gradio.routes.templates.TemplateResponse + gradio.routes.templates.TemplateResponse = template_response -gradio_routes_templates_response = gradio.routes.templates.TemplateResponse -gradio.routes.templates.TemplateResponse = template_response From 30f2e3565840544dd66470c6ef216ec664db6432 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 1 Oct 2022 22:50:03 +0100 Subject: [PATCH 018/460] add importlib.reload --- webui.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/webui.py b/webui.py index e2c4c2baa..ab200045a 100644 --- a/webui.py +++ b/webui.py @@ -1,7 +1,7 @@ import os import threading import time - +import importlib from modules import devices from modules.paths import script_path import signal @@ -116,8 +116,10 @@ def webui(): time.sleep(0.5) break - print('Reloading Scripts') + print('Reloading Custom Scripts') modules.scripts.reload_scripts(os.path.join(script_path, "scripts")) + print('Reloading modules: modules.ui') + importlib.reload(modules.ui) print('Restarting Gradio') From 6048002dade91b82b1ce9fea3c6ff5b5c1f8c990 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 1 Oct 2022 23:10:07 +0100 Subject: [PATCH 019/460] Add scope warning to refresh button --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index fd057916e..72846a122 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1005,7 +1005,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): def request_restart(): settings_interface.gradio_ref.do_restart = True - restart_gradio = gr.Button(value='Restart Gradio and Refresh Scripts') + restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary') restart_gradio.click( fn=request_restart, inputs=[], From 027c5aae5546ff3650347cb3c2b87df4415ab900 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 1 Oct 2022 23:29:26 +0100 Subject: [PATCH 020/460] update reloading message style --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index 72846a122..7b2359c20 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1010,7 +1010,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): fn=request_restart, inputs=[], outputs=[], - _js='function(){document.body.innerHTML=\'
<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading</h1>
\';setTimeout(function(){location.reload()},2000)}' + _js='function(){document.body.innerHTML=\'
<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading...</h1>
\';setTimeout(function(){location.reload()},2000)}' ) if column is not None: From 55b046312c51bb7b2329d3b5b7f1c05956f821bf Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sun, 2 Oct 2022 00:12:49 +0100 Subject: [PATCH 021/460] move JavaScript into ui.js --- javascript/ui.js | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/javascript/ui.js b/javascript/ui.js index bfe024108..e8f289b44 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -212,3 +212,8 @@ function update_token_counter(button_id) { clearTimeout(token_timeout); token_timeout = setTimeout(() => gradioApp().getElementById(button_id)?.click(), wait_time); } + +function restart_reload(){ + document.body.innerHTML='
<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading...</h1>
'; + setTimeout(function(){location.reload()},2000) +} From 0aa354bd5e811e2b41b17a3052cf5d4c8190d533 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sun, 2 Oct 2022 00:13:47 +0100 Subject: [PATCH 022/460] remove styling from python side --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index 7b2359c20..cb859ac45 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1010,7 +1010,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): fn=request_restart, inputs=[], outputs=[], - _js='function(){document.body.innerHTML=\'
<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading...</h1>
\';setTimeout(function(){location.reload()},2000)}' + _js='function(){restart_reload()}' ) if column is not None: From cf33268d686986a24f2e04eb615f01ed53bfe308 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sun, 2 Oct 2022 01:18:42 +0100 Subject: [PATCH 023/460] add script body only refresh --- modules/scripts.py | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/modules/scripts.py b/modules/scripts.py index 3c14b9e32..788397f53 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -162,10 +162,33 @@ class ScriptRunner: return processed + def reload_sources(self): + for si,script in list(enumerate(self.scripts)): + with open(script.filename, "r", encoding="utf8") as file: + args_from = script.args_from + args_to = script.args_to + filename = script.filename + text = file.read() + + from types import ModuleType + compiled = compile(text, filename, 'exec') + module = ModuleType(script.filename) + exec(compiled, module.__dict__) + + for key, script_class in module.__dict__.items(): + if type(script_class) == type and issubclass(script_class, Script): + self.scripts[si] = script_class() + self.scripts[si].filename = filename + self.scripts[si].args_from = args_from + self.scripts[si].args_to = args_to scripts_txt2img = ScriptRunner() scripts_img2img = ScriptRunner() +def reload_script_body_only(): + scripts_txt2img.reload_sources() + scripts_img2img.reload_sources() + def reload_scripts(basedir): global scripts_txt2img,scripts_img2img From 07e40ad7f23472fc1c781fe1cc6c1ee403413918 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sun, 2 Oct 2022 01:19:55 +0100 Subject: [PATCH 024/460] add custom script body only refresh option --- modules/ui.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/modules/ui.py b/modules/ui.py index cb859ac45..eb7c05852 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1012,6 +1012,17 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): outputs=[], _js='function(){restart_reload()}' ) + + def reload_scripts(): + modules.scripts.reload_script_body_only() + + reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='primary') + reload_script_bodies.click( + fn=reload_scripts, + inputs=[], + outputs=[], + _js='function(){}' + ) if column is not None: column.__exit__() From 2deea867814272f1f089b60e9ba8d587c16b2fb1 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sun, 2 Oct 2022 01:36:30 +0100 Subject: [PATCH 025/460] Put reload buttons in row and add secondary style --- modules/ui.py | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index eb7c05852..963a2c611 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1002,27 +1002,30 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): _js='function(){}' ) - def request_restart(): - settings_interface.gradio_ref.do_restart = True + with gr.Row(): + reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary') + restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary') - restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary') - restart_gradio.click( - fn=request_restart, - inputs=[], - outputs=[], - 
_js='function(){restart_reload()}' - ) def reload_scripts(): modules.scripts.reload_script_body_only() - reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='primary') reload_script_bodies.click( fn=reload_scripts, inputs=[], outputs=[], _js='function(){}' ) + + def request_restart(): + settings_interface.gradio_ref.do_restart = True + + restart_gradio.click( + fn=request_restart, + inputs=[], + outputs=[], + _js='function(){restart_reload()}' + ) if column is not None: column.__exit__() From 3cf1a96006daffedb8ecd0ae142eca4c4da06105 Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Sat, 1 Oct 2022 21:11:03 -0700 Subject: [PATCH 026/460] added safety for blank directory naming patterns --- modules/images.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/images.py b/modules/images.py index f1aed5d6b..e7894b4cd 100644 --- a/modules/images.py +++ b/modules/images.py @@ -311,7 +311,7 @@ def apply_filename_pattern(x, p, seed, prompt): x = x.replace("[cfg]", str(p.cfg_scale)) x = x.replace("[width]", str(p.width)) x = x.replace("[height]", str(p.height)) - x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"]), replace_spaces=False)) + x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"]) or "No styles", replace_spaces=False)) x = x.replace("[sampler]", sanitize_filename_part(sd_samplers.samplers[p.sampler_index].name, replace_spaces=False)) x = x.replace("[model_hash]", shared.sd_model.sd_model_hash) @@ -374,7 +374,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt) if save_to_dirs: - dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt) + dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt).strip('\\ ') path = os.path.join(path, dirname) os.makedirs(path, exist_ok=True) From 70f526704721a303ae045f6406439dcceee4302e Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Sat, 1 Oct 2022 21:18:15 -0700 Subject: [PATCH 027/460] use os.path.normpath for better safety checking --- modules/images.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/images.py b/modules/images.py index e7894b4cd..5ef7eb926 100644 --- a/modules/images.py +++ b/modules/images.py @@ -374,8 +374,8 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt) if save_to_dirs: - dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt).strip('\\ ') - path = os.path.join(path, dirname) + dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt) + path = os.path.normpath(os.path.join(path, dirname)) os.makedirs(path, exist_ok=True) From 32edf1732f27a1fad5133667c22b948adda1b070 Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Sat, 1 Oct 2022 21:37:14 -0700 Subject: [PATCH 028/460] os.path.normpath wasn't working, reverting to manual strip --- modules/images.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/images.py b/modules/images.py index 5ef7eb926..4998e92cf 100644 --- a/modules/images.py +++ b/modules/images.py @@ -374,8 +374,8 @@ def save_image(image, 
path, basename, seed=None, prompt=None, extension='png', i save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt) if save_to_dirs: - dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt) - path = os.path.normpath(os.path.join(path, dirname)) + dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt).strip('\\ /') + path = os.path.join(path, dirname) os.makedirs(path, exist_ok=True) From 820f1dc96b1979d7e92170c161db281ee8bd988b Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 15:03:39 +0300 Subject: [PATCH 029/460] initial support for training textual inversion --- .gitignore | 1 + javascript/progressbar.js | 1 + javascript/textualInversion.js | 8 + modules/devices.py | 3 +- modules/processing.py | 13 +- modules/sd_hijack.py | 324 +++--------------- modules/sd_hijack_optimizations.py | 164 +++++++++ modules/sd_models.py | 4 +- modules/shared.py | 3 +- modules/textual_inversion/dataset.py | 76 ++++ .../textual_inversion/textual_inversion.py | 258 ++++++++++++++ modules/textual_inversion/ui.py | 32 ++ modules/ui.py | 139 +++++++- style.css | 10 +- textual_inversion_templates/style.txt | 19 + .../style_filewords.txt | 19 + textual_inversion_templates/subject.txt | 27 ++ .../subject_filewords.txt | 27 ++ webui.py | 15 +- 19 files changed, 828 insertions(+), 315 deletions(-) create mode 100644 javascript/textualInversion.js create mode 100644 modules/sd_hijack_optimizations.py create mode 100644 modules/textual_inversion/dataset.py create mode 100644 modules/textual_inversion/textual_inversion.py create mode 100644 modules/textual_inversion/ui.py create mode 100644 textual_inversion_templates/style.txt create mode 100644 textual_inversion_templates/style_filewords.txt create mode 100644 textual_inversion_templates/subject.txt create mode 100644 textual_inversion_templates/subject_filewords.txt diff --git a/.gitignore b/.gitignore index 3532dab37..7afc93953 100644 --- a/.gitignore +++ b/.gitignore @@ -25,3 +25,4 @@ __pycache__ /.idea notification.mp3 /SwinIR +/textual_inversion diff --git a/javascript/progressbar.js b/javascript/progressbar.js index 21f25b38d..1e297abbe 100644 --- a/javascript/progressbar.js +++ b/javascript/progressbar.js @@ -30,6 +30,7 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_inte onUiUpdate(function(){ check_progressbar('txt2img', 'txt2img_progressbar', 'txt2img_progress_span', 'txt2img_interrupt', 'txt2img_preview', 'txt2img_gallery') check_progressbar('img2img', 'img2img_progressbar', 'img2img_progress_span', 'img2img_interrupt', 'img2img_preview', 'img2img_gallery') + check_progressbar('ti', 'ti_progressbar', 'ti_progress_span', 'ti_interrupt', 'ti_preview', 'ti_gallery') }) function requestMoreProgress(id_part, id_progressbar_span, id_interrupt){ diff --git a/javascript/textualInversion.js b/javascript/textualInversion.js new file mode 100644 index 000000000..8061be089 --- /dev/null +++ b/javascript/textualInversion.js @@ -0,0 +1,8 @@ + + +function start_training_textual_inversion(){ + requestProgress('ti') + gradioApp().querySelector('#ti_error').innerHTML='' + + return args_to_array(arguments) +} diff --git a/modules/devices.py b/modules/devices.py index 07bb23397..ff82f2f64 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -32,10 +32,9 @@ def enable_tf32(): errors.run(enable_tf32, "Enabling TF32") - device = get_optimal_device() 
device_codeformer = cpu if has_mps else device - +dtype = torch.float16 def randn(seed, shape): # Pytorch currently doesn't handle setting randomness correctly when the metal backend is used. diff --git a/modules/processing.py b/modules/processing.py index 7eeb5191c..8223423ab 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -56,7 +56,7 @@ class StableDiffusionProcessing: self.prompt: str = prompt self.prompt_for_display: str = None self.negative_prompt: str = (negative_prompt or "") - self.styles: str = styles + self.styles: list = styles or [] self.seed: int = seed self.subseed: int = subseed self.subseed_strength: float = subseed_strength @@ -271,7 +271,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration "Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength), "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"), "Denoising strength": getattr(p, 'denoising_strength', None), - "Eta": (None if p.sampler.eta == p.sampler.default_eta else p.sampler.eta), + "Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta), } generation_params.update(p.extra_generation_params) @@ -295,8 +295,11 @@ def process_images(p: StableDiffusionProcessing) -> Processed: fix_seed(p) - os.makedirs(p.outpath_samples, exist_ok=True) - os.makedirs(p.outpath_grids, exist_ok=True) + if p.outpath_samples is not None: + os.makedirs(p.outpath_samples, exist_ok=True) + + if p.outpath_grids is not None: + os.makedirs(p.outpath_grids, exist_ok=True) modules.sd_hijack.model_hijack.apply_circular(p.tiling) @@ -323,7 +326,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: return create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration, position_in_batch) if os.path.exists(cmd_opts.embeddings_dir): - model_hijack.load_textual_inversion_embeddings(cmd_opts.embeddings_dir, p.sd_model) + model_hijack.embedding_db.load_textual_inversion_embeddings() infotexts = [] output_images = [] diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index fa7eaeb89..fd57e5c54 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -6,244 +6,41 @@ import torch import numpy as np from torch import einsum -from modules import prompt_parser +import modules.textual_inversion.textual_inversion +from modules import prompt_parser, devices, sd_hijack_optimizations, shared from modules.shared import opts, device, cmd_opts -from ldm.util import default -from einops import rearrange import ldm.modules.attention import ldm.modules.diffusionmodules.model - -# see https://github.com/basujindal/stable-diffusion/pull/117 for discussion -def split_cross_attention_forward_v1(self, x, context=None, mask=None): - h = self.heads - - q = self.to_q(x) - context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) - del context, x - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - - r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device) - for i in range(0, q.shape[0], 2): - end = i + 2 - s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end]) - s1 *= self.scale - - s2 = s1.softmax(dim=-1) - del s1 - - r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end]) - del s2 - - r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h) - del r1 - - return self.to_out(r2) +attention_CrossAttention_forward = ldm.modules.attention.CrossAttention.forward 
+diffusionmodules_model_nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity +diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward -# taken from https://github.com/Doggettx/stable-diffusion -def split_cross_attention_forward(self, x, context=None, mask=None): - h = self.heads +def apply_optimizations(): + if cmd_opts.opt_split_attention_v1: + ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 + elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()): + ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward + ldm.modules.diffusionmodules.model.nonlinearity = sd_hijack_optimizations.nonlinearity_hijack + ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward - q_in = self.to_q(x) - context = default(context, x) - k_in = self.to_k(context) * self.scale - v_in = self.to_v(context) - del context, x - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) - del q_in, k_in, v_in +def undo_optimizations(): + ldm.modules.attention.CrossAttention.forward = attention_CrossAttention_forward + ldm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity + ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward - r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) - - stats = torch.cuda.memory_stats(q.device) - mem_active = stats['active_bytes.all.current'] - mem_reserved = stats['reserved_bytes.all.current'] - mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device()) - mem_free_torch = mem_reserved - mem_active - mem_free_total = mem_free_cuda + mem_free_torch - - gb = 1024 ** 3 - tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() - modifier = 3 if q.element_size() == 2 else 2.5 - mem_required = tensor_size * modifier - steps = 1 - - if mem_required > mem_free_total: - steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2))) - # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB " - # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}") - - if steps > 64: - max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64 - raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). 
' - f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free') - - slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] - for i in range(0, q.shape[1], slice_size): - end = i + slice_size - s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) - - s2 = s1.softmax(dim=-1, dtype=q.dtype) - del s1 - - r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v) - del s2 - - del q, k, v - - r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h) - del r1 - - return self.to_out(r2) - -def nonlinearity_hijack(x): - # swish - t = torch.sigmoid(x) - x *= t - del t - - return x - -def cross_attention_attnblock_forward(self, x): - h_ = x - h_ = self.norm(h_) - q1 = self.q(h_) - k1 = self.k(h_) - v = self.v(h_) - - # compute attention - b, c, h, w = q1.shape - - q2 = q1.reshape(b, c, h*w) - del q1 - - q = q2.permute(0, 2, 1) # b,hw,c - del q2 - - k = k1.reshape(b, c, h*w) # b,c,hw - del k1 - - h_ = torch.zeros_like(k, device=q.device) - - stats = torch.cuda.memory_stats(q.device) - mem_active = stats['active_bytes.all.current'] - mem_reserved = stats['reserved_bytes.all.current'] - mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device()) - mem_free_torch = mem_reserved - mem_active - mem_free_total = mem_free_cuda + mem_free_torch - - tensor_size = q.shape[0] * q.shape[1] * k.shape[2] * q.element_size() - mem_required = tensor_size * 2.5 - steps = 1 - - if mem_required > mem_free_total: - steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2))) - - slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] - for i in range(0, q.shape[1], slice_size): - end = i + slice_size - - w1 = torch.bmm(q[:, i:end], k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] - w2 = w1 * (int(c)**(-0.5)) - del w1 - w3 = torch.nn.functional.softmax(w2, dim=2, dtype=q.dtype) - del w2 - - # attend to values - v1 = v.reshape(b, c, h*w) - w4 = w3.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q) - del w3 - - h_[:, :, i:end] = torch.bmm(v1, w4) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] - del v1, w4 - - h2 = h_.reshape(b, c, h, w) - del h_ - - h3 = self.proj_out(h2) - del h2 - - h3 += x - - return h3 class StableDiffusionModelHijack: - ids_lookup = {} - word_embeddings = {} - word_embeddings_checksums = {} fixes = None comments = [] - dir_mtime = None layers = None circular_enabled = False clip = None - def load_textual_inversion_embeddings(self, dirname, model): - mt = os.path.getmtime(dirname) - if self.dir_mtime is not None and mt <= self.dir_mtime: - return - - self.dir_mtime = mt - self.ids_lookup.clear() - self.word_embeddings.clear() - - tokenizer = model.cond_stage_model.tokenizer - - def const_hash(a): - r = 0 - for v in a: - r = (r * 281 ^ int(v) * 997) & 0xFFFFFFFF - return r - - def process_file(path, filename): - name = os.path.splitext(filename)[0] - - data = torch.load(path, map_location="cpu") - - # textual inversion embeddings - if 'string_to_param' in data: - param_dict = data['string_to_param'] - if hasattr(param_dict, '_parameters'): - param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11 - assert len(param_dict) == 1, 'embedding file has multiple terms in it' - emb = next(iter(param_dict.items()))[1] - # diffuser concepts - elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor: - assert len(data.keys()) == 1, 'embedding file has multiple terms in it' - - emb = next(iter(data.values())) - if len(emb.shape) == 1: - emb = emb.unsqueeze(0) - - 
self.word_embeddings[name] = emb.detach().to(device) - self.word_embeddings_checksums[name] = f'{const_hash(emb.reshape(-1)*100)&0xffff:04x}' - - ids = tokenizer([name], add_special_tokens=False)['input_ids'][0] - - first_id = ids[0] - if first_id not in self.ids_lookup: - self.ids_lookup[first_id] = [] - self.ids_lookup[first_id].append((ids, name)) - - for fn in os.listdir(dirname): - try: - fullfn = os.path.join(dirname, fn) - - if os.stat(fullfn).st_size == 0: - continue - - process_file(fullfn, fn) - except Exception: - print(f"Error loading emedding {fn}:", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - continue - - print(f"Loaded a total of {len(self.word_embeddings)} textual inversion embeddings.") + embedding_db = modules.textual_inversion.textual_inversion.EmbeddingDatabase(cmd_opts.embeddings_dir) def hijack(self, m): model_embeddings = m.cond_stage_model.transformer.text_model.embeddings @@ -253,12 +50,7 @@ class StableDiffusionModelHijack: self.clip = m.cond_stage_model - if cmd_opts.opt_split_attention_v1: - ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward_v1 - elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()): - ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward - ldm.modules.diffusionmodules.model.nonlinearity = nonlinearity_hijack - ldm.modules.diffusionmodules.model.AttnBlock.forward = cross_attention_attnblock_forward + apply_optimizations() def flatten(el): flattened = [flatten(children) for children in el.children()] @@ -296,7 +88,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): def __init__(self, wrapped, hijack): super().__init__() self.wrapped = wrapped - self.hijack = hijack + self.hijack: StableDiffusionModelHijack = hijack self.tokenizer = wrapped.tokenizer self.max_length = wrapped.max_length self.token_mults = {} @@ -317,7 +109,6 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): if mult != 1.0: self.token_mults[ident] = mult - def tokenize_line(self, line, used_custom_terms, hijack_comments): id_start = self.wrapped.tokenizer.bos_token_id id_end = self.wrapped.tokenizer.eos_token_id @@ -339,28 +130,19 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): while i < len(tokens): token = tokens[i] - possible_matches = self.hijack.ids_lookup.get(token, None) + embedding = self.hijack.embedding_db.find_embedding_at_position(tokens, i) - if possible_matches is None: + if embedding is None: remade_tokens.append(token) multipliers.append(weight) + i += 1 else: - found = False - for ids, word in possible_matches: - if tokens[i:i + len(ids)] == ids: - emb_len = int(self.hijack.word_embeddings[word].shape[0]) - fixes.append((len(remade_tokens), word)) - remade_tokens += [0] * emb_len - multipliers += [weight] * emb_len - i += len(ids) - 1 - found = True - used_custom_terms.append((word, self.hijack.word_embeddings_checksums[word])) - break - - if not found: - remade_tokens.append(token) - multipliers.append(weight) - i += 1 + emb_len = int(embedding.vec.shape[0]) + fixes.append((len(remade_tokens), embedding)) + remade_tokens += [0] * emb_len + multipliers += [weight] * emb_len + used_custom_terms.append((embedding.name, embedding.checksum())) + i += emb_len if len(remade_tokens) > maxlen - 2: vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()} @@ -431,32 +213,23 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): while i < len(tokens): token = tokens[i] - possible_matches = 
self.hijack.ids_lookup.get(token, None) + embedding = self.hijack.embedding_db.find_embedding_at_position(tokens, i) mult_change = self.token_mults.get(token) if opts.enable_emphasis else None if mult_change is not None: mult *= mult_change - elif possible_matches is None: + i += 1 + elif embedding is None: remade_tokens.append(token) multipliers.append(mult) + i += 1 else: - found = False - for ids, word in possible_matches: - if tokens[i:i+len(ids)] == ids: - emb_len = int(self.hijack.word_embeddings[word].shape[0]) - fixes.append((len(remade_tokens), word)) - remade_tokens += [0] * emb_len - multipliers += [mult] * emb_len - i += len(ids) - 1 - found = True - used_custom_terms.append((word, self.hijack.word_embeddings_checksums[word])) - break - - if not found: - remade_tokens.append(token) - multipliers.append(mult) - - i += 1 + emb_len = int(embedding.vec.shape[0]) + fixes.append((len(remade_tokens), embedding)) + remade_tokens += [0] * emb_len + multipliers += [mult] * emb_len + used_custom_terms.append((embedding.name, embedding.checksum())) + i += emb_len if len(remade_tokens) > maxlen - 2: vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()} @@ -464,6 +237,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): overflowing_words = [vocab.get(int(x), "") for x in ovf] overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words)) hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n") + token_count = len(remade_tokens) remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens)) remade_tokens = [id_start] + remade_tokens[0:maxlen-2] + [id_end] @@ -484,7 +258,6 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): else: batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text) - self.hijack.fixes = hijack_fixes self.hijack.comments = hijack_comments @@ -517,14 +290,19 @@ class EmbeddingsWithFixes(torch.nn.Module): inputs_embeds = self.wrapped(input_ids) - if batch_fixes is not None: - for fixes, tensor in zip(batch_fixes, inputs_embeds): - for offset, word in fixes: - emb = self.embeddings.word_embeddings[word] - emb_len = min(tensor.shape[0]-offset-1, emb.shape[0]) - tensor[offset+1:offset+1+emb_len] = self.embeddings.word_embeddings[word][0:emb_len] + if batch_fixes is None or len(batch_fixes) == 0 or max([len(x) for x in batch_fixes]) == 0: + return inputs_embeds - return inputs_embeds + vecs = [] + for fixes, tensor in zip(batch_fixes, inputs_embeds): + for offset, embedding in fixes: + emb = embedding.vec + emb_len = min(tensor.shape[0]-offset-1, emb.shape[0]) + tensor = torch.cat([tensor[0:offset+1], emb[0:emb_len], tensor[offset+1+emb_len:]]) + + vecs.append(tensor) + + return torch.stack(vecs) def add_circular_option_to_conv_2d(): diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py new file mode 100644 index 000000000..9c079e578 --- /dev/null +++ b/modules/sd_hijack_optimizations.py @@ -0,0 +1,164 @@ +import math +import torch +from torch import einsum + +from ldm.util import default +from einops import rearrange + + +# see https://github.com/basujindal/stable-diffusion/pull/117 for discussion +def split_cross_attention_forward_v1(self, x, context=None, mask=None): + h = self.heads + + q = self.to_q(x) + context = default(context, x) + k = self.to_k(context) + v = self.to_v(context) + del context, x + + q, k, v = 
map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + + r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device) + for i in range(0, q.shape[0], 2): + end = i + 2 + s1 = einsum('b i d, b j d -> b i j', q[i:end], k[i:end]) + s1 *= self.scale + + s2 = s1.softmax(dim=-1) + del s1 + + r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end]) + del s2 + + r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h) + del r1 + + return self.to_out(r2) + + +# taken from https://github.com/Doggettx/stable-diffusion +def split_cross_attention_forward(self, x, context=None, mask=None): + h = self.heads + + q_in = self.to_q(x) + context = default(context, x) + k_in = self.to_k(context) * self.scale + v_in = self.to_v(context) + del context, x + + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) + del q_in, k_in, v_in + + r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device, dtype=q.dtype) + + stats = torch.cuda.memory_stats(q.device) + mem_active = stats['active_bytes.all.current'] + mem_reserved = stats['reserved_bytes.all.current'] + mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device()) + mem_free_torch = mem_reserved - mem_active + mem_free_total = mem_free_cuda + mem_free_torch + + gb = 1024 ** 3 + tensor_size = q.shape[0] * q.shape[1] * k.shape[1] * q.element_size() + modifier = 3 if q.element_size() == 2 else 2.5 + mem_required = tensor_size * modifier + steps = 1 + + if mem_required > mem_free_total: + steps = 2 ** (math.ceil(math.log(mem_required / mem_free_total, 2))) + # print(f"Expected tensor size:{tensor_size/gb:0.1f}GB, cuda free:{mem_free_cuda/gb:0.1f}GB " + # f"torch free:{mem_free_torch/gb:0.1f} total:{mem_free_total/gb:0.1f} steps:{steps}") + + if steps > 64: + max_res = math.floor(math.sqrt(math.sqrt(mem_free_total / 2.5)) / 8) * 64 + raise RuntimeError(f'Not enough memory, use lower resolution (max approx. {max_res}x{max_res}). 
' + f'Need: {mem_required / 64 / gb:0.1f}GB free, Have:{mem_free_total / gb:0.1f}GB free') + + slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] + for i in range(0, q.shape[1], slice_size): + end = i + slice_size + s1 = einsum('b i d, b j d -> b i j', q[:, i:end], k) + + s2 = s1.softmax(dim=-1, dtype=q.dtype) + del s1 + + r1[:, i:end] = einsum('b i j, b j d -> b i d', s2, v) + del s2 + + del q, k, v + + r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h) + del r1 + + return self.to_out(r2) + +def nonlinearity_hijack(x): + # swish + t = torch.sigmoid(x) + x *= t + del t + + return x + +def cross_attention_attnblock_forward(self, x): + h_ = x + h_ = self.norm(h_) + q1 = self.q(h_) + k1 = self.k(h_) + v = self.v(h_) + + # compute attention + b, c, h, w = q1.shape + + q2 = q1.reshape(b, c, h*w) + del q1 + + q = q2.permute(0, 2, 1) # b,hw,c + del q2 + + k = k1.reshape(b, c, h*w) # b,c,hw + del k1 + + h_ = torch.zeros_like(k, device=q.device) + + stats = torch.cuda.memory_stats(q.device) + mem_active = stats['active_bytes.all.current'] + mem_reserved = stats['reserved_bytes.all.current'] + mem_free_cuda, _ = torch.cuda.mem_get_info(torch.cuda.current_device()) + mem_free_torch = mem_reserved - mem_active + mem_free_total = mem_free_cuda + mem_free_torch + + tensor_size = q.shape[0] * q.shape[1] * k.shape[2] * q.element_size() + mem_required = tensor_size * 2.5 + steps = 1 + + if mem_required > mem_free_total: + steps = 2**(math.ceil(math.log(mem_required / mem_free_total, 2))) + + slice_size = q.shape[1] // steps if (q.shape[1] % steps) == 0 else q.shape[1] + for i in range(0, q.shape[1], slice_size): + end = i + slice_size + + w1 = torch.bmm(q[:, i:end], k) # b,hw,hw w[b,i,j]=sum_c q[b,i,c]k[b,c,j] + w2 = w1 * (int(c)**(-0.5)) + del w1 + w3 = torch.nn.functional.softmax(w2, dim=2, dtype=q.dtype) + del w2 + + # attend to values + v1 = v.reshape(b, c, h*w) + w4 = w3.permute(0, 2, 1) # b,hw,hw (first hw of k, second of q) + del w3 + + h_[:, :, i:end] = torch.bmm(v1, w4) # b, c,hw (hw of q) h_[b,c,j] = sum_i v[b,c,i] w_[b,i,j] + del v1, w4 + + h2 = h_.reshape(b, c, h, w) + del h_ + + h3 = self.proj_out(h2) + del h2 + + h3 += x + + return h3 diff --git a/modules/sd_models.py b/modules/sd_models.py index 2539f14cd..5b3dbdc79 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -8,7 +8,7 @@ from omegaconf import OmegaConf from ldm.util import instantiate_from_config -from modules import shared, modelloader +from modules import shared, modelloader, devices from modules.paths import models_path model_dir = "Stable-diffusion" @@ -134,6 +134,8 @@ def load_model_weights(model, checkpoint_file, sd_model_hash): if not shared.cmd_opts.no_half: model.half() + devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16 + model.sd_model_hash = sd_model_hash model.sd_model_checkpint = checkpoint_file diff --git a/modules/shared.py b/modules/shared.py index ac968b2d2..ac0bc480c 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -78,6 +78,7 @@ class State: current_latent = None current_image = None current_image_sampling_step = 0 + textinfo = None def interrupt(self): self.interrupted = True @@ -88,7 +89,7 @@ class State: self.current_image_sampling_step = 0 def get_job_timestamp(self): - return datetime.datetime.now().strftime("%Y%m%d%H%M%S") + return datetime.datetime.now().strftime("%Y%m%d%H%M%S") # shouldn't this return job_timestamp? 
state = State() diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py new file mode 100644 index 000000000..7e134a08f --- /dev/null +++ b/modules/textual_inversion/dataset.py @@ -0,0 +1,76 @@ +import os +import numpy as np +import PIL +import torch +from PIL import Image +from torch.utils.data import Dataset +from torchvision import transforms + +import random +import tqdm + + +class PersonalizedBase(Dataset): + def __init__(self, data_root, size=None, repeats=100, flip_p=0.5, placeholder_token="*", width=512, height=512, model=None, device=None, template_file=None): + + self.placeholder_token = placeholder_token + + self.size = size + self.width = width + self.height = height + self.flip = transforms.RandomHorizontalFlip(p=flip_p) + + self.dataset = [] + + with open(template_file, "r") as file: + lines = [x.strip() for x in file.readlines()] + + self.lines = lines + + assert data_root, 'dataset directory not specified' + + self.image_paths = [os.path.join(data_root, file_path) for file_path in os.listdir(data_root)] + print("Preparing dataset...") + for path in tqdm.tqdm(self.image_paths): + image = Image.open(path) + image = image.convert('RGB') + image = image.resize((self.width, self.height), PIL.Image.BICUBIC) + + filename = os.path.basename(path) + filename_tokens = os.path.splitext(filename)[0].replace('_', '-').replace(' ', '-').split('-') + filename_tokens = [token for token in filename_tokens if token.isalpha()] + + npimage = np.array(image).astype(np.uint8) + npimage = (npimage / 127.5 - 1.0).astype(np.float32) + + torchdata = torch.from_numpy(npimage).to(device=device, dtype=torch.float32) + torchdata = torch.moveaxis(torchdata, 2, 0) + + init_latent = model.get_first_stage_encoding(model.encode_first_stage(torchdata.unsqueeze(dim=0))).squeeze() + + self.dataset.append((init_latent, filename_tokens)) + + self.length = len(self.dataset) * repeats + + self.initial_indexes = np.arange(self.length) % len(self.dataset) + self.indexes = None + self.shuffle() + + def shuffle(self): + self.indexes = self.initial_indexes[torch.randperm(self.initial_indexes.shape[0])] + + def __len__(self): + return self.length + + def __getitem__(self, i): + if i % len(self.dataset) == 0: + self.shuffle() + + index = self.indexes[i % len(self.indexes)] + x, filename_tokens = self.dataset[index] + + text = random.choice(self.lines) + text = text.replace("[name]", self.placeholder_token) + text = text.replace("[filewords]", ' '.join(filename_tokens)) + + return x, text diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py new file mode 100644 index 000000000..c0baaace2 --- /dev/null +++ b/modules/textual_inversion/textual_inversion.py @@ -0,0 +1,258 @@ +import os +import sys +import traceback + +import torch +import tqdm +import html +import datetime + +from modules import shared, devices, sd_hijack, processing +import modules.textual_inversion.dataset + + +class Embedding: + def __init__(self, vec, name, step=None): + self.vec = vec + self.name = name + self.step = step + self.cached_checksum = None + + def save(self, filename): + embedding_data = { + "string_to_token": {"*": 265}, + "string_to_param": {"*": self.vec}, + "name": self.name, + "step": self.step, + } + + torch.save(embedding_data, filename) + + def checksum(self): + if self.cached_checksum is not None: + return self.cached_checksum + + def const_hash(a): + r = 0 + for v in a: + r = (r * 281 ^ int(v) * 997) & 0xFFFFFFFF + return r + + 
self.cached_checksum = f'{const_hash(self.vec.reshape(-1) * 100) & 0xffff:04x}' + return self.cached_checksum + +class EmbeddingDatabase: + def __init__(self, embeddings_dir): + self.ids_lookup = {} + self.word_embeddings = {} + self.dir_mtime = None + self.embeddings_dir = embeddings_dir + + def register_embedding(self, embedding, model): + + self.word_embeddings[embedding.name] = embedding + + ids = model.cond_stage_model.tokenizer([embedding.name], add_special_tokens=False)['input_ids'][0] + + first_id = ids[0] + if first_id not in self.ids_lookup: + self.ids_lookup[first_id] = [] + self.ids_lookup[first_id].append((ids, embedding)) + + return embedding + + def load_textual_inversion_embeddings(self): + mt = os.path.getmtime(self.embeddings_dir) + if self.dir_mtime is not None and mt <= self.dir_mtime: + return + + self.dir_mtime = mt + self.ids_lookup.clear() + self.word_embeddings.clear() + + def process_file(path, filename): + name = os.path.splitext(filename)[0] + + data = torch.load(path, map_location="cpu") + + # textual inversion embeddings + if 'string_to_param' in data: + param_dict = data['string_to_param'] + if hasattr(param_dict, '_parameters'): + param_dict = getattr(param_dict, '_parameters') # fix for torch 1.12.1 loading saved file from torch 1.11 + assert len(param_dict) == 1, 'embedding file has multiple terms in it' + emb = next(iter(param_dict.items()))[1] + # diffuser concepts + elif type(data) == dict and type(next(iter(data.values()))) == torch.Tensor: + assert len(data.keys()) == 1, 'embedding file has multiple terms in it' + + emb = next(iter(data.values())) + if len(emb.shape) == 1: + emb = emb.unsqueeze(0) + else: + raise Exception(f"Couldn't identify {filename} as neither textual inversion embedding nor diffuser concept.") + + vec = emb.detach().to(devices.device, dtype=torch.float32) + embedding = Embedding(vec, name) + embedding.step = data.get('step', None) + self.register_embedding(embedding, shared.sd_model) + + for fn in os.listdir(self.embeddings_dir): + try: + fullfn = os.path.join(self.embeddings_dir, fn) + + if os.stat(fullfn).st_size == 0: + continue + + process_file(fullfn, fn) + except Exception: + print(f"Error loading emedding {fn}:", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + continue + + print(f"Loaded a total of {len(self.word_embeddings)} textual inversion embeddings.") + + def find_embedding_at_position(self, tokens, offset): + token = tokens[offset] + possible_matches = self.ids_lookup.get(token, None) + + if possible_matches is None: + return None + + for ids, embedding in possible_matches: + if tokens[offset:offset + len(ids)] == ids: + return embedding + + return None + + + +def create_embedding(name, num_vectors_per_token): + init_text = '*' + + cond_model = shared.sd_model.cond_stage_model + embedding_layer = cond_model.wrapped.transformer.text_model.embeddings + + ids = cond_model.tokenizer(init_text, max_length=num_vectors_per_token, return_tensors="pt", add_special_tokens=False)["input_ids"] + embedded = embedding_layer(ids.to(devices.device)).squeeze(0) + vec = torch.zeros((num_vectors_per_token, embedded.shape[1]), device=devices.device) + + for i in range(num_vectors_per_token): + vec[i] = embedded[i * int(embedded.shape[0]) // num_vectors_per_token] + + fn = os.path.join(shared.cmd_opts.embeddings_dir, f"{name}.pt") + assert not os.path.exists(fn), f"file {fn} already exists" + + embedding = Embedding(vec, name) + embedding.step = 0 + embedding.save(fn) + + return fn + + +def 
train_embedding(embedding_name, learn_rate, data_root, log_directory, steps, create_image_every, save_embedding_every, template_file): + assert embedding_name, 'embedding not selected' + + shared.state.textinfo = "Initializing textual inversion training..." + shared.state.job_count = steps + + filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt') + + log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%d-%m"), embedding_name) + + if save_embedding_every > 0: + embedding_dir = os.path.join(log_directory, "embeddings") + os.makedirs(embedding_dir, exist_ok=True) + else: + embedding_dir = None + + if create_image_every > 0: + images_dir = os.path.join(log_directory, "images") + os.makedirs(images_dir, exist_ok=True) + else: + images_dir = None + + cond_model = shared.sd_model.cond_stage_model + + shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." + with torch.autocast("cuda"): + ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, size=512, placeholder_token=embedding_name, model=shared.sd_model, device=devices.device, template_file=template_file) + + hijack = sd_hijack.model_hijack + + embedding = hijack.embedding_db.word_embeddings[embedding_name] + embedding.vec.requires_grad = True + + optimizer = torch.optim.AdamW([embedding.vec], lr=learn_rate) + + losses = torch.zeros((32,)) + + last_saved_file = "" + last_saved_image = "" + + ititial_step = embedding.step or 0 + if ititial_step > steps: + return embedding, filename + + pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step) + for i, (x, text) in pbar: + embedding.step = i + ititial_step + + if embedding.step > steps: + break + + if shared.state.interrupted: + break + + with torch.autocast("cuda"): + c = cond_model([text]) + loss = shared.sd_model(x.unsqueeze(0), c)[0] + + losses[embedding.step % losses.shape[0]] = loss.item() + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + pbar.set_description(f"loss: {losses.mean():.7f}") + + if embedding.step > 0 and embedding_dir is not None and embedding.step % save_embedding_every == 0: + last_saved_file = os.path.join(embedding_dir, f'{embedding_name}-{embedding.step}.pt') + embedding.save(last_saved_file) + + if embedding.step > 0 and images_dir is not None and embedding.step % create_image_every == 0: + last_saved_image = os.path.join(images_dir, f'{embedding_name}-{embedding.step}.png') + + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + prompt=text, + steps=20, + do_not_save_grid=True, + do_not_save_samples=True, + ) + + processed = processing.process_images(p) + image = processed.images[0] + + shared.state.current_image = image + image.save(last_saved_image) + + last_saved_image += f", prompt: {text}" + + shared.state.job_no = embedding.step + + shared.state.textinfo = f""" +
+<p>
+Loss: {losses.mean():.7f}<br/>
+Step: {embedding.step}<br/>
+Last prompt: {html.escape(text)}<br/>
+Last saved embedding: {html.escape(last_saved_file)}<br/>
+Last saved image: {html.escape(last_saved_image)}<br/>
+</p>
+""" + + embedding.cached_checksum = None + embedding.save(filename) + + return embedding, filename + diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py new file mode 100644 index 000000000..ce3677a98 --- /dev/null +++ b/modules/textual_inversion/ui.py @@ -0,0 +1,32 @@ +import html + +import gradio as gr + +import modules.textual_inversion.textual_inversion as ti +from modules import sd_hijack, shared + + +def create_embedding(name, nvpt): + filename = ti.create_embedding(name, nvpt) + + sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() + + return gr.Dropdown.update(choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())), f"Created: {filename}", "" + + +def train_embedding(*args): + + try: + sd_hijack.undo_optimizations() + + embedding, filename = ti.train_embedding(*args) + + res = f""" +Training {'interrupted' if shared.state.interrupted else 'finished'} after {embedding.step} steps. +Embedding saved to {html.escape(filename)} +""" + return res, "" + except Exception: + raise + finally: + sd_hijack.apply_optimizations() diff --git a/modules/ui.py b/modules/ui.py index 15572bb0a..57aef6ff1 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -21,6 +21,7 @@ import gradio as gr import gradio.utils import gradio.routes +from modules import sd_hijack from modules.paths import script_path from modules.shared import opts, cmd_opts import modules.shared as shared @@ -32,6 +33,7 @@ import modules.gfpgan_model import modules.codeformer_model import modules.styles import modules.generation_parameters_copypaste +import modules.textual_inversion.ui # this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the bowser will not show any UI mimetypes.init() @@ -142,8 +144,8 @@ def save_files(js_data, images, index): return '', '', plaintext_to_html(f"Saved: {filenames[0]}") -def wrap_gradio_call(func): - def f(*args, **kwargs): +def wrap_gradio_call(func, extra_outputs=None): + def f(*args, extra_outputs_array=extra_outputs, **kwargs): run_memmon = opts.memmon_poll_rate > 0 and not shared.mem_mon.disabled if run_memmon: shared.mem_mon.monitor() @@ -159,7 +161,10 @@ def wrap_gradio_call(func): shared.state.job = "" shared.state.job_count = 0 - res = [None, '', f"
<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>
"] + if extra_outputs_array is None: + extra_outputs_array = [None, ''] + + res = extra_outputs_array + [f"
<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>
"] elapsed = time.perf_counter() - t @@ -179,6 +184,7 @@ def wrap_gradio_call(func): res[-1] += f"
<div class='performance'><p class='time'>Time taken: <wbr>{elapsed:.2f}s</p>{vram_html}</div>
" shared.state.interrupted = False + shared.state.job_count = 0 return tuple(res) @@ -187,7 +193,7 @@ def wrap_gradio_call(func): def check_progress_call(id_part): if shared.state.job_count == 0: - return "", gr_show(False), gr_show(False) + return "", gr_show(False), gr_show(False), gr_show(False) progress = 0 @@ -219,13 +225,19 @@ def check_progress_call(id_part): else: preview_visibility = gr_show(True) - return f"
<span id='{id_part}_progress_span' style='display: none'>{time.time()}</span><p>{progressbar}</p>
", preview_visibility, image + if shared.state.textinfo is not None: + textinfo_result = gr.HTML.update(value=shared.state.textinfo, visible=True) + else: + textinfo_result = gr_show(False) + + return f"
<span id='{id_part}_progress_span' style='display: none'>{time.time()}</span><p>{progressbar}</p>
", preview_visibility, image, textinfo_result def check_progress_call_initial(id_part): shared.state.job_count = -1 shared.state.current_latent = None shared.state.current_image = None + shared.state.textinfo = None return check_progress_call(id_part) @@ -399,13 +411,16 @@ def create_toprow(is_img2img): return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste -def setup_progressbar(progressbar, preview, id_part): +def setup_progressbar(progressbar, preview, id_part, textinfo=None): + if textinfo is None: + textinfo = gr.HTML(visible=False) + check_progress = gr.Button('Check progress', elem_id=f"{id_part}_check_progress", visible=False) check_progress.click( fn=lambda: check_progress_call(id_part), show_progress=False, inputs=[], - outputs=[progressbar, preview, preview], + outputs=[progressbar, preview, preview, textinfo], ) check_progress_initial = gr.Button('Check progress (first)', elem_id=f"{id_part}_check_progress_initial", visible=False) @@ -413,11 +428,14 @@ def setup_progressbar(progressbar, preview, id_part): fn=lambda: check_progress_call_initial(id_part), show_progress=False, inputs=[], - outputs=[progressbar, preview, preview], + outputs=[progressbar, preview, preview, textinfo], ) -def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): +def create_ui(wrap_gradio_gpu_call): + import modules.img2img + import modules.txt2img + with gr.Blocks(analytics_enabled=False) as txt2img_interface: txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) @@ -483,7 +501,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): connect_reuse_seed(subseed, reuse_subseed, generation_info, dummy_component, is_subseed=True) txt2img_args = dict( - fn=txt2img, + fn=wrap_gradio_gpu_call(modules.txt2img.txt2img), _js="submit", inputs=[ txt2img_prompt, @@ -675,7 +693,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): ) img2img_args = dict( - fn=img2img, + fn=wrap_gradio_gpu_call(modules.img2img.img2img), _js="submit_img2img", inputs=[ dummy_component, @@ -828,7 +846,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): open_extras_folder = gr.Button('Open output directory', elem_id=button_id) submit.click( - fn=run_extras, + fn=wrap_gradio_gpu_call(modules.extras.run_extras), _js="get_extras_tab_index", inputs=[ dummy_component, @@ -878,7 +896,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): pnginfo_send_to_img2img = gr.Button('Send to img2img') image.change( - fn=wrap_gradio_call(run_pnginfo), + fn=wrap_gradio_call(modules.extras.run_pnginfo), inputs=[image], outputs=[html, generation_info, html2], ) @@ -887,7 +905,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): with gr.Row().style(equal_height=False): with gr.Column(variant='panel'): gr.HTML(value="
<p style='margin-bottom: 2.5em'>A merger of the two checkpoints will be generated in your checkpoint directory.</p>
") - + with gr.Row(): primary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_primary_model_name", label="Primary Model Name") secondary_model_name = gr.Dropdown(modules.sd_models.checkpoint_tiles(), elem_id="modelmerger_secondary_model_name", label="Secondary Model Name") @@ -896,10 +914,96 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method") save_as_half = gr.Checkbox(value=False, label="Safe as float16") modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary') - + with gr.Column(variant='panel'): submit_result = gr.Textbox(elem_id="modelmerger_result", show_label=False) + sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() + + with gr.Blocks() as textual_inversion_interface: + with gr.Row().style(equal_height=False): + with gr.Column(): + with gr.Group(): + gr.HTML(value="
<p style='margin-bottom: 0.7em'>Create a new embedding</p>
") + + new_embedding_name = gr.Textbox(label="Name") + nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1) + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + create_embedding = gr.Button(value="Create", variant='primary') + + with gr.Group(): + gr.HTML(value="
<p style='margin-bottom: 0.7em'>Train an embedding; must specify a directory with a set of 512x512 images</p>
") + train_embedding_name = gr.Dropdown(label='Embedding', choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) + learn_rate = gr.Number(label='Learning rate', value=5.0e-03) + dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images") + log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion") + template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt")) + steps = gr.Number(label='Max steps', value=100000, precision=0) + create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=1000, precision=0) + save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=1000, precision=0) + + with gr.Row(): + with gr.Column(scale=2): + gr.HTML(value="") + + with gr.Column(): + with gr.Row(): + interrupt_training = gr.Button(value="Interrupt") + train_embedding = gr.Button(value="Train", variant='primary') + + with gr.Column(): + progressbar = gr.HTML(elem_id="ti_progressbar") + ti_output = gr.Text(elem_id="ti_output", value="", show_label=False) + + ti_gallery = gr.Gallery(label='Output', show_label=False, elem_id='ti_gallery').style(grid=4) + ti_preview = gr.Image(elem_id='ti_preview', visible=False) + ti_progress = gr.HTML(elem_id="ti_progress", value="") + ti_outcome = gr.HTML(elem_id="ti_error", value="") + setup_progressbar(progressbar, ti_preview, 'ti', textinfo=ti_progress) + + create_embedding.click( + fn=modules.textual_inversion.ui.create_embedding, + inputs=[ + new_embedding_name, + nvpt, + ], + outputs=[ + train_embedding_name, + ti_output, + ti_outcome, + ] + ) + + train_embedding.click( + fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + train_embedding_name, + learn_rate, + dataset_directory, + log_directory, + steps, + create_image_every, + save_embedding_every, + template_file, + ], + outputs=[ + ti_output, + ti_outcome, + ] + ) + + interrupt_training.click( + fn=lambda: shared.state.interrupt(), + inputs=[], + outputs=[], + ) + def create_setting_component(key): def fun(): return opts.data[key] if key in opts.data else opts.data_labels[key].default @@ -1011,6 +1115,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): (extras_interface, "Extras", "extras"), (pnginfo_interface, "PNG Info", "pnginfo"), (modelmerger_interface, "Checkpoint Merger", "modelmerger"), + (textual_inversion_interface, "Textual inversion", "ti"), (settings_interface, "Settings", "settings"), ] @@ -1044,11 +1149,11 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): def modelmerger(*args): try: - results = run_modelmerger(*args) + results = modules.extras.run_modelmerger(*args) except Exception as e: print("Error loading/saving model file:", file=sys.stderr) print(traceback.format_exc(), file=sys.stderr) - modules.sd_models.list_models() #To remove the potentially missing models from the list + modules.sd_models.list_models() # to remove the potentially missing models from the list return ["Error loading/saving model file. 
It doesn't exist or the name contains illegal characters"] + [gr.Dropdown.update(choices=modules.sd_models.checkpoint_tiles()) for _ in range(3)] return results diff --git a/style.css b/style.css index 79d6bb0dc..39586bf18 100644 --- a/style.css +++ b/style.css @@ -157,7 +157,7 @@ button{ max-width: 10em; } -#txt2img_preview, #img2img_preview{ +#txt2img_preview, #img2img_preview, #ti_preview{ position: absolute; width: 320px; left: 0; @@ -172,18 +172,18 @@ button{ } @media screen and (min-width: 768px) { - #txt2img_preview, #img2img_preview { + #txt2img_preview, #img2img_preview, #ti_preview { position: absolute; } } @media screen and (max-width: 767px) { - #txt2img_preview, #img2img_preview { + #txt2img_preview, #img2img_preview, #ti_preview { position: relative; } } -#txt2img_preview div.left-0.top-0, #img2img_preview div.left-0.top-0{ +#txt2img_preview div.left-0.top-0, #img2img_preview div.left-0.top-0, #ti_preview div.left-0.top-0{ display: none; } @@ -247,7 +247,7 @@ input[type="range"]{ #txt2img_negative_prompt, #img2img_negative_prompt{ } -#txt2img_progressbar, #img2img_progressbar{ +#txt2img_progressbar, #img2img_progressbar, #ti_progressbar{ position: absolute; z-index: 1000; right: 0; diff --git a/textual_inversion_templates/style.txt b/textual_inversion_templates/style.txt new file mode 100644 index 000000000..15af2d6b8 --- /dev/null +++ b/textual_inversion_templates/style.txt @@ -0,0 +1,19 @@ +a painting, art by [name] +a rendering, art by [name] +a cropped painting, art by [name] +the painting, art by [name] +a clean painting, art by [name] +a dirty painting, art by [name] +a dark painting, art by [name] +a picture, art by [name] +a cool painting, art by [name] +a close-up painting, art by [name] +a bright painting, art by [name] +a cropped painting, art by [name] +a good painting, art by [name] +a close-up painting, art by [name] +a rendition, art by [name] +a nice painting, art by [name] +a small painting, art by [name] +a weird painting, art by [name] +a large painting, art by [name] diff --git a/textual_inversion_templates/style_filewords.txt b/textual_inversion_templates/style_filewords.txt new file mode 100644 index 000000000..b3a8159a8 --- /dev/null +++ b/textual_inversion_templates/style_filewords.txt @@ -0,0 +1,19 @@ +a painting of [filewords], art by [name] +a rendering of [filewords], art by [name] +a cropped painting of [filewords], art by [name] +the painting of [filewords], art by [name] +a clean painting of [filewords], art by [name] +a dirty painting of [filewords], art by [name] +a dark painting of [filewords], art by [name] +a picture of [filewords], art by [name] +a cool painting of [filewords], art by [name] +a close-up painting of [filewords], art by [name] +a bright painting of [filewords], art by [name] +a cropped painting of [filewords], art by [name] +a good painting of [filewords], art by [name] +a close-up painting of [filewords], art by [name] +a rendition of [filewords], art by [name] +a nice painting of [filewords], art by [name] +a small painting of [filewords], art by [name] +a weird painting of [filewords], art by [name] +a large painting of [filewords], art by [name] diff --git a/textual_inversion_templates/subject.txt b/textual_inversion_templates/subject.txt new file mode 100644 index 000000000..79f36aa05 --- /dev/null +++ b/textual_inversion_templates/subject.txt @@ -0,0 +1,27 @@ +a photo of a [name] +a rendering of a [name] +a cropped photo of the [name] +the photo of a [name] +a photo of a clean [name] +a photo of a dirty [name] +a dark 
photo of the [name] +a photo of my [name] +a photo of the cool [name] +a close-up photo of a [name] +a bright photo of the [name] +a cropped photo of a [name] +a photo of the [name] +a good photo of the [name] +a photo of one [name] +a close-up photo of the [name] +a rendition of the [name] +a photo of the clean [name] +a rendition of a [name] +a photo of a nice [name] +a good photo of a [name] +a photo of the nice [name] +a photo of the small [name] +a photo of the weird [name] +a photo of the large [name] +a photo of a cool [name] +a photo of a small [name] diff --git a/textual_inversion_templates/subject_filewords.txt b/textual_inversion_templates/subject_filewords.txt new file mode 100644 index 000000000..008652a6b --- /dev/null +++ b/textual_inversion_templates/subject_filewords.txt @@ -0,0 +1,27 @@ +a photo of a [name], [filewords] +a rendering of a [name], [filewords] +a cropped photo of the [name], [filewords] +the photo of a [name], [filewords] +a photo of a clean [name], [filewords] +a photo of a dirty [name], [filewords] +a dark photo of the [name], [filewords] +a photo of my [name], [filewords] +a photo of the cool [name], [filewords] +a close-up photo of a [name], [filewords] +a bright photo of the [name], [filewords] +a cropped photo of a [name], [filewords] +a photo of the [name], [filewords] +a good photo of the [name], [filewords] +a photo of one [name], [filewords] +a close-up photo of the [name], [filewords] +a rendition of the [name], [filewords] +a photo of the clean [name], [filewords] +a rendition of a [name], [filewords] +a photo of a nice [name], [filewords] +a good photo of a [name], [filewords] +a photo of the nice [name], [filewords] +a photo of the small [name], [filewords] +a photo of the weird [name], [filewords] +a photo of the large [name], [filewords] +a photo of a cool [name], [filewords] +a photo of a small [name], [filewords] diff --git a/webui.py b/webui.py index b8cccd546..19fdcdd4d 100644 --- a/webui.py +++ b/webui.py @@ -12,7 +12,6 @@ import modules.bsrgan_model as bsrgan import modules.extras import modules.face_restoration import modules.gfpgan_model as gfpgan -import modules.img2img import modules.ldsr_model as ldsr import modules.lowvram import modules.realesrgan_model as realesrgan @@ -21,7 +20,6 @@ import modules.sd_hijack import modules.sd_models import modules.shared as shared import modules.swinir_model as swinir -import modules.txt2img import modules.ui from modules import modelloader from modules.paths import script_path @@ -46,7 +44,7 @@ def wrap_queued_call(func): return f -def wrap_gradio_gpu_call(func): +def wrap_gradio_gpu_call(func, extra_outputs=None): def f(*args, **kwargs): devices.torch_gc() @@ -58,6 +56,7 @@ def wrap_gradio_gpu_call(func): shared.state.current_image = None shared.state.current_image_sampling_step = 0 shared.state.interrupted = False + shared.state.textinfo = None with queue_lock: res = func(*args, **kwargs) @@ -69,7 +68,7 @@ def wrap_gradio_gpu_call(func): return res - return modules.ui.wrap_gradio_call(f) + return modules.ui.wrap_gradio_call(f, extra_outputs=extra_outputs) modules.scripts.load_scripts(os.path.join(script_path, "scripts")) @@ -86,13 +85,7 @@ def webui(): signal.signal(signal.SIGINT, sigint_handler) - demo = modules.ui.create_ui( - txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img), - img2img=wrap_gradio_gpu_call(modules.img2img.img2img), - run_extras=wrap_gradio_gpu_call(modules.extras.run_extras), - run_pnginfo=modules.extras.run_pnginfo, - run_modelmerger=modules.extras.run_modelmerger - ) 
+ demo = modules.ui.create_ui(wrap_gradio_gpu_call=wrap_gradio_gpu_call) demo.launch( share=cmd_opts.share, From 0114057ad672a581bd0b598870b58b674b1a3624 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 15:49:42 +0300 Subject: [PATCH 030/460] fix incorrect use of glob in modelloader for #1410 --- modules/modelloader.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/modelloader.py b/modules/modelloader.py index 8c862b42f..015aeafa3 100644 --- a/modules/modelloader.py +++ b/modules/modelloader.py @@ -43,7 +43,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None for place in places: if os.path.exists(place): for file in glob.iglob(place + '**/**', recursive=True): - full_path = os.path.join(place, file) + full_path = file if os.path.isdir(full_path): continue if len(ext_filter) != 0: From 4e72a1aab6d1b3a8d8c09fadc81843a07c05cc18 Mon Sep 17 00:00:00 2001 From: ClashSAN <98228077+ClashSAN@users.noreply.github.com> Date: Sat, 1 Oct 2022 00:15:43 +0000 Subject: [PATCH 031/460] Grammar Fix --- README.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 5ded94f98..15e224e8f 100644 --- a/README.md +++ b/README.md @@ -11,12 +11,12 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web - One click install and run script (but you still must install python and git) - Outpainting - Inpainting -- Prompt -- Stable Diffusion upscale +- Prompt Matrix +- Stable Diffusion Upscale - Attention, specify parts of text that the model should pay more attention to - - a man in a ((txuedo)) - will pay more attentinoto tuxedo - - a man in a (txuedo:1.21) - alternative syntax -- Loopback, run img2img procvessing multiple times + - a man in a ((tuxedo)) - will pay more attention to tuxedo + - a man in a (tuxedo:1.21) - alternative syntax +- Loopback, run img2img processing multiple times - X/Y plot, a way to draw a 2 dimensional plot of images with different parameters - Textual Inversion - have as many embeddings as you want and use any names you like for them @@ -35,15 +35,15 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web - 4GB video card support (also reports of 2GB working) - Correct seeds for batches - Prompt length validation - - get length of prompt in tokensas you type - - get a warning after geenration if some text was truncated + - get length of prompt in tokens as you type + - get a warning after generation if some text was truncated - Generation parameters - parameters you used to generate images are saved with that image - in PNG chunks for PNG, in EXIF for JPEG - can drag the image to PNG info tab to restore generation parameters and automatically copy them into UI - can be disabled in settings - Settings page -- Running arbitrary python code from UI (must run with commandline flag to enable) +- Running arbitrary python code from UI (must run with --allow-code to enable) - Mouseover hints for most UI elements - Possible to change defaults/mix/max/step values for UI elements via text config - Random artist button From 0758f6e641b5790ce566a998d43e0ea74a627766 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 17:24:50 +0300 Subject: [PATCH 032/460] fix --ckpt option breaking model selection --- modules/sd_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 5b3dbdc79..9259d69e7 100644 
--- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -69,7 +69,7 @@ def list_models(): h = model_hash(cmd_ckpt) title, short_model_name = modeltitle(cmd_ckpt, h) checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name) - shared.opts.sd_model_checkpoint = title + shared.opts.data['sd_model_checkpoint'] = title elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file: print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr) for filename in model_list: From 53a3dc601fb734ce433505b1ca68770919106bad Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 18:21:56 +0300 Subject: [PATCH 033/460] move CLIP out of requirements and into launcher to make it possible to launch the program offline --- launch.py | 4 ++++ requirements.txt | 2 -- requirements_versions.txt | 1 - 3 files changed, 4 insertions(+), 3 deletions(-) diff --git a/launch.py b/launch.py index d2793ed20..57405feab 100644 --- a/launch.py +++ b/launch.py @@ -15,6 +15,7 @@ requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") commandline_args = os.environ.get('COMMANDLINE_ARGS', "") gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") +clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc") taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6") @@ -111,6 +112,9 @@ if not skip_torch_cuda_test: if not is_installed("gfpgan"): run_pip(f"install {gfpgan_package}", "gfpgan") +if not is_installed("clip"): + run_pip(f"install {clip_package}", "clip") + os.makedirs(dir_repos, exist_ok=True) git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash) diff --git a/requirements.txt b/requirements.txt index 7cb9d3293..d4b337fce 100644 --- a/requirements.txt +++ b/requirements.txt @@ -13,14 +13,12 @@ Pillow pytorch_lightning realesrgan scikit-image>=0.19 -git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379 timm==0.4.12 transformers==4.19.2 torch einops jsonmerge clean-fid -git+https://github.com/openai/CLIP@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1 resize-right torchdiffeq kornia diff --git a/requirements_versions.txt b/requirements_versions.txt index 1e8006e05..8a9acf205 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -18,7 +18,6 @@ piexif==1.1.3 einops==0.4.1 jsonmerge==1.8.0 clean-fid==0.1.29 -git+https://github.com/openai/CLIP@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1 resize-right==0.0.2 torchdiffeq==0.2.3 kornia==0.6.7 From 88ec0cf5571883d84abd09196652b3679e359f2e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 19:40:51 +0300 Subject: [PATCH 034/460] fix for incorrect embedding token length calculation (will break seeds that use embeddings, you're welcome!) 
add option to input initialization text for embeddings --- modules/sd_hijack.py | 8 ++++---- modules/textual_inversion/textual_inversion.py | 13 +++++-------- modules/textual_inversion/ui.py | 4 ++-- modules/ui.py | 2 ++ 4 files changed, 13 insertions(+), 14 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index fd57e5c54..3fa062422 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -130,7 +130,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): while i < len(tokens): token = tokens[i] - embedding = self.hijack.embedding_db.find_embedding_at_position(tokens, i) + embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i) if embedding is None: remade_tokens.append(token) @@ -142,7 +142,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): remade_tokens += [0] * emb_len multipliers += [weight] * emb_len used_custom_terms.append((embedding.name, embedding.checksum())) - i += emb_len + i += embedding_length_in_tokens if len(remade_tokens) > maxlen - 2: vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()} @@ -213,7 +213,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): while i < len(tokens): token = tokens[i] - embedding = self.hijack.embedding_db.find_embedding_at_position(tokens, i) + embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i) mult_change = self.token_mults.get(token) if opts.enable_emphasis else None if mult_change is not None: @@ -229,7 +229,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): remade_tokens += [0] * emb_len multipliers += [mult] * emb_len used_custom_terms.append((embedding.name, embedding.checksum())) - i += emb_len + i += embedding_length_in_tokens if len(remade_tokens) > maxlen - 2: vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()} diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index c0baaace2..0c50161db 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -117,24 +117,21 @@ class EmbeddingDatabase: possible_matches = self.ids_lookup.get(token, None) if possible_matches is None: - return None + return None, None for ids, embedding in possible_matches: if tokens[offset:offset + len(ids)] == ids: - return embedding + return embedding, len(ids) - return None + return None, None - -def create_embedding(name, num_vectors_per_token): - init_text = '*' - +def create_embedding(name, num_vectors_per_token, init_text='*'): cond_model = shared.sd_model.cond_stage_model embedding_layer = cond_model.wrapped.transformer.text_model.embeddings ids = cond_model.tokenizer(init_text, max_length=num_vectors_per_token, return_tensors="pt", add_special_tokens=False)["input_ids"] - embedded = embedding_layer(ids.to(devices.device)).squeeze(0) + embedded = embedding_layer.token_embedding.wrapped(ids.to(devices.device)).squeeze(0) vec = torch.zeros((num_vectors_per_token, embedded.shape[1]), device=devices.device) for i in range(num_vectors_per_token): diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py index ce3677a98..66c43ffbe 100644 --- a/modules/textual_inversion/ui.py +++ b/modules/textual_inversion/ui.py @@ -6,8 +6,8 @@ import modules.textual_inversion.textual_inversion as ti from modules import sd_hijack, shared -def create_embedding(name, nvpt): - filename = ti.create_embedding(name, nvpt) +def create_embedding(name, 
initialization_text, nvpt): + filename = ti.create_embedding(name, nvpt, init_text=initialization_text) sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() diff --git a/modules/ui.py b/modules/ui.py index 3b81a4f74..eca50df0f 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -954,6 +954,7 @@ def create_ui(wrap_gradio_gpu_call): gr.HTML(value="

<p style='margin-bottom: 0.7em'>Create a new embedding</p>

") new_embedding_name = gr.Textbox(label="Name") + initialization_text = gr.Textbox(label="Initialization text", value="*") nvpt = gr.Slider(label="Number of vectors per token", minimum=1, maximum=75, step=1, value=1) with gr.Row(): @@ -997,6 +998,7 @@ def create_ui(wrap_gradio_gpu_call): fn=modules.textual_inversion.ui.create_embedding, inputs=[ new_embedding_name, + initialization_text, nvpt, ], outputs=[ From 71fe7fa49f5eb1a2c89932a9d217ed153c12fc8b Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 19:56:37 +0300 Subject: [PATCH 035/460] fix using aaaa-100 embedding when the prompt has aaaa-10000 and you have both aaaa-100 and aaaa-10000 in the directory with embeddings. --- modules/textual_inversion/textual_inversion.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 0c50161db..9d2241cef 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -57,7 +57,8 @@ class EmbeddingDatabase: first_id = ids[0] if first_id not in self.ids_lookup: self.ids_lookup[first_id] = [] - self.ids_lookup[first_id].append((ids, embedding)) + + self.ids_lookup[first_id] = sorted(self.ids_lookup[first_id] + [(ids, embedding)], key=lambda x: len(x[0]), reverse=True) return embedding From 4ec4af6e0b7addeee5221a03f32d117ccdc875d9 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 20:15:25 +0300 Subject: [PATCH 036/460] add checkpoint info to saved embeddings --- modules/textual_inversion/textual_inversion.py | 13 ++++++++++++- 1 file changed, 12 insertions(+), 1 deletion(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 9d2241cef..1183aab76 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -7,7 +7,7 @@ import tqdm import html import datetime -from modules import shared, devices, sd_hijack, processing +from modules import shared, devices, sd_hijack, processing, sd_models import modules.textual_inversion.dataset @@ -17,6 +17,8 @@ class Embedding: self.name = name self.step = step self.cached_checksum = None + self.sd_checkpoint = None + self.sd_checkpoint_name = None def save(self, filename): embedding_data = { @@ -24,6 +26,8 @@ class Embedding: "string_to_param": {"*": self.vec}, "name": self.name, "step": self.step, + "sd_checkpoint": self.sd_checkpoint, + "sd_checkpoint_name": self.sd_checkpoint_name, } torch.save(embedding_data, filename) @@ -41,6 +45,7 @@ class Embedding: self.cached_checksum = f'{const_hash(self.vec.reshape(-1) * 100) & 0xffff:04x}' return self.cached_checksum + class EmbeddingDatabase: def __init__(self, embeddings_dir): self.ids_lookup = {} @@ -96,6 +101,8 @@ class EmbeddingDatabase: vec = emb.detach().to(devices.device, dtype=torch.float32) embedding = Embedding(vec, name) embedding.step = data.get('step', None) + embedding.sd_checkpoint = data.get('hash', None) + embedding.sd_checkpoint_name = data.get('sd_checkpoint_name', None) self.register_embedding(embedding, shared.sd_model) for fn in os.listdir(self.embeddings_dir): @@ -249,6 +256,10 @@ Last saved image: {html.escape(last_saved_image)}
</p>
""" + checkpoint = sd_models.select_checkpoint() + + embedding.sd_checkpoint = checkpoint.hash + embedding.sd_checkpoint_name = checkpoint.model_name embedding.cached_checksum = None embedding.save(filename) From 3ff0de2c594b786ef948a89efb1814c59bb42117 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 20:23:40 +0300 Subject: [PATCH 037/460] added --disable-console-progressbars to disable progressbars in console disabled printing prompts to console by default, enabled by --enable-console-prompts --- modules/img2img.py | 4 +++- modules/sd_samplers.py | 8 ++++++-- modules/shared.py | 7 +++++-- modules/txt2img.py | 4 +++- 4 files changed, 17 insertions(+), 6 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index 03e934e96..f4455c90f 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -103,7 +103,9 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro inpaint_full_res_padding=inpaint_full_res_padding, inpainting_mask_invert=inpainting_mask_invert, ) - print(f"\nimg2img: {prompt}", file=shared.progress_print_out) + + if shared.cmd_opts.enable_console_prompts: + print(f"\nimg2img: {prompt}", file=shared.progress_print_out) p.extra_generation_params["Mask blur"] = mask_blur diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 925222148..9316875ab 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -77,7 +77,9 @@ def extended_tdqm(sequence, *args, desc=None, **kwargs): state.sampling_steps = len(sequence) state.sampling_step = 0 - for x in tqdm.tqdm(sequence, *args, desc=state.job, file=shared.progress_print_out, **kwargs): + seq = sequence if cmd_opts.disable_console_progressbars else tqdm.tqdm(sequence, *args, desc=state.job, file=shared.progress_print_out, **kwargs) + + for x in seq: if state.interrupted: break @@ -207,7 +209,9 @@ def extended_trange(sampler, count, *args, **kwargs): state.sampling_steps = count state.sampling_step = 0 - for x in tqdm.trange(count, *args, desc=state.job, file=shared.progress_print_out, **kwargs): + seq = range(count) if cmd_opts.disable_console_progressbars else tqdm.trange(count, *args, desc=state.job, file=shared.progress_print_out, **kwargs) + + for x in seq: if state.interrupted: break diff --git a/modules/shared.py b/modules/shared.py index 5a591dc99..1bf7a6c14 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -58,6 +58,9 @@ parser.add_argument("--opt-channelslast", action='store_true', help="change memo parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv')) parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False) parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False) +parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False) +parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False) + cmd_opts = parser.parse_args() device = get_optimal_device() @@ -320,14 +323,14 @@ class TotalTQDM: ) def update(self): - if not opts.multiple_tqdm: + if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: return if self._tqdm is None: self.reset() self._tqdm.update() def updateTotal(self, 
new_total): - if not opts.multiple_tqdm: + if not opts.multiple_tqdm or cmd_opts.disable_console_progressbars: return if self._tqdm is None: self.reset() diff --git a/modules/txt2img.py b/modules/txt2img.py index 5368e4d00..d4406c3c0 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -34,7 +34,9 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: denoising_strength=denoising_strength if enable_hr else None, ) - print(f"\ntxt2img: {prompt}", file=shared.progress_print_out) + if cmd_opts.enable_console_prompts: + print(f"\ntxt2img: {prompt}", file=shared.progress_print_out) + processed = modules.scripts.scripts_txt2img.run(p, *args) if processed is None: From 6365a41f5981efa506dfe4e8fa878b43ca2d8d0c Mon Sep 17 00:00:00 2001 From: d8ahazard Date: Sun, 2 Oct 2022 12:58:17 -0500 Subject: [PATCH 038/460] Update esrgan_model.py Use alternate ESRGAN Model download path. --- modules/esrgan_model.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index ea91abfe8..4aed9283c 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -73,8 +73,8 @@ def fix_model_layers(crt_model, pretrained_net): class UpscalerESRGAN(Upscaler): def __init__(self, dirname): self.name = "ESRGAN" - self.model_url = "https://drive.google.com/u/0/uc?id=1TPrz5QKd8DHHt1k8SRtm6tMiPjz_Qene&export=download" - self.model_name = "ESRGAN 4x" + self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/ESRGAN.pth" + self.model_name = "ESRGAN_4x" self.scalers = [] self.user_path = dirname self.model_path = os.path.join(models_path, self.name) From a1cde7e6468f80584030525a1b07cbf0f4ee42eb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 21:09:10 +0300 Subject: [PATCH 039/460] disabled SD model download after multiple complaints --- modules/sd_models.py | 18 ++++++++---------- modules/textual_inversion/ui.py | 2 +- webui.py | 2 +- 3 files changed, 10 insertions(+), 12 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 9259d69e7..9a6b568f0 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -13,9 +13,6 @@ from modules.paths import models_path model_dir = "Stable-diffusion" model_path = os.path.abspath(os.path.join(models_path, model_dir)) -model_name = "sd-v1-4.ckpt" -model_url = "https://drive.yerf.org/wl/?id=EBfTrmcCCUAGaQBXVIj5lJmEhjoP1tgl&mode=grid&download=1" -user_dir = None CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name']) checkpoints_list = {} @@ -30,12 +27,10 @@ except Exception: pass -def setup_model(dirname): - global user_dir - user_dir = dirname +def setup_model(): if not os.path.exists(model_path): os.makedirs(model_path) - checkpoints_list.clear() + list_models() @@ -45,7 +40,7 @@ def checkpoint_tiles(): def list_models(): checkpoints_list.clear() - model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=user_dir, ext_filter=[".ckpt"], download_name=model_name) + model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt"]) def modeltitle(path, shorthash): abspath = os.path.abspath(path) @@ -106,8 +101,11 @@ def select_checkpoint(): if len(checkpoints_list) == 0: print(f"No checkpoints found. 
When searching for checkpoints, looked at:", file=sys.stderr) - print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr) - print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr) + if shared.cmd_opts.ckpt is not None: + print(f" - file {os.path.abspath(shared.cmd_opts.ckpt)}", file=sys.stderr) + print(f" - directory {model_path}", file=sys.stderr) + if shared.cmd_opts.ckpt_dir is not None: + print(f" - directory {os.path.abspath(shared.cmd_opts.ckpt_dir)}", file=sys.stderr) print(f"Can't run without a checkpoint. Find and place a .ckpt file into any of those locations. The program will exit.", file=sys.stderr) exit(1) diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py index 66c43ffbe..633037d8e 100644 --- a/modules/textual_inversion/ui.py +++ b/modules/textual_inversion/ui.py @@ -22,7 +22,7 @@ def train_embedding(*args): embedding, filename = ti.train_embedding(*args) res = f""" -Training {'interrupted' if shared.state.interrupted else 'finished'} after {embedding.step} steps. +Training {'interrupted' if shared.state.interrupted else 'finished'} at {embedding.step} steps. Embedding saved to {html.escape(filename)} """ return res, "" diff --git a/webui.py b/webui.py index 424ab9751..dc72ceb8a 100644 --- a/webui.py +++ b/webui.py @@ -23,7 +23,7 @@ from modules.paths import script_path from modules.shared import cmd_opts modelloader.cleanup_models() -modules.sd_models.setup_model(cmd_opts.ckpt_dir) +modules.sd_models.setup_model() codeformer.setup_model(cmd_opts.codeformer_models_path) gfpgan.setup_model(cmd_opts.gfpgan_models_path) shared.face_restorers.append(modules.face_restoration.FaceRestoration()) From 852fd90c0dcda9cb5fbbfdf0c7308ce58034935c Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 21:22:20 +0300 Subject: [PATCH 040/460] emergency fix for disabling SD model download after multiple complaints --- modules/sd_models.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 9a6b568f0..5f9920647 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -45,8 +45,8 @@ def list_models(): def modeltitle(path, shorthash): abspath = os.path.abspath(path) - if user_dir is not None and abspath.startswith(user_dir): - name = abspath.replace(user_dir, '') + if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir): + name = abspath.replace(shared.cmd_opts.ckpt_dir, '') elif abspath.startswith(model_path): name = abspath.replace(model_path, '') else: From e808096cf641d868f88465515d70d40fc46125d4 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sun, 2 Oct 2022 19:26:06 +0100 Subject: [PATCH 041/460] correct indent --- modules/scripts.py | 48 ++++++++++++++++++++++++---------------------- modules/ui.py | 23 +++++++++++----------- 2 files changed, 36 insertions(+), 35 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index 788397f53..45230f9a1 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -163,37 +163,39 @@ class ScriptRunner: return processed def reload_sources(self): - for si,script in list(enumerate(self.scripts)): - with open(script.filename, "r", encoding="utf8") as file: - args_from = script.args_from - args_to = script.args_to - filename = script.filename - text = file.read() + for si, script in list(enumerate(self.scripts)): + with open(script.filename, "r", encoding="utf8") as file: + args_from = 
script.args_from + args_to = script.args_to + filename = script.filename + text = file.read() - from types import ModuleType - compiled = compile(text, filename, 'exec') - module = ModuleType(script.filename) - exec(compiled, module.__dict__) + from types import ModuleType - for key, script_class in module.__dict__.items(): - if type(script_class) == type and issubclass(script_class, Script): - self.scripts[si] = script_class() - self.scripts[si].filename = filename - self.scripts[si].args_from = args_from - self.scripts[si].args_to = args_to + compiled = compile(text, filename, 'exec') + module = ModuleType(script.filename) + exec(compiled, module.__dict__) + + for key, script_class in module.__dict__.items(): + if type(script_class) == type and issubclass(script_class, Script): + self.scripts[si] = script_class() + self.scripts[si].filename = filename + self.scripts[si].args_from = args_from + self.scripts[si].args_to = args_to scripts_txt2img = ScriptRunner() scripts_img2img = ScriptRunner() def reload_script_body_only(): - scripts_txt2img.reload_sources() - scripts_img2img.reload_sources() + scripts_txt2img.reload_sources() + scripts_img2img.reload_sources() + def reload_scripts(basedir): - global scripts_txt2img,scripts_img2img + global scripts_txt2img, scripts_img2img - scripts_data.clear() - load_scripts(basedir) + scripts_data.clear() + load_scripts(basedir) - scripts_txt2img = ScriptRunner() - scripts_img2img = ScriptRunner() + scripts_txt2img = ScriptRunner() + scripts_img2img = ScriptRunner() diff --git a/modules/ui.py b/modules/ui.py index 963a2c611..6b30f84ba 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1003,12 +1003,12 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): ) with gr.Row(): - reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary') - restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary') + reload_script_bodies = gr.Button(value='Reload custom script bodies (No ui updates, No restart)', variant='secondary') + restart_gradio = gr.Button(value='Restart Gradio and Refresh components (Custom Scripts, ui.py, js and css only)', variant='primary') def reload_scripts(): - modules.scripts.reload_script_body_only() + modules.scripts.reload_script_body_only() reload_script_bodies.click( fn=reload_scripts, @@ -1018,7 +1018,7 @@ def create_ui(txt2img, img2img, run_extras, run_pnginfo, run_modelmerger): ) def request_restart(): - settings_interface.gradio_ref.do_restart = True + settings_interface.gradio_ref.do_restart = True restart_gradio.click( fn=request_restart, @@ -1234,12 +1234,11 @@ for filename in sorted(os.listdir(jsdir)): if 'gradio_routes_templates_response' not in globals(): - def template_response(*args, **kwargs): - res = gradio_routes_templates_response(*args, **kwargs) - res.body = res.body.replace(b'', f'{javascript}'.encode("utf8")) - res.init_headers() - return res - - gradio_routes_templates_response = gradio.routes.templates.TemplateResponse - gradio.routes.templates.TemplateResponse = template_response + def template_response(*args, **kwargs): + res = gradio_routes_templates_response(*args, **kwargs) + res.body = res.body.replace(b'', f'{javascript}'.encode("utf8")) + res.init_headers() + return res + gradio_routes_templates_response = gradio.routes.templates.TemplateResponse + gradio.routes.templates.TemplateResponse = template_response From a634c3226fd69486ce96df56f95f3fd63172305c 
Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sun, 2 Oct 2022 19:26:38 +0100 Subject: [PATCH 042/460] correct indent --- webui.py | 56 ++++++++++++++++++++++++++++---------------------------- 1 file changed, 28 insertions(+), 28 deletions(-) diff --git a/webui.py b/webui.py index ab200045a..140040ca1 100644 --- a/webui.py +++ b/webui.py @@ -89,38 +89,38 @@ def webui(): while 1: - demo = modules.ui.create_ui( - txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img), - img2img=wrap_gradio_gpu_call(modules.img2img.img2img), - run_extras=wrap_gradio_gpu_call(modules.extras.run_extras), - run_pnginfo=modules.extras.run_pnginfo, - run_modelmerger=modules.extras.run_modelmerger - ) + demo = modules.ui.create_ui( + txt2img=wrap_gradio_gpu_call(modules.txt2img.txt2img), + img2img=wrap_gradio_gpu_call(modules.img2img.img2img), + run_extras=wrap_gradio_gpu_call(modules.extras.run_extras), + run_pnginfo=modules.extras.run_pnginfo, + run_modelmerger=modules.extras.run_modelmerger + ) - demo.launch( - share=cmd_opts.share, - server_name="0.0.0.0" if cmd_opts.listen else None, - server_port=cmd_opts.port, - debug=cmd_opts.gradio_debug, - auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None, - inbrowser=cmd_opts.autolaunch, - prevent_thread_lock=True - ) + demo.launch( + share=cmd_opts.share, + server_name="0.0.0.0" if cmd_opts.listen else None, + server_port=cmd_opts.port, + debug=cmd_opts.gradio_debug, + auth=[tuple(cred.split(':')) for cred in cmd_opts.gradio_auth.strip('"').split(',')] if cmd_opts.gradio_auth else None, + inbrowser=cmd_opts.autolaunch, + prevent_thread_lock=True + ) - while 1: - time.sleep(0.5) - if getattr(demo,'do_restart',False): - time.sleep(0.5) - demo.close() - time.sleep(0.5) - break + while 1: + time.sleep(0.5) + if getattr(demo,'do_restart',False): + time.sleep(0.5) + demo.close() + time.sleep(0.5) + break - print('Reloading Custom Scripts') - modules.scripts.reload_scripts(os.path.join(script_path, "scripts")) - print('Reloading modules: modules.ui') - importlib.reload(modules.ui) - print('Restarting Gradio') + print('Reloading Custom Scripts') + modules.scripts.reload_scripts(os.path.join(script_path, "scripts")) + print('Reloading modules: modules.ui') + importlib.reload(modules.ui) + print('Restarting Gradio') if __name__ == "__main__": From c0389eb3071870240bc158263e5dfb4351ec8eba Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 21:35:29 +0300 Subject: [PATCH 043/460] hello --- webui.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/webui.py b/webui.py index 634956978..47848ba58 100644 --- a/webui.py +++ b/webui.py @@ -103,11 +103,11 @@ def webui(): while 1: time.sleep(0.5) - if getattr(demo,'do_restart',False): - time.sleep(0.5) - demo.close() - time.sleep(0.5) - break + if getattr(demo, 'do_restart', False): + time.sleep(0.5) + demo.close() + time.sleep(0.5) + break print('Reloading Custom Scripts') modules.scripts.reload_scripts(os.path.join(script_path, "scripts")) From 2ef69df9a7c7b6793401f29ced71fb8a781fad4c Mon Sep 17 00:00:00 2001 From: Jocke Date: Sun, 2 Oct 2022 16:10:41 +0200 Subject: [PATCH 044/460] Prevent upscaling when None is selected for SD upscale --- scripts/sd_upscale.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/scripts/sd_upscale.py b/scripts/sd_upscale.py index 2653e2d40..cb37ff7e8 100644 --- a/scripts/sd_upscale.py +++ b/scripts/sd_upscale.py @@ -34,7 
+34,11 @@ class Script(scripts.Script): seed = p.seed init_img = p.init_images[0] - img = upscaler.scaler.upscale(init_img, 2, upscaler.data_path) + + if(upscaler.name != "None"): + img = upscaler.scaler.upscale(init_img, 2, upscaler.data_path) + else: + img = init_img devices.torch_gc() From 91f327f22bb2feb780c424c74723cc0629dc34a1 Mon Sep 17 00:00:00 2001 From: Lopyter Date: Sun, 2 Oct 2022 18:15:31 +0200 Subject: [PATCH 045/460] make save to dirs optional for imgs saved from ui --- modules/shared.py | 1 + modules/ui.py | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 1bf7a6c14..785e7af6f 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -173,6 +173,7 @@ options_templates.update(options_section(('saving-to-dirs', "Saving to a directo "grid_save_to_dirs": OptionInfo(False, "Save grids to subdirectory"), "directories_filename_pattern": OptionInfo("", "Directory name pattern"), "directories_max_prompt_words": OptionInfo(8, "Max prompt words", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}), + "use_save_to_dirs_for_ui": OptionInfo(False, "Use \"Save images to a subdirectory\" option for images saved from UI"), })) options_templates.update(options_section(('upscaling', "Upscaling"), { diff --git a/modules/ui.py b/modules/ui.py index 78a15d83a..8912deff4 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -113,7 +113,7 @@ def save_files(js_data, images, index): p = MyObject(data) path = opts.outdir_save - save_to_dirs = opts.save_to_dirs + save_to_dirs = opts.use_save_to_dirs_for_ui if save_to_dirs: dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, p.seed, p.prompt) From c4445225f79f1c57afe52358ff4b205864eb7aac Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 21:50:14 +0300 Subject: [PATCH 046/460] change wording for options --- modules/shared.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index 785e7af6f..7246eadc6 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -170,10 +170,10 @@ options_templates.update(options_section(('saving-paths', "Paths for saving"), { options_templates.update(options_section(('saving-to-dirs', "Saving to a directory"), { "save_to_dirs": OptionInfo(False, "Save images to a subdirectory"), - "grid_save_to_dirs": OptionInfo(False, "Save grids to subdirectory"), + "grid_save_to_dirs": OptionInfo(False, "Save grids to a subdirectory"), + "use_save_to_dirs_for_ui": OptionInfo(False, "When using \"Save\" button, save images to a subdirectory"), "directories_filename_pattern": OptionInfo("", "Directory name pattern"), - "directories_max_prompt_words": OptionInfo(8, "Max prompt words", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}), - "use_save_to_dirs_for_ui": OptionInfo(False, "Use \"Save images to a subdirectory\" option for images saved from UI"), + "directories_max_prompt_words": OptionInfo(8, "Max prompt words for [prompt_words] pattern", gr.Slider, {"minimum": 1, "maximum": 20, "step": 1}), })) options_templates.update(options_section(('upscaling', "Upscaling"), { From c7543d4940da672d970124ae8f2fec9de7bdc1da Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 22:41:21 +0300 Subject: [PATCH 047/460] preprocessing for textual inversion added --- modules/interrogate.py | 1 + modules/textual_inversion/preprocess.py | 75 +++++++++++++++++++ .../textual_inversion/textual_inversion.py | 1 + 
modules/textual_inversion/ui.py | 14 +++- modules/ui.py | 36 +++++++++ 5 files changed, 124 insertions(+), 3 deletions(-) create mode 100644 modules/textual_inversion/preprocess.py diff --git a/modules/interrogate.py b/modules/interrogate.py index f62a47458..eed87144f 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -21,6 +21,7 @@ Category = namedtuple("Category", ["name", "topn", "items"]) re_topn = re.compile(r"\.top(\d+)\.") + class InterrogateModels: blip_model = None clip_model = None diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py new file mode 100644 index 000000000..209e928ff --- /dev/null +++ b/modules/textual_inversion/preprocess.py @@ -0,0 +1,75 @@ +import os +from PIL import Image, ImageOps +import tqdm + +from modules import shared, images + + +def preprocess(process_src, process_dst, process_flip, process_split, process_caption): + size = 512 + src = os.path.abspath(process_src) + dst = os.path.abspath(process_dst) + + assert src != dst, 'same directory specified as source and destination' + + os.makedirs(dst, exist_ok=True) + + files = os.listdir(src) + + shared.state.textinfo = "Preprocessing..." + shared.state.job_count = len(files) + + if process_caption: + shared.interrogator.load() + + def save_pic_with_caption(image, index): + if process_caption: + caption = "-" + shared.interrogator.generate_caption(image) + else: + caption = "" + + image.save(os.path.join(dst, f"{index:05}-{subindex[0]}{caption}.png")) + subindex[0] += 1 + + def save_pic(image, index): + save_pic_with_caption(image, index) + + if process_flip: + save_pic_with_caption(ImageOps.mirror(image), index) + + for index, imagefile in enumerate(tqdm.tqdm(files)): + subindex = [0] + filename = os.path.join(src, imagefile) + img = Image.open(filename).convert("RGB") + + if shared.state.interrupted: + break + + ratio = img.height / img.width + is_tall = ratio > 1.35 + is_wide = ratio < 1 / 1.35 + + if process_split and is_tall: + img = img.resize((size, size * img.height // img.width)) + + top = img.crop((0, 0, size, size)) + save_pic(top, index) + + bot = img.crop((0, img.height - size, size, img.height)) + save_pic(bot, index) + elif process_split and is_wide: + img = img.resize((size * img.width // img.height, size)) + + left = img.crop((0, 0, size, size)) + save_pic(left, index) + + right = img.crop((img.width - size, 0, img.width, size)) + save_pic(right, index) + else: + img = images.resize_image(1, img, size, size) + save_pic(img, index) + + shared.state.nextjob() + + if process_caption: + shared.interrogator.send_blip_to_ram() diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 1183aab76..d4e250d87 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -7,6 +7,7 @@ import tqdm import html import datetime + from modules import shared, devices, sd_hijack, processing, sd_models import modules.textual_inversion.dataset diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py index 633037d8e..f19ac5e02 100644 --- a/modules/textual_inversion/ui.py +++ b/modules/textual_inversion/ui.py @@ -2,24 +2,31 @@ import html import gradio as gr -import modules.textual_inversion.textual_inversion as ti +import modules.textual_inversion.textual_inversion +import modules.textual_inversion.preprocess from modules import sd_hijack, shared def create_embedding(name, initialization_text, nvpt): - filename = 
ti.create_embedding(name, nvpt, init_text=initialization_text) + filename = modules.textual_inversion.textual_inversion.create_embedding(name, nvpt, init_text=initialization_text) sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings() return gr.Dropdown.update(choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())), f"Created: {filename}", "" +def preprocess(*args): + modules.textual_inversion.preprocess.preprocess(*args) + + return "Preprocessing finished.", "" + + def train_embedding(*args): try: sd_hijack.undo_optimizations() - embedding, filename = ti.train_embedding(*args) + embedding, filename = modules.textual_inversion.textual_inversion.train_embedding(*args) res = f""" Training {'interrupted' if shared.state.interrupted else 'finished'} at {embedding.step} steps. @@ -30,3 +37,4 @@ Embedding saved to {html.escape(filename)} raise finally: sd_hijack.apply_optimizations() + diff --git a/modules/ui.py b/modules/ui.py index 8912deff4..e7bde53bf 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -961,6 +961,8 @@ def create_ui(wrap_gradio_gpu_call): with gr.Row().style(equal_height=False): with gr.Column(): with gr.Group(): + gr.HTML(value="

<p style='margin-bottom: 0.7em'>See wiki for detailed explanation.</p>

") + gr.HTML(value="

<p style='margin-bottom: 0.7em'>Create a new embedding</p>

") new_embedding_name = gr.Textbox(label="Name") @@ -974,6 +976,24 @@ def create_ui(wrap_gradio_gpu_call): with gr.Column(): create_embedding = gr.Button(value="Create", variant='primary') + with gr.Group(): + gr.HTML(value="

<p style='margin-bottom: 0.7em'>Preprocess images</p>

") + + process_src = gr.Textbox(label='Source directory') + process_dst = gr.Textbox(label='Destination directory') + + with gr.Row(): + process_flip = gr.Checkbox(label='Flip') + process_split = gr.Checkbox(label='Split into two') + process_caption = gr.Checkbox(label='Add caption') + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + run_preprocess = gr.Button(value="Preprocess", variant='primary') + with gr.Group(): gr.HTML(value="

<p style='margin-bottom: 0.7em'>Train an embedding; must specify a directory with a set of 512x512 images</p>

") train_embedding_name = gr.Dropdown(label='Embedding', choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) @@ -1018,6 +1038,22 @@ def create_ui(wrap_gradio_gpu_call): ] ) + run_preprocess.click( + fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + process_src, + process_dst, + process_flip, + process_split, + process_caption, + ], + outputs=[ + ti_output, + ti_outcome, + ], + ) + train_embedding.click( fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.train_embedding, extra_outputs=[gr.update()]), _js="start_training_textual_inversion", From 6785331e22d6a488fbf5905fab56d7fec867e038 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 22:59:01 +0300 Subject: [PATCH 048/460] keep textual inversion dataset latents in CPU memory to save a bit of VRAM --- modules/textual_inversion/dataset.py | 2 ++ modules/textual_inversion/textual_inversion.py | 3 +++ modules/ui.py | 4 ++-- 3 files changed, 7 insertions(+), 2 deletions(-) diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index 7e134a08f..e8394ff65 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -8,6 +8,7 @@ from torchvision import transforms import random import tqdm +from modules import devices class PersonalizedBase(Dataset): @@ -47,6 +48,7 @@ class PersonalizedBase(Dataset): torchdata = torch.moveaxis(torchdata, 2, 0) init_latent = model.get_first_stage_encoding(model.encode_first_stage(torchdata.unsqueeze(dim=0))).squeeze() + init_latent = init_latent.to(devices.cpu) self.dataset.append((init_latent, filename_tokens)) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index d4e250d87..8686f5347 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -212,7 +212,10 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps, with torch.autocast("cuda"): c = cond_model([text]) + + x = x.to(devices.device) loss = shared.sd_model(x.unsqueeze(0), c)[0] + del x losses[embedding.step % losses.shape[0]] = loss.item() diff --git a/modules/ui.py b/modules/ui.py index e7bde53bf..d9d02ecef 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1002,8 +1002,8 @@ def create_ui(wrap_gradio_gpu_call): log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion") template_file = gr.Textbox(label='Prompt template file', value=os.path.join(script_path, "textual_inversion_templates", "style_filewords.txt")) steps = gr.Number(label='Max steps', value=100000, precision=0) - create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=1000, precision=0) - save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=1000, precision=0) + create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0) + save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0) with gr.Row(): with gr.Column(scale=2): From 166283653cfe7521a422c91e8fb801f3ecb4adc8 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 23:18:13 +0300 Subject: [PATCH 049/460] remove LDSR 
warning --- modules/paths.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/paths.py b/modules/paths.py index ceb804171..606f7d666 100644 --- a/modules/paths.py +++ b/modules/paths.py @@ -20,7 +20,6 @@ path_dirs = [ (os.path.join(sd_path, '../taming-transformers'), 'taming', 'Taming Transformers', []), (os.path.join(sd_path, '../CodeFormer'), 'inference_codeformer.py', 'CodeFormer', []), (os.path.join(sd_path, '../BLIP'), 'models/blip.py', 'BLIP', []), - (os.path.join(sd_path, '../latent-diffusion'), 'LDSR.py', 'LDSR', []), (os.path.join(sd_path, '../k-diffusion'), 'k_diffusion/sampling.py', 'k_diffusion', ["atstart"]), ] From 4c2eccf8e96825333ed400f8a8a2be78141ed8ec Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 2 Oct 2022 23:22:48 +0300 Subject: [PATCH 050/460] credit Rinon Gal --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 15e224e8f..ec3d7532d 100644 --- a/README.md +++ b/README.md @@ -113,6 +113,7 @@ The documentation was moved from this README over to the project's [wiki](https: - LDSR - https://github.com/Hafiidz/latent-diffusion - Ideas for optimizations - https://github.com/basujindal/stable-diffusion - Doggettx - Cross Attention layer optimization - https://github.com/Doggettx/stable-diffusion, original idea for prompt editing. +- Rinon Gal - Textual Inversion - https://github.com/rinongal/textual_inversion (we're not using his code, but we are using his ideas). - Idea for SD upscale - https://github.com/jquesnelle/txt2imghd - Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot - CLIP interrogator idea and borrowing some code - https://github.com/pharmapsychotic/clip-interrogator From 138662734c25dab4e73e632b7eaff9ad9c0ce2b4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 3 Oct 2022 07:57:59 +0300 Subject: [PATCH 051/460] use dropdown instead of radio for img2img upscaler selection --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 7246eadc6..2a599e9cf 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -183,7 +183,7 @@ options_templates.update(options_section(('upscaling', "Upscaling"), { "SWIN_tile": OptionInfo(192, "Tile size for all SwinIR.", gr.Slider, {"minimum": 16, "maximum": 512, "step": 16}), "SWIN_tile_overlap": OptionInfo(8, "Tile overlap, in pixels for SwinIR. Low values = visible seam.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}), "ldsr_steps": OptionInfo(100, "LDSR processing steps. 
Lower = faster", gr.Slider, {"minimum": 1, "maximum": 200, "step": 1}), - "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Radio, lambda: {"choices": [x.name for x in sd_upscalers]}), + "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in sd_upscalers]}), })) options_templates.update(options_section(('face-restoration', "Face restoration"), { From e615d4f9d101e2712c7c2d0e3e8feb19cb430c74 Mon Sep 17 00:00:00 2001 From: Hanusz Leszek Date: Sun, 2 Oct 2022 21:08:23 +0200 Subject: [PATCH 052/460] Convert folder icon surrogate pair to valid utf8 --- javascript/hints.js | 2 +- modules/ui.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index 84694eeb3..e72e93381 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -15,7 +15,7 @@ titles = { "\u267b\ufe0f": "Reuse seed from last generation, mostly useful if it was randomed", "\u{1f3a8}": "Add a random artist to the prompt.", "\u2199\ufe0f": "Read generation parameters from prompt into user interface.", - "\uD83D\uDCC2": "Open images output directory", + "\u{1f4c2}": "Open images output directory", "Inpaint a part of image": "Draw a mask over an image, and the script will regenerate the masked area with content according to prompt", "SD upscale": "Upscale image normally, split result into tiles, improve each tile using img2img, merge whole image back", diff --git a/modules/ui.py b/modules/ui.py index d9d02ecef..164321512 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -69,7 +69,7 @@ random_symbol = '\U0001f3b2\ufe0f' # 🎲️ reuse_symbol = '\u267b\ufe0f' # ♻️ art_symbol = '\U0001f3a8' # 🎨 paste_symbol = '\u2199\ufe0f' # ↙ -folder_symbol = '\uD83D\uDCC2' +folder_symbol = '\U0001f4c2' # 📂 def plaintext_to_html(text): text = "
<p>" + "<br>\n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "</p>
" From 34c638142eaa57f89b86545ba3c72085036398bb Mon Sep 17 00:00:00 2001 From: hentailord85ez <112723046+hentailord85ez@users.noreply.github.com> Date: Fri, 30 Sep 2022 22:38:14 +0100 Subject: [PATCH 053/460] Fixed when eta = 0 Unexpected behavior when using eta = 0 in something like XY, but your default eta was set to something not 0. --- modules/sd_samplers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 9316875ab..dbf570d2c 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -127,7 +127,7 @@ class VanillaStableDiffusionSampler: return res def initialize(self, p): - self.eta = p.eta or opts.eta_ddim + self.eta = p.eta if p.eta is not None else opts.eta_ddim for fieldname in ['p_sample_ddim', 'p_sample_plms']: if hasattr(self.sampler, fieldname): From 36ea4ac0f5844e5c8dec124edbdb714ccdd6013c Mon Sep 17 00:00:00 2001 From: RnDMonkey Date: Sun, 2 Oct 2022 22:21:16 -0700 Subject: [PATCH 054/460] moved no-style return outside join function --- modules/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index bba55158e..1a046aca6 100644 --- a/modules/images.py +++ b/modules/images.py @@ -315,7 +315,7 @@ def apply_filename_pattern(x, p, seed, prompt): #currently disabled if using the save button, will work otherwise # if enabled it will cause a bug because styles is not included in the save_files data dictionary if hasattr(p, "styles"): - x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"] or "None"), replace_spaces=False)) + x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"]) or "None", replace_spaces=False)) x = x.replace("[sampler]", sanitize_filename_part(sd_samplers.samplers[p.sampler_index].name, replace_spaces=False)) From 6491b09c24ea77f1f69990ea80a216f9ce319589 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 3 Oct 2022 08:53:52 +0300 Subject: [PATCH 055/460] use existing function for gfpgan --- modules/gfpgan_model.py | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index bb30d7330..dd3fbcab1 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -97,11 +97,7 @@ def setup_model(dirname): return "GFPGAN" def restore(self, np_image): - np_image_bgr = np_image[:, :, ::-1] - cropped_faces, restored_faces, gfpgan_output_bgr = gfpgann().enhance(np_image_bgr, has_aligned=False, only_center_face=False, paste_back=True) - np_image = gfpgan_output_bgr[:, :, ::-1] - - return np_image + return gfpgan_fix_faces(np_image) shared.face_restorers.append(FaceRestorerGFPGAN()) except Exception: From 43a74fa595003321200a40bd2431e56c245e75ed Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 3 Oct 2022 11:48:19 +0300 Subject: [PATCH 056/460] batch processing for img2img with an empty output directory, by request --- modules/img2img.py | 7 +++++-- modules/ui.py | 2 +- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index f4455c90f..2ff8e2617 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -23,8 +23,10 @@ def process_batch(p, input_dir, output_dir, args): print(f"Will process {len(images)} images, creating {p.n_iter * p.batch_size} new images for each.") + save_normally = output_dir == '' + p.do_not_save_grid = True - p.do_not_save_samples = True + 
p.do_not_save_samples = not save_normally state.job_count = len(images) * p.n_iter @@ -48,7 +50,8 @@ def process_batch(p, input_dir, output_dir, args): left, right = os.path.splitext(filename) filename = f"{left}-{n}{right}" - processed_image.save(os.path.join(output_dir, filename)) + if not save_normally: + processed_image.save(os.path.join(output_dir, filename)) def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: str, init_img, init_img_with_mask, init_img_inpaint, init_mask_inpaint, mask_mode, steps: int, sampler_index: int, mask_blur: int, inpainting_fill: int, restore_faces: bool, tiling: bool, n_iter: int, batch_size: int, cfg_scale: float, denoising_strength: float, seed: int, subseed: int, subseed_strength: float, seed_resize_from_h: int, seed_resize_from_w: int, seed_enable_extras: bool, height: int, width: int, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, *args): diff --git a/modules/ui.py b/modules/ui.py index 164321512..55f7aa953 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -658,7 +658,7 @@ def create_ui(wrap_gradio_gpu_call): with gr.TabItem('Batch img2img', id='batch'): hidden = '
Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else '' - gr.HTML(f"

<p>Process images in a directory on the same machine where the server is running.{hidden}</p>

") + gr.HTML(f"

<p>Process images in a directory on the same machine where the server is running.<br>Use an empty output directory to save pictures normally instead of writing to the output directory.{hidden}</p>

") img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs) img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs) From 2865ef4b9ab16d56326cc805541bebcf01d099bc Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Mon, 3 Oct 2022 13:10:03 +0300 Subject: [PATCH 057/460] fix broken date in TI --- modules/textual_inversion/textual_inversion.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 8686f5347..cd9f34984 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -164,7 +164,7 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps, filename = os.path.join(shared.cmd_opts.embeddings_dir, f'{embedding_name}.pt') - log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%d-%m"), embedding_name) + log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), embedding_name) if save_embedding_every > 0: embedding_dir = os.path.join(log_directory, "embeddings") From 2a7f48cdb8dcf9acb02610cccae0d1ee5d260bc2 Mon Sep 17 00:00:00 2001 From: fuzzytent Date: Fri, 30 Sep 2022 16:02:16 +0200 Subject: [PATCH 058/460] Improve styling of gallery items, particularly in dark mode --- style.css | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/style.css b/style.css index 9709c4eec..e11316b96 100644 --- a/style.css +++ b/style.css @@ -403,3 +403,7 @@ input[type="range"]{ .red { color: red; } + +.gallery-item { + --tw-bg-opacity: 0 !important; +} From 5ef0baf5eaec7f21a1666af424405cbee19f3764 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 08:52:11 +0300 Subject: [PATCH 059/460] add support for gelbooru tags in filenames for textual inversion --- modules/textual_inversion/dataset.py | 7 +++++-- modules/textual_inversion/preprocess.py | 4 +++- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/modules/textual_inversion/dataset.py b/modules/textual_inversion/dataset.py index e8394ff65..7c44ea5be 100644 --- a/modules/textual_inversion/dataset.py +++ b/modules/textual_inversion/dataset.py @@ -9,6 +9,9 @@ from torchvision import transforms import random import tqdm from modules import devices +import re + +re_tag = re.compile(r"[a-zA-Z][_\w\d()]+") class PersonalizedBase(Dataset): @@ -38,8 +41,8 @@ class PersonalizedBase(Dataset): image = image.resize((self.width, self.height), PIL.Image.BICUBIC) filename = os.path.basename(path) - filename_tokens = os.path.splitext(filename)[0].replace('_', '-').replace(' ', '-').split('-') - filename_tokens = [token for token in filename_tokens if token.isalpha()] + filename_tokens = os.path.splitext(filename)[0] + filename_tokens = re_tag.findall(filename_tokens) npimage = np.array(image).astype(np.uint8) npimage = (npimage / 127.5 - 1.0).astype(np.float32) diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 209e928ff..f545a9937 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -26,7 +26,9 @@ def preprocess(process_src, process_dst, process_flip, process_split, process_ca if process_caption: caption = "-" + shared.interrogator.generate_caption(image) else: - caption = "" + caption = filename + caption = os.path.splitext(caption)[0] + caption = os.path.basename(caption) image.save(os.path.join(dst, 
f"{index:05}-{subindex[0]}{caption}.png")) subindex[0] += 1 From 1c5604791da7e57f40880698666b6617a1754c65 Mon Sep 17 00:00:00 2001 From: DoTheSneedful Date: Mon, 3 Oct 2022 22:20:09 -0400 Subject: [PATCH 060/460] Add a prompt order option to XY plot script --- scripts/xy_grid.py | 40 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 38 insertions(+), 2 deletions(-) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 146663b0a..044c30e61 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -1,5 +1,6 @@ from collections import namedtuple from copy import copy +from itertools import permutations import random from PIL import Image @@ -28,6 +29,27 @@ def apply_prompt(p, x, xs): p.prompt = p.prompt.replace(xs[0], x) p.negative_prompt = p.negative_prompt.replace(xs[0], x) +def apply_order(p, x, xs): + token_order = [] + + # Initally grab the tokens from the prompt so they can be later be replaced in order of earliest seen in the prompt + for token in x: + token_order.append((p.prompt.find(token), token)) + + token_order.sort(key=lambda t: t[0]) + + search_from_pos = 0 + for idx, token in enumerate(x): + original_pos, old_token = token_order[idx] + + # Get position of the token again as it will likely change as tokens are being replaced + pos = p.prompt.find(old_token) + if original_pos >= 0: + # Avoid trying to replace what was just replaced by searching later in the prompt string + p.prompt = p.prompt[0:search_from_pos] + p.prompt[search_from_pos:].replace(old_token, token, 1) + + search_from_pos = pos + len(token) + samplers_dict = {} for i, sampler in enumerate(modules.sd_samplers.samplers): @@ -60,7 +82,8 @@ def format_value_add_label(p, opt, x): def format_value(p, opt, x): if type(x) == float: x = round(x, 8) - + if type(x) == type(list()): + x = str(x) return x def do_nothing(p, x, xs): @@ -89,6 +112,7 @@ axis_options = [ AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label), AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label), AxisOption("Eta", float, apply_field("eta"), format_value_add_label), + AxisOption("Prompt order", type(list()), apply_order, format_value), AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label), # as it is now all AxisOptionImg2Img items must go after AxisOption ones ] @@ -159,7 +183,11 @@ class Script(scripts.Script): if opt.label == 'Nothing': return [0] - valslist = [x.strip() for x in vals.split(",")] + if opt.type == type(list()): + valslist = [x for x in vals] + else: + valslist = [x.strip() for x in vals.split(",")] + if opt.type == int: valslist_ext = [] @@ -212,9 +240,17 @@ class Script(scripts.Script): return valslist x_opt = axis_options[x_type] + + if x_opt.label == "Prompt order": + x_values = list(permutations([x.strip() for x in x_values.split(",")])) + xs = process_axis(x_opt, x_values) y_opt = axis_options[y_type] + + if y_opt.label == "Prompt order": + y_values = list(permutations([y.strip() for y in y_values.split(",")])) + ys = process_axis(y_opt, y_values) def fix_axis_seeds(axis_opt, axis_list): From 1a6d40db35656083d5bf9d3a3430b45fda4e85eb Mon Sep 17 00:00:00 2001 From: DoTheSneedful Date: Tue, 4 Oct 2022 00:18:15 -0400 Subject: [PATCH 061/460] Fix token ordering in prompt order XY plot --- scripts/xy_grid.py | 13 +++++-------- 1 file changed, 5 insertions(+), 8 deletions(-) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 044c30e61..5bcd39217 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -32,24 +32,21 
@@ def apply_prompt(p, x, xs): def apply_order(p, x, xs): token_order = [] - # Initally grab the tokens from the prompt so they can be later be replaced in order of earliest seen in the prompt + # Initally grab the tokens from the prompt so they can be be replaced in order of earliest seen for token in x: token_order.append((p.prompt.find(token), token)) token_order.sort(key=lambda t: t[0]) search_from_pos = 0 - for idx, token in enumerate(x): - original_pos, old_token = token_order[idx] - + for idx, (original_pos, old_token) in enumerate(token_order): # Get position of the token again as it will likely change as tokens are being replaced - pos = p.prompt.find(old_token) + pos = search_from_pos + p.prompt[search_from_pos:].find(old_token) if original_pos >= 0: # Avoid trying to replace what was just replaced by searching later in the prompt string - p.prompt = p.prompt[0:search_from_pos] + p.prompt[search_from_pos:].replace(old_token, token, 1) - - search_from_pos = pos + len(token) + p.prompt = p.prompt[0:search_from_pos] + p.prompt[search_from_pos:].replace(old_token, x[idx], 1) + search_from_pos = pos + len(x[idx]) samplers_dict = {} for i, sampler in enumerate(modules.sd_samplers.samplers): From 56371153b545e3a43c3a5f206264019af361f3af Mon Sep 17 00:00:00 2001 From: DoTheSneedful Date: Tue, 4 Oct 2022 01:07:36 -0400 Subject: [PATCH 062/460] XY plot prompt order simplify logic --- scripts/xy_grid.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 5bcd39217..7def47f57 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -38,15 +38,21 @@ def apply_order(p, x, xs): token_order.sort(key=lambda t: t[0]) - search_from_pos = 0 - for idx, (original_pos, old_token) in enumerate(token_order): - # Get position of the token again as it will likely change as tokens are being replaced - pos = search_from_pos + p.prompt[search_from_pos:].find(old_token) - if original_pos >= 0: - # Avoid trying to replace what was just replaced by searching later in the prompt string - p.prompt = p.prompt[0:search_from_pos] + p.prompt[search_from_pos:].replace(old_token, x[idx], 1) + prompt_parts = [] - search_from_pos = pos + len(x[idx]) + # Split the prompt up, taking out the tokens + for _, token in token_order: + n = p.prompt.find(token) + prompt_parts.append(p.prompt[0:n]) + p.prompt = p.prompt[n + len(token):] + + # Rebuild the prompt with the tokens in the order we want + prompt_tmp = "" + for idx, part in enumerate(prompt_parts): + prompt_tmp += part + prompt_tmp += x[idx] + p.prompt = prompt_tmp + p.prompt + samplers_dict = {} for i, sampler in enumerate(modules.sd_samplers.samplers): From 556c36b9607e3f4eacdddc85f8e7a78b29476ea7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 09:18:00 +0300 Subject: [PATCH 063/460] add hint, refactor code for #1607 --- javascript/hints.js | 1 + scripts/xy_grid.py | 35 ++++++++++++++++++----------------- 2 files changed, 19 insertions(+), 17 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index e72e93381..8adcd983e 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -47,6 +47,7 @@ titles = { "Custom code": "Run Python code. Advanced user only. 
Must run program with --allow-code for this to work", "Prompt S/R": "Separate a list of words with commas, and the first word will be used as a keyword: script will search for this word in the prompt, and replace it with others", + "Prompt order": "Separate a list of words with commas, and the script will make a variation of prompt with those words for their every possible order", "Tiling": "Produce an image that can be tiled.", "Tile overlap": "For SD upscale, how much overlap in pixels should there be between tiles. Tiles overlap so that when they are merged back into one picture, there is no clearly visible seam.", diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 7def47f57..1237e754d 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -29,10 +29,11 @@ def apply_prompt(p, x, xs): p.prompt = p.prompt.replace(xs[0], x) p.negative_prompt = p.negative_prompt.replace(xs[0], x) + def apply_order(p, x, xs): token_order = [] - # Initally grab the tokens from the prompt so they can be be replaced in order of earliest seen + # Initally grab the tokens from the prompt, so they can be replaced in order of earliest seen for token in x: token_order.append((p.prompt.find(token), token)) @@ -85,17 +86,26 @@ def format_value_add_label(p, opt, x): def format_value(p, opt, x): if type(x) == float: x = round(x, 8) - if type(x) == type(list()): - x = str(x) return x + +def format_value_join_list(p, opt, x): + return ", ".join(x) + + def do_nothing(p, x, xs): pass + def format_nothing(p, opt, x): return "" +def str_permutations(x): + """dummy function for specifying it in AxisOption's type when you want to get a list of permutations""" + return x + + AxisOption = namedtuple("AxisOption", ["label", "type", "apply", "format_value"]) AxisOptionImg2Img = namedtuple("AxisOptionImg2Img", ["label", "type", "apply", "format_value"]) @@ -108,6 +118,7 @@ axis_options = [ AxisOption("Steps", int, apply_field("steps"), format_value_add_label), AxisOption("CFG Scale", float, apply_field("cfg_scale"), format_value_add_label), AxisOption("Prompt S/R", str, apply_prompt, format_value), + AxisOption("Prompt order", str_permutations, apply_order, format_value_join_list), AxisOption("Sampler", str, apply_sampler, format_value), AxisOption("Checkpoint name", str, apply_checkpoint, format_value), AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label), @@ -115,7 +126,6 @@ axis_options = [ AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label), AxisOption("Sigma noise", float, apply_field("s_noise"), format_value_add_label), AxisOption("Eta", float, apply_field("eta"), format_value_add_label), - AxisOption("Prompt order", type(list()), apply_order, format_value), AxisOptionImg2Img("Denoising", float, apply_field("denoising_strength"), format_value_add_label), # as it is now all AxisOptionImg2Img items must go after AxisOption ones ] @@ -158,6 +168,7 @@ re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*") re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*") + class Script(scripts.Script): def title(self): return "X/Y plot" @@ -186,11 +197,7 @@ class Script(scripts.Script): if opt.label == 'Nothing': return [0] - if opt.type == type(list()): - valslist = [x for x in vals] - else: - valslist = [x.strip() for x in vals.split(",")] - + valslist = [x.strip() for x in vals.split(",")] 
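
An illustrative aside, not part of the patch itself: str_permutations is an identity function that serves purely as a type tag, letting process_axis distinguish the "Prompt order" axis from ordinary string axes and expand its comma-separated value into every permutation. A minimal standalone sketch of that dispatch, using only the Python standard library (process_axis_values is a made-up name for illustration):

from itertools import permutations

def str_permutations(x):
    # identity function, used only as a marker type for the axis option
    return x

def process_axis_values(axis_type, vals):
    valslist = [x.strip() for x in vals.split(",")]
    if axis_type == str_permutations:
        # "red, green, blue" becomes all 3! = 6 orderings, each a tuple of tokens
        return list(permutations(valslist))
    return [axis_type(x) for x in valslist]

print(process_axis_values(str_permutations, "red, green, blue"))

Each resulting tuple is later joined back into a prompt string by format_value_join_list, which is why the formatter added above simply does ", ".join(x).
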
if opt.type == int: valslist_ext = [] @@ -237,23 +244,17 @@ class Script(scripts.Script): valslist_ext.append(val) valslist = valslist_ext + elif opt.type == str_permutations: + valslist = list(permutations(valslist)) valslist = [opt.type(x) for x in valslist] return valslist x_opt = axis_options[x_type] - - if x_opt.label == "Prompt order": - x_values = list(permutations([x.strip() for x in x_values.split(",")])) - xs = process_axis(x_opt, x_values) y_opt = axis_options[y_type] - - if y_opt.label == "Prompt order": - y_values = list(permutations([y.strip() for y in y_values.split(",")])) - ys = process_axis(y_opt, y_values) def fix_axis_seeds(axis_opt, axis_list): From eeab7aedf532680a6ae9058ee272450bb07e41eb Mon Sep 17 00:00:00 2001 From: brkirch Date: Tue, 4 Oct 2022 04:24:35 -0400 Subject: [PATCH 064/460] Add --use-cpu command line option Remove MPS detection to use CPU for GFPGAN / CodeFormer and add a --use-cpu command line option. --- modules/devices.py | 5 ++--- modules/esrgan_model.py | 9 ++++----- modules/scunet_model.py | 8 ++++---- modules/shared.py | 9 +++++++-- 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/modules/devices.py b/modules/devices.py index 5d9c7a076..b5a0cd29e 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -1,8 +1,8 @@ import torch -# has_mps is only available in nightly pytorch (for now), `getattr` for compatibility from modules import errors +# has_mps is only available in nightly pytorch (for now), `getattr` for compatibility has_mps = getattr(torch, 'has_mps', False) cpu = torch.device("cpu") @@ -32,8 +32,7 @@ def enable_tf32(): errors.run(enable_tf32, "Enabling TF32") -device = get_optimal_device() -device_gfpgan = device_codeformer = cpu if device.type == 'mps' else device +device = device_gfpgan = device_esrgan = device_scunet = device_codeformer = get_optimal_device() dtype = torch.float16 def randn(seed, shape): diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index 4aed9283c..d17e730f9 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -6,8 +6,7 @@ from PIL import Image from basicsr.utils.download_util import load_file_from_url import modules.esrgam_model_arch as arch -from modules import shared, modelloader, images -from modules.devices import has_mps +from modules import shared, modelloader, images, devices from modules.paths import models_path from modules.upscaler import Upscaler, UpscalerData from modules.shared import opts @@ -97,7 +96,7 @@ class UpscalerESRGAN(Upscaler): model = self.load_model(selected_model) if model is None: return img - model.to(shared.device) + model.to(devices.device_esrgan) img = esrgan_upscale(model, img) return img @@ -112,7 +111,7 @@ class UpscalerESRGAN(Upscaler): print("Unable to load %s from %s" % (self.model_path, filename)) return None - pretrained_net = torch.load(filename, map_location='cpu' if has_mps else None) + pretrained_net = torch.load(filename, map_location='cpu' if shared.device.type == 'mps' else None) crt_model = arch.RRDBNet(3, 3, 64, 23, gc=32) pretrained_net = fix_model_layers(crt_model, pretrained_net) @@ -127,7 +126,7 @@ def upscale_without_tiling(model, img): img = img[:, :, ::-1] img = np.moveaxis(img, 2, 0) / 255 img = torch.from_numpy(img).float() - img = img.unsqueeze(0).to(shared.device) + img = img.unsqueeze(0).to(devices.device_esrgan) with torch.no_grad(): output = model(img) output = output.squeeze().float().cpu().clamp_(0, 1).numpy() diff --git a/modules/scunet_model.py b/modules/scunet_model.py index 7987ac145..fb64b7409 
100644 --- a/modules/scunet_model.py +++ b/modules/scunet_model.py @@ -8,7 +8,7 @@ import torch from basicsr.utils.download_util import load_file_from_url import modules.upscaler -from modules import shared, modelloader +from modules import devices, modelloader from modules.paths import models_path from modules.scunet_model_arch import SCUNet as net @@ -51,12 +51,12 @@ class UpscalerScuNET(modules.upscaler.Upscaler): if model is None: return img - device = shared.device + device = devices.device_scunet img = np.array(img) img = img[:, :, ::-1] img = np.moveaxis(img, 2, 0) / 255 img = torch.from_numpy(img).float() - img = img.unsqueeze(0).to(shared.device) + img = img.unsqueeze(0).to(device) img = img.to(device) with torch.no_grad(): @@ -69,7 +69,7 @@ class UpscalerScuNET(modules.upscaler.Upscaler): return PIL.Image.fromarray(output, 'RGB') def load_model(self, path: str): - device = shared.device + device = devices.device_scunet if "http" in path: filename = load_file_from_url(url=self.model_url, model_dir=self.model_path, file_name="%s.pth" % self.name, progress=True) diff --git a/modules/shared.py b/modules/shared.py index 2a599e9cf..7899ab8d1 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -12,7 +12,7 @@ import modules.interrogate import modules.memmon import modules.sd_models import modules.styles -from modules.devices import get_optimal_device +import modules.devices as devices from modules.paths import script_path, sd_path sd_model_file = os.path.join(script_path, 'model.ckpt') @@ -46,6 +46,7 @@ parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.") parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization") parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find") +parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU for specified modules", default=[]) parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests") parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None) parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False) @@ -63,7 +64,11 @@ parser.add_argument("--enable-console-prompts", action='store_true', help="print cmd_opts = parser.parse_args() -device = get_optimal_device() + +devices.device, devices.device_gfpgan, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \ +(devices.cpu if x in cmd_opts.use_cpu else devices.get_optimal_device() for x in ['SD', 'GFPGAN', 'ESRGAN', 'SCUNet', 'CodeFormer']) + +device = devices.device batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not (cmd_opts.lowvram or cmd_opts.medvram) parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram From 27ddc24fdee1fbe709054a43235ab7f9c51b3e9f Mon Sep 17 00:00:00 2001 From: brkirch Date: Tue, 4 Oct 2022 05:18:17 -0400 Subject: [PATCH 065/460] Add BSRGAN to --add-cpu --- modules/bsrgan_model.py | 6 +++--- modules/devices.py | 2 
+- modules/shared.py | 6 +++--- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/modules/bsrgan_model.py b/modules/bsrgan_model.py index e62c66577..3bd80791a 100644 --- a/modules/bsrgan_model.py +++ b/modules/bsrgan_model.py @@ -8,7 +8,7 @@ import torch from basicsr.utils.download_util import load_file_from_url import modules.upscaler -from modules import shared, modelloader +from modules import devices, modelloader from modules.bsrgan_model_arch import RRDBNet from modules.paths import models_path @@ -44,13 +44,13 @@ class UpscalerBSRGAN(modules.upscaler.Upscaler): model = self.load_model(selected_file) if model is None: return img - model.to(shared.device) + model.to(devices.device_bsrgan) torch.cuda.empty_cache() img = np.array(img) img = img[:, :, ::-1] img = np.moveaxis(img, 2, 0) / 255 img = torch.from_numpy(img).float() - img = img.unsqueeze(0).to(shared.device) + img = img.unsqueeze(0).to(devices.device_bsrgan) with torch.no_grad(): output = model(img) output = output.squeeze().float().cpu().clamp_(0, 1).numpy() diff --git a/modules/devices.py b/modules/devices.py index b5a0cd29e..b78996322 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -32,7 +32,7 @@ def enable_tf32(): errors.run(enable_tf32, "Enabling TF32") -device = device_gfpgan = device_esrgan = device_scunet = device_codeformer = get_optimal_device() +device = device_gfpgan = device_bsrgan = device_esrgan = device_scunet = device_codeformer = get_optimal_device() dtype = torch.float16 def randn(seed, shape): diff --git a/modules/shared.py b/modules/shared.py index 7899ab8d1..95b98a06e 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -46,7 +46,7 @@ parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. 
By default, it's on for torch.cuda and off for other torch devices.") parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization") parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find") -parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU for specified modules", default=[]) +parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU for specified modules", default=[]) parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests") parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None) parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False) @@ -65,8 +65,8 @@ parser.add_argument("--enable-console-prompts", action='store_true', help="print cmd_opts = parser.parse_args() -devices.device, devices.device_gfpgan, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \ -(devices.cpu if x in cmd_opts.use_cpu else devices.get_optimal_device() for x in ['SD', 'GFPGAN', 'ESRGAN', 'SCUNet', 'CodeFormer']) +devices.device, devices.device_gfpgan, devices.device_bsrgan, devices.device_esrgan, devices.device_scunet, devices.device_codeformer = \ +(devices.cpu if x in cmd_opts.use_cpu else devices.get_optimal_device() for x in ['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer']) device = devices.device From dc9c5a97742e3a34d37da7108642d8adc0dc5858 Mon Sep 17 00:00:00 2001 From: brkirch Date: Tue, 4 Oct 2022 05:22:50 -0400 Subject: [PATCH 066/460] Modify --add-cpu description --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 95b98a06e..25aff5b0e 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -46,7 +46,7 @@ parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. 
By default, it's on for torch.cuda and off for other torch devices.") parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization") parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find") -parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU for specified modules", default=[]) +parser.add_argument("--use-cpu", nargs='+',choices=['SD', 'GFPGAN', 'BSRGAN', 'ESRGAN', 'SCUNet', 'CodeFormer'], help="use CPU as torch device for specified modules", default=[]) parser.add_argument("--listen", action='store_true', help="launch gradio with 0.0.0.0 as server name, allowing to respond to network requests") parser.add_argument("--port", type=int, help="launch gradio with given server port, you need root/admin rights for ports < 1024, defaults to 7860 if available", default=None) parser.add_argument("--show-negative-prompt", action='store_true', help="does not do anything", default=False) From 6c6ae28bf5fd1e8bc3e8f64a3430b6f29f338f77 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 12:32:22 +0300 Subject: [PATCH 067/460] send all three of GFPGAN's and codeformer's models to CPU memory instead of just one for #1283 --- modules/codeformer_model.py | 12 ++++++++++-- modules/devices.py | 10 ++++++++++ modules/gfpgan_model.py | 14 ++++++++++++-- modules/processing.py | 16 +++++++++------- 4 files changed, 41 insertions(+), 11 deletions(-) diff --git a/modules/codeformer_model.py b/modules/codeformer_model.py index a29f38550..e6d9fa4f4 100644 --- a/modules/codeformer_model.py +++ b/modules/codeformer_model.py @@ -69,10 +69,14 @@ def setup_model(dirname): self.net = net self.face_helper = face_helper - self.net.to(devices.device_codeformer) return net, face_helper + def send_model_to(self, device): + self.net.to(device) + self.face_helper.face_det.to(device) + self.face_helper.face_parse.to(device) + def restore(self, np_image, w=None): np_image = np_image[:, :, ::-1] @@ -82,6 +86,8 @@ def setup_model(dirname): if self.net is None or self.face_helper is None: return np_image + self.send_model_to(devices.device_codeformer) + self.face_helper.clean_all() self.face_helper.read_image(np_image) self.face_helper.get_face_landmarks_5(only_center_face=False, resize=640, eye_dist_threshold=5) @@ -113,8 +119,10 @@ def setup_model(dirname): if original_resolution != restored_img.shape[0:2]: restored_img = cv2.resize(restored_img, (0, 0), fx=original_resolution[1]/restored_img.shape[1], fy=original_resolution[0]/restored_img.shape[0], interpolation=cv2.INTER_LINEAR) + self.face_helper.clean_all() + if shared.opts.face_restoration_unload: - self.net.to(devices.cpu) + self.send_model_to(devices.cpu) return restored_img diff --git a/modules/devices.py b/modules/devices.py index ff82f2f64..12aab6652 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -1,3 +1,5 @@ +import contextlib + import torch # has_mps is only available in nightly pytorch (for now), `getattr` for compatibility @@ -57,3 +59,11 @@ def randn_without_seed(shape): return torch.randn(shape, device=device) + +def autocast(): + from modules import shared + + if dtype == torch.float32 or shared.cmd_opts.precision == "full": + return contextlib.nullcontext() + + return torch.autocast("cuda") diff --git a/modules/gfpgan_model.py b/modules/gfpgan_model.py index 
dd3fbcab1..5586b554b 100644 --- a/modules/gfpgan_model.py +++ b/modules/gfpgan_model.py @@ -37,22 +37,32 @@ def gfpgann(): print("Unable to load gfpgan model!") return None model = gfpgan_constructor(model_path=model_file, upscale=1, arch='clean', channel_multiplier=2, bg_upsampler=None) - model.gfpgan.to(shared.device) loaded_gfpgan_model = model return model +def send_model_to(model, device): + model.gfpgan.to(device) + model.face_helper.face_det.to(device) + model.face_helper.face_parse.to(device) + + def gfpgan_fix_faces(np_image): model = gfpgann() if model is None: return np_image + + send_model_to(model, devices.device) + np_image_bgr = np_image[:, :, ::-1] cropped_faces, restored_faces, gfpgan_output_bgr = model.enhance(np_image_bgr, has_aligned=False, only_center_face=False, paste_back=True) np_image = gfpgan_output_bgr[:, :, ::-1] + model.face_helper.clean_all() + if shared.opts.face_restoration_unload: - model.gfpgan.to(devices.cpu) + send_model_to(model, devices.cpu) return np_image diff --git a/modules/processing.py b/modules/processing.py index 0a4b6198f..9cbecdd83 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1,4 +1,3 @@ -import contextlib import json import math import os @@ -330,9 +329,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed: infotexts = [] output_images = [] - precision_scope = torch.autocast if cmd_opts.precision == "autocast" else contextlib.nullcontext - ema_scope = (contextlib.nullcontext if cmd_opts.lowvram else p.sd_model.ema_scope) - with torch.no_grad(), precision_scope("cuda"), ema_scope(): + + with torch.no_grad(): p.init(all_prompts, all_seeds, all_subseeds) if state.job_count == -1: @@ -351,8 +349,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed: #uc = p.sd_model.get_learned_conditioning(len(prompts) * [p.negative_prompt]) #c = p.sd_model.get_learned_conditioning(prompts) - uc = prompt_parser.get_learned_conditioning(len(prompts) * [p.negative_prompt], p.steps) - c = prompt_parser.get_learned_conditioning(prompts, p.steps) + with devices.autocast(): + uc = prompt_parser.get_learned_conditioning(len(prompts) * [p.negative_prompt], p.steps) + c = prompt_parser.get_learned_conditioning(prompts, p.steps) if len(model_hijack.comments) > 0: for comment in model_hijack.comments: @@ -361,7 +360,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if p.n_iter > 1: shared.state.job = f"Batch {n+1} out of {p.n_iter}" - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength) + with devices.autocast(): + samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength).to(devices.dtype) + if state.interrupted: # if we are interruped, sample returns just noise @@ -386,6 +387,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: devices.torch_gc() x_sample = modules.face_restoration.restore_faces(x_sample) + devices.torch_gc() image = Image.fromarray(x_sample) From 2f1b61d97987ae0a52a7dfc6bc99c68928bdb594 Mon Sep 17 00:00:00 2001 From: dan Date: Mon, 3 Oct 2022 19:25:36 +0800 Subject: [PATCH 068/460] Allow nested structures inside schedules --- modules/prompt_parser.py | 119 +++++++++++++++++--------------------- requirements.txt | 1 + requirements_versions.txt | 1 + 3 files changed, 55 insertions(+), 66 deletions(-) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index e811eb9ec..99c8ed99c 100644 --- 
a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -1,20 +1,11 @@ import re from collections import namedtuple import torch +from lark import Lark, Transformer, Visitor +import functools import modules.shared as shared -re_prompt = re.compile(r''' -(.*?) -\[ - ([^]:]+): - (?:([^]:]*):)? - ([0-9]*\.?[0-9]+) -] -| -(.+) -''', re.X) - # a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]" # will be represented with prompt_schedule like this (assuming steps=100): # [25, 'fantasy landscape with a mountain and an oak in foreground shoddy'] @@ -25,61 +16,57 @@ re_prompt = re.compile(r''' def get_learned_conditioning_prompt_schedules(prompts, steps): - res = [] - cache = {} - - for prompt in prompts: - prompt_schedule: list[list[str | int]] = [[steps, ""]] - - cached = cache.get(prompt, None) - if cached is not None: - res.append(cached) - continue - - for m in re_prompt.finditer(prompt): - plaintext = m.group(1) if m.group(5) is None else m.group(5) - concept_from = m.group(2) - concept_to = m.group(3) - if concept_to is None: - concept_to = concept_from - concept_from = "" - swap_position = float(m.group(4)) if m.group(4) is not None else None - - if swap_position is not None: - if swap_position < 1: - swap_position = swap_position * steps - swap_position = int(min(swap_position, steps)) - - swap_index = None - found_exact_index = False - for i in range(len(prompt_schedule)): - end_step = prompt_schedule[i][0] - prompt_schedule[i][1] += plaintext - - if swap_position is not None and swap_index is None: - if swap_position == end_step: - swap_index = i - found_exact_index = True - - if swap_position < end_step: - swap_index = i - - if swap_index is not None: - if not found_exact_index: - prompt_schedule.insert(swap_index, [swap_position, prompt_schedule[swap_index][1]]) - - for i in range(len(prompt_schedule)): - end_step = prompt_schedule[i][0] - must_replace = swap_position < end_step - - prompt_schedule[i][1] += concept_to if must_replace else concept_from - - res.append(prompt_schedule) - cache[prompt] = prompt_schedule - #for t in prompt_schedule: - # print(t) - - return res + grammar = r""" + start: prompt + prompt: (emphasized | scheduled | weighted | plain)* + !emphasized: "(" prompt ")" + | "(" prompt ":" prompt ")" + | "[" prompt "]" + scheduled: "[" (prompt ":")? prompt ":" NUMBER "]" + !weighted: "{" weighted_item ("|" weighted_item)* "}" + !weighted_item: prompt (":" prompt)? 
+ plain: /([^\\\[\](){}:|]|\\.)+/ + %import common.SIGNED_NUMBER -> NUMBER + """ + parser = Lark(grammar, parser='lalr') + def collect_steps(steps, tree): + l = [steps] + class CollectSteps(Visitor): + def scheduled(self, tree): + tree.children[-1] = float(tree.children[-1]) + if tree.children[-1] < 1: + tree.children[-1] *= steps + tree.children[-1] = min(steps, int(tree.children[-1])) + l.append(tree.children[-1]) + CollectSteps().visit(tree) + return sorted(set(l)) + def at_step(step, tree): + class AtStep(Transformer): + def scheduled(self, args): + if len(args) == 2: + before, after, when = (), *args + else: + before, after, when = args + yield before if step <= when else after + def start(self, args): + def flatten(x): + if type(x) == str: + yield x + else: + for gen in x: + yield from flatten(gen) + return ''.join(flatten(args[0])) + def plain(self, args): + yield args[0].value + def __default__(self, data, children, meta): + for child in children: + yield from child + return AtStep().transform(tree) + @functools.cache + def get_schedule(prompt): + tree = parser.parse(prompt) + return [[t, at_step(t, tree)] for t in collect_steps(steps, tree)] + return [get_schedule(prompt) for prompt in prompts] ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at_step", "cond"]) diff --git a/requirements.txt b/requirements.txt index d4b337fce..631fe616a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -22,3 +22,4 @@ clean-fid resize-right torchdiffeq kornia +lark diff --git a/requirements_versions.txt b/requirements_versions.txt index 8a9acf205..fdff26878 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -21,3 +21,4 @@ clean-fid==0.1.29 resize-right==0.0.2 torchdiffeq==0.2.3 kornia==0.6.7 +lark==1.1.2 From 61652461242951966e5b4cee83ce359cefa91c17 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 14:23:22 +0300 Subject: [PATCH 069/460] support interrupting after the previous change --- modules/processing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 9cbecdd83..6f5599c7d 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -361,7 +361,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: shared.state.job = f"Batch {n+1} out of {p.n_iter}" with devices.autocast(): - samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength).to(devices.dtype) + samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength) if state.interrupted: @@ -369,6 +369,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed: # use the image collected previously in sampler loop samples_ddim = shared.state.current_latent + samples_ddim = samples_ddim.to(devices.dtype) + x_samples_ddim = p.sd_model.decode_first_stage(samples_ddim) x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0) From d5bba20a58f43a9f984bb67b4e17f48661f6b818 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 14:35:12 +0300 Subject: [PATCH 070/460] ignore errors in parse for purposes of token counting for #1564 --- modules/ui.py | 12 ++++++++++-- 1 file changed, 10 insertions(+), 2 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 55f7aa953..20dc8c379 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -386,14 +386,22 @@ def 
connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: outputs=[seed, dummy_component] ) + def update_token_counter(text, steps): - prompt_schedules = get_learned_conditioning_prompt_schedules([text], steps) + try: + prompt_schedules = get_learned_conditioning_prompt_schedules([text], steps) + except Exception: + # a parsing error can happen here during typing, and we don't want to bother the user with + # messages related to it in console + prompt_schedules = [[[steps, text]]] + flat_prompts = reduce(lambda list1, list2: list1+list2, prompt_schedules) - prompts = [prompt_text for step,prompt_text in flat_prompts] + prompts = [prompt_text for step, prompt_text in flat_prompts] tokens, token_count, max_length = max([model_hijack.tokenize(prompt) for prompt in prompts], key=lambda args: args[1]) style_class = ' class="red"' if (token_count > max_length) else "" return f"{token_count}/{max_length}" + def create_toprow(is_img2img): id_part = "img2img" if is_img2img else "txt2img" From accd00d6b8258c12b5168918a4c546b02357924a Mon Sep 17 00:00:00 2001 From: Justin Riddiough Date: Tue, 4 Oct 2022 01:14:28 -0500 Subject: [PATCH 071/460] Explain how to use second progress bar in pycharm --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 25aff5b0e..11bdf01a7 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -200,7 +200,7 @@ options_templates.update(options_section(('face-restoration', "Face restoration" options_templates.update(options_section(('system', "System"), { "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}), "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), - "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. Broken in PyCharm console."), + "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. In PyCharm select 'emulate terminal in console output'."), })) options_templates.update(options_section(('sd', "Stable Diffusion"), { From ea6b0d98a64290a0305e27126ea59ce1da7959a2 Mon Sep 17 00:00:00 2001 From: Justin Riddiough Date: Tue, 4 Oct 2022 06:38:45 -0500 Subject: [PATCH 072/460] Remove pycharm note, fix typo --- modules/shared.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index 11bdf01a7..a7d13b2d4 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -200,7 +200,7 @@ options_templates.update(options_section(('face-restoration', "Face restoration" options_templates.update(options_section(('system', "System"), { "memmon_poll_rate": OptionInfo(8, "VRAM usage polls per second during generation. Set to 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 40, "step": 1}), "samples_log_stdout": OptionInfo(False, "Always print all generation info to standard output"), - "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job. 
In PyCharm select 'emulate terminal in console output'."), + "multiple_tqdm": OptionInfo(True, "Add a second progress bar to the console that shows progress for an entire job."), })) options_templates.update(options_section(('sd', "Stable Diffusion"), { @@ -209,7 +209,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."), "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds. Requires restart to apply."), - "enable_emphasis": OptionInfo(True, "Eemphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"), + "enable_emphasis": OptionInfo(True, "Emphasis: use (text) to make model pay more attention to text and [text] to make it pay less attention"), "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "filter_nsfw": OptionInfo(False, "Filter NSFW content"), From eec1b39bd54711ca31e43022d2d6ac8c6d7281da Mon Sep 17 00:00:00 2001 From: Milly Date: Tue, 4 Oct 2022 20:16:52 +0900 Subject: [PATCH 073/460] Apply prompt pattern last --- modules/images.py | 43 ++++++++++++++++++++++--------------------- 1 file changed, 22 insertions(+), 21 deletions(-) diff --git a/modules/images.py b/modules/images.py index bba55158e..5b56c7e37 100644 --- a/modules/images.py +++ b/modules/images.py @@ -287,32 +287,13 @@ def apply_filename_pattern(x, p, seed, prompt): if seed is not None: x = x.replace("[seed]", str(seed)) - if prompt is not None: - x = x.replace("[prompt]", sanitize_filename_part(prompt)) - if "[prompt_no_styles]" in x: - prompt_no_style = prompt - for style in shared.prompt_styles.get_style_prompts(p.styles): - if len(style) > 0: - style_parts = [y for y in style.split("{prompt}")] - for part in style_parts: - prompt_no_style = prompt_no_style.replace(part, "").replace(", ,", ",").strip().strip(',') - prompt_no_style = prompt_no_style.replace(style, "").strip().strip(',').strip() - x = x.replace("[prompt_no_styles]", sanitize_filename_part(prompt_no_style, replace_spaces=False)) - - x = x.replace("[prompt_spaces]", sanitize_filename_part(prompt, replace_spaces=False)) - if "[prompt_words]" in x: - words = [x for x in re_nonletters.split(prompt or "") if len(x) > 0] - if len(words) == 0: - words = ["empty"] - x = x.replace("[prompt_words]", sanitize_filename_part(" ".join(words[0:max_prompt_words]), replace_spaces=False)) - if p is not None: x = x.replace("[steps]", str(p.steps)) x = x.replace("[cfg]", str(p.cfg_scale)) x = x.replace("[width]", str(p.width)) x = x.replace("[height]", str(p.height)) - - #currently disabled if using the save button, will work otherwise + + #currently disabled if using the save button, will work otherwise # if enabled it will cause a bug because styles is not included in the save_files data dictionary if hasattr(p, "styles"): x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"] or "None"), replace_spaces=False)) @@ -324,6 +305,26 @@ def apply_filename_pattern(x, p, seed, prompt): x = 
x.replace("[datetime]", datetime.datetime.now().strftime("%Y%m%d%H%M%S")) x = x.replace("[job_timestamp]", shared.state.job_timestamp) + # Apply [prompt] at last. Because it may contain any replacement word.^M + if prompt is not None: + x = x.replace("[prompt]", sanitize_filename_part(prompt)) + if "[prompt_no_styles]" in x: + prompt_no_style = prompt + for style in shared.prompt_styles.get_style_prompts(p.styles): + if len(style) > 0: + style_parts = [y for y in style.split("{prompt}")] + for part in style_parts: + prompt_no_style = prompt_no_style.replace(part, "").replace(", ,", ",").strip().strip(',') + prompt_no_style = prompt_no_style.replace(style, "").strip().strip(',').strip() + x = x.replace("[prompt_no_styles]", sanitize_filename_part(prompt_no_style, replace_spaces=False)) + + x = x.replace("[prompt_spaces]", sanitize_filename_part(prompt, replace_spaces=False)) + if "[prompt_words]" in x: + words = [x for x in re_nonletters.split(prompt or "") if len(x) > 0] + if len(words) == 0: + words = ["empty"] + x = x.replace("[prompt_words]", sanitize_filename_part(" ".join(words[0:max_prompt_words]), replace_spaces=False)) + if cmd_opts.hide_ui_dir_config: x = re.sub(r'^[\\/]+|\.{2,}[\\/]+|[\\/]+\.{2,}', '', x) From 52cef36f6ba169a8e606ecdcaed73d47378f0e8e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 16:54:31 +0300 Subject: [PATCH 074/460] emergency fix for img2img --- modules/processing.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 6f5599c7d..e9c453942 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -331,7 +331,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed: output_images = [] with torch.no_grad(): - p.init(all_prompts, all_seeds, all_subseeds) + with devices.autocast(): + p.init(all_prompts, all_seeds, all_subseeds) if state.job_count == -1: state.job_count = p.n_iter From 957e29a8e9cb8ca069799ec69263e188c89ed6a6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 17:23:48 +0300 Subject: [PATCH 075/460] option to not show images in web ui --- modules/img2img.py | 3 +++ modules/shared.py | 1 + modules/txt2img.py | 3 +++ 3 files changed, 7 insertions(+) diff --git a/modules/img2img.py b/modules/img2img.py index 2ff8e2617..da212d72b 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -129,4 +129,7 @@ def img2img(mode: int, prompt: str, negative_prompt: str, prompt_style: str, pro if opts.samples_log_stdout: print(generation_info_js) + if opts.do_not_show_images: + processed.images = [] + return processed.images, generation_info_js, plaintext_to_html(processed.info) diff --git a/modules/shared.py b/modules/shared.py index a7d13b2d4..ff4e5fa39 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -229,6 +229,7 @@ options_templates.update(options_section(('ui', "User interface"), { "show_progressbar": OptionInfo(True, "Show progressbar"), "show_progress_every_n_steps": OptionInfo(0, "Show show image creation progress every N sampling steps. 
Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}), "return_grid": OptionInfo(True, "Show grid in results for web"), + "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), "font": OptionInfo("", "Font for image grids that have text"), "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), diff --git a/modules/txt2img.py b/modules/txt2img.py index d4406c3c0..e985242b3 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -48,5 +48,8 @@ def txt2img(prompt: str, negative_prompt: str, prompt_style: str, prompt_style2: if opts.samples_log_stdout: print(generation_info_js) + if opts.do_not_show_images: + processed.images = [] + return processed.images, generation_info_js, plaintext_to_html(processed.info) From e1b128d8e46bddb9c0b2fd3ee0eefd57e0527ee0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 17:36:39 +0300 Subject: [PATCH 076/460] do not touch p.seed/p.subseed during processing #1181 --- modules/processing.py | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index e9c453942..8180c63d8 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -248,9 +248,16 @@ def create_random_tensors(shape, seeds, subseeds=None, subseed_strength=0.0, see return x +def get_fixed_seed(seed): + if seed is None or seed == '' or seed == -1: + return int(random.randrange(4294967294)) + + return seed + + def fix_seed(p): - p.seed = int(random.randrange(4294967294)) if p.seed is None or p.seed == '' or p.seed == -1 else p.seed - p.subseed = int(random.randrange(4294967294)) if p.subseed is None or p.subseed == '' or p.subseed == -1 else p.subseed + p.seed = get_fixed_seed(p.seed) + p.subseed = get_fixed_seed(p.subseed) def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0): @@ -292,7 +299,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed: devices.torch_gc() - fix_seed(p) + seed = get_fixed_seed(p.seed) + subseed = get_fixed_seed(p.subseed) if p.outpath_samples is not None: os.makedirs(p.outpath_samples, exist_ok=True) @@ -311,15 +319,15 @@ def process_images(p: StableDiffusionProcessing) -> Processed: else: all_prompts = p.batch_size * p.n_iter * [p.prompt] - if type(p.seed) == list: - all_seeds = p.seed + if type(seed) == list: + all_seeds = seed else: - all_seeds = [int(p.seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(all_prompts))] + all_seeds = [int(seed) + (x if p.subseed_strength == 0 else 0) for x in range(len(all_prompts))] - if type(p.subseed) == list: - all_subseeds = p.subseed + if type(subseed) == list: + all_subseeds = subseed else: - all_subseeds = [int(p.subseed) + x for x in range(len(all_prompts))] + all_subseeds = [int(subseed) + x for x in range(len(all_prompts))] def infotext(iteration=0, position_in_batch=0): return create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration, position_in_batch) From 1eb588cbf19924333b88beaa1ac0041904966640 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 18:02:01 +0300 Subject: [PATCH 077/460] remove functools.cache as some people are having issues with it --- modules/prompt_parser.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 
99c8ed99c..5d58c4ed9 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -29,6 +29,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): %import common.SIGNED_NUMBER -> NUMBER """ parser = Lark(grammar, parser='lalr') + def collect_steps(steps, tree): l = [steps] class CollectSteps(Visitor): @@ -40,6 +41,7 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): l.append(tree.children[-1]) CollectSteps().visit(tree) return sorted(set(l)) + def at_step(step, tree): class AtStep(Transformer): def scheduled(self, args): @@ -62,11 +64,13 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): for child in children: yield from child return AtStep().transform(tree) - @functools.cache + def get_schedule(prompt): tree = parser.parse(prompt) return [[t, at_step(t, tree)] for t in collect_steps(steps, tree)] - return [get_schedule(prompt) for prompt in prompts] + + promptdict = {prompt: get_schedule(prompt) for prompt in set(prompts)} + return [promptdict[prompt] for prompt in prompts] ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at_step", "cond"]) From 90e911fd546e76f879b38a764473569911a0f845 Mon Sep 17 00:00:00 2001 From: Rae Fu Date: Tue, 4 Oct 2022 09:49:51 -0600 Subject: [PATCH 078/460] prompt_parser: allow spaces in schedules, add test, log/ignore errors Only build the parser once (at import time) instead of for each step. doctest is run by simply executing modules/prompt_parser.py --- modules/processing.py | 10 +-- modules/prompt_parser.py | 139 +++++++++++++++++++++++++-------------- 2 files changed, 95 insertions(+), 54 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 8180c63d8..bb94033b1 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -84,7 +84,7 @@ class StableDiffusionProcessing: self.s_tmin = opts.s_tmin self.s_tmax = float('inf') # not representable as a standard ui option self.s_noise = opts.s_noise - + if not seed_enable_extras: self.subseed = -1 self.subseed_strength = 0 @@ -296,7 +296,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: assert(len(p.prompt) > 0) else: assert p.prompt is not None - + devices.torch_gc() seed = get_fixed_seed(p.seed) @@ -359,8 +359,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed: #uc = p.sd_model.get_learned_conditioning(len(prompts) * [p.negative_prompt]) #c = p.sd_model.get_learned_conditioning(prompts) with devices.autocast(): - uc = prompt_parser.get_learned_conditioning(len(prompts) * [p.negative_prompt], p.steps) - c = prompt_parser.get_learned_conditioning(prompts, p.steps) + uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps) + c = prompt_parser.get_learned_conditioning(shared.sd_model, prompts, p.steps) if len(model_hijack.comments) > 0: for comment in model_hijack.comments: @@ -527,7 +527,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): # GC now before running the next img2img to prevent running out of memory x = None devices.torch_gc() - + samples = self.sampler.sample_img2img(self, samples, noise, conditioning, unconditional_conditioning, steps=self.steps) return samples diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 5d58c4ed9..a3b124219 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -1,10 +1,7 @@ import re from collections import namedtuple -import torch -from lark import Lark, Transformer, Visitor -import functools -import modules.shared as shared 
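
A hand-worked sketch of the schedule data this parser produces, mirroring the doctests added below (illustrative only; schedule and prompt_at are made-up names, not the module's API). For steps=10, the prompt "fantasy landscape with a [mountain:lake:0.25]" scales the 0.25 boundary to step int(0.25 * 10) = 2, and each schedule entry records the last step at which its sub-prompt applies:

schedule = [
    [2, "fantasy landscape with a mountain"],   # active while step <= 2
    [10, "fantasy landscape with a lake"],      # active for the remaining steps
]

def prompt_at(step, schedule):
    # same selection rule as reconstruct_cond_batch: first entry not yet passed
    for end_at_step, text in schedule:
        if step <= end_at_step:
            return text
    return schedule[-1][1]

assert prompt_at(2, schedule) == "fantasy landscape with a mountain"
assert prompt_at(3, schedule) == "fantasy landscape with a lake"
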
+import lark # a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]" # will be represented with prompt_schedule like this (assuming steps=100): @@ -14,25 +11,48 @@ import modules.shared as shared # [75, 'fantasy landscape with a lake and an oak in background masterful'] # [100, 'fantasy landscape with a lake and a christmas tree in background masterful'] +schedule_parser = lark.Lark(r""" +!start: (prompt | /[][():]/+)* +prompt: (emphasized | scheduled | plain | WHITESPACE)* +!emphasized: "(" prompt ")" + | "(" prompt ":" prompt ")" + | "[" prompt "]" +scheduled: "[" [prompt ":"] prompt ":" [WHITESPACE] NUMBER "]" +WHITESPACE: /\s+/ +plain: /([^\\\[\]():]|\\.)+/ +%import common.SIGNED_NUMBER -> NUMBER +""") def get_learned_conditioning_prompt_schedules(prompts, steps): - grammar = r""" - start: prompt - prompt: (emphasized | scheduled | weighted | plain)* - !emphasized: "(" prompt ")" - | "(" prompt ":" prompt ")" - | "[" prompt "]" - scheduled: "[" (prompt ":")? prompt ":" NUMBER "]" - !weighted: "{" weighted_item ("|" weighted_item)* "}" - !weighted_item: prompt (":" prompt)? - plain: /([^\\\[\](){}:|]|\\.)+/ - %import common.SIGNED_NUMBER -> NUMBER """ - parser = Lark(grammar, parser='lalr') + >>> g = lambda p: get_learned_conditioning_prompt_schedules([p], 10)[0] + >>> g("test") + [[10, 'test']] + >>> g("a [b:3]") + [[3, 'a '], [10, 'a b']] + >>> g("a [b: 3]") + [[3, 'a '], [10, 'a b']] + >>> g("a [[[b]]:2]") + [[2, 'a '], [10, 'a [[b]]']] + >>> g("[(a:2):3]") + [[3, ''], [10, '(a:2)']] + >>> g("a [b : c : 1] d") + [[1, 'a b d'], [10, 'a c d']] + >>> g("a[b:[c:d:2]:1]e") + [[1, 'abe'], [2, 'ace'], [10, 'ade']] + >>> g("a [unbalanced") + [[10, 'a [unbalanced']] + >>> g("a [b:.5] c") + [[5, 'a c'], [10, 'a b c']] + >>> g("a [{b|d{:.5] c") # not handling this right now + [[5, 'a c'], [10, 'a {b|d{ c']] + >>> g("((a][:b:c [d:3]") + [[3, '((a][:b:c '], [10, '((a][:b:c d']] + """ def collect_steps(steps, tree): l = [steps] - class CollectSteps(Visitor): + class CollectSteps(lark.Visitor): def scheduled(self, tree): tree.children[-1] = float(tree.children[-1]) if tree.children[-1] < 1: @@ -43,13 +63,10 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): return sorted(set(l)) def at_step(step, tree): - class AtStep(Transformer): + class AtStep(lark.Transformer): def scheduled(self, args): - if len(args) == 2: - before, after, when = (), *args - else: - before, after, when = args - yield before if step <= when else after + before, after, _, when = args + yield before or () if step <= when else after def start(self, args): def flatten(x): if type(x) == str: @@ -57,16 +74,22 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): else: for gen in x: yield from flatten(gen) - return ''.join(flatten(args[0])) + return ''.join(flatten(args)) def plain(self, args): yield args[0].value def __default__(self, data, children, meta): for child in children: yield from child return AtStep().transform(tree) - + def get_schedule(prompt): - tree = parser.parse(prompt) + try: + tree = schedule_parser.parse(prompt) + except lark.exceptions.LarkError as e: + if 0: + import traceback + traceback.print_exc() + return [[steps, prompt]] return [[t, at_step(t, tree)] for t in collect_steps(steps, tree)] promptdict = {prompt: get_schedule(prompt) for prompt in set(prompts)} @@ -77,8 +100,7 @@ ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at 
ScheduledPromptBatch = namedtuple("ScheduledPromptBatch", ["shape", "schedules"]) -def get_learned_conditioning(prompts, steps): - +def get_learned_conditioning(model, prompts, steps): res = [] prompt_schedules = get_learned_conditioning_prompt_schedules(prompts, steps) @@ -92,7 +114,7 @@ def get_learned_conditioning(prompts, steps): continue texts = [x[1] for x in prompt_schedule] - conds = shared.sd_model.get_learned_conditioning(texts) + conds = model.get_learned_conditioning(texts) cond_schedule = [] for i, (end_at_step, text) in enumerate(prompt_schedule): @@ -105,12 +127,13 @@ def get_learned_conditioning(prompts, steps): def reconstruct_cond_batch(c: ScheduledPromptBatch, current_step): - res = torch.zeros(c.shape, device=shared.device, dtype=next(shared.sd_model.parameters()).dtype) + param = c.schedules[0][0].cond + res = torch.zeros(c.shape, device=param.device, dtype=param.dtype) for i, cond_schedule in enumerate(c.schedules): target_index = 0 - for curret_index, (end_at, cond) in enumerate(cond_schedule): + for current, (end_at, cond) in enumerate(cond_schedule): if current_step <= end_at: - target_index = curret_index + target_index = current break res[i] = cond_schedule[target_index].cond @@ -148,23 +171,26 @@ def parse_prompt_attention(text): \\ - literal character '\' anything else - just text - Example: - - 'a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).' - - produces: - - [ - ['a ', 1.0], - ['house', 1.5730000000000004], - [' ', 1.1], - ['on', 1.0], - [' a ', 1.1], - ['hill', 0.55], - [', sun, ', 1.1], - ['sky', 1.4641000000000006], - ['.', 1.1] - ] + >>> parse_prompt_attention('normal text') + [['normal text', 1.0]] + >>> parse_prompt_attention('an (important) word') + [['an ', 1.0], ['important', 1.1], [' word', 1.0]] + >>> parse_prompt_attention('(unbalanced') + [['unbalanced', 1.1]] + >>> parse_prompt_attention('\(literal\]') + [['(literal]', 1.0]] + >>> parse_prompt_attention('(unnecessary)(parens)') + [['unnecessaryparens', 1.1]] + >>> parse_prompt_attention('a (((house:1.3)) [on] a (hill:0.5), sun, (((sky))).') + [['a ', 1.0], + ['house', 1.5730000000000004], + [' ', 1.1], + ['on', 1.0], + [' a ', 1.1], + ['hill', 0.55], + [', sun, ', 1.1], + ['sky', 1.4641000000000006], + ['.', 1.1]] """ res = [] @@ -206,4 +232,19 @@ def parse_prompt_attention(text): if len(res) == 0: res = [["", 1.0]] + # merge runs of identical weights + i = 0 + while i + 1 < len(res): + if res[i][1] == res[i + 1][1]: + res[i][0] += res[i + 1][0] + res.pop(i + 1) + else: + i += 1 + return res + +if __name__ == "__main__": + import doctest + doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE) +else: + import torch # doctest faster From b32852ef037251eb3d846af76e2965594e1ac7a5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Tue, 4 Oct 2022 20:49:54 +0300 Subject: [PATCH 079/460] add editor to img2img --- modules/shared.py | 1 + modules/ui.py | 2 +- style.css | 4 ++++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index ff4e5fa39..e52c9b1d1 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -55,6 +55,7 @@ parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide dire parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json')) parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option") parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like 
"username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None) +parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image uploader tool: can be either editor for ctopping, or color-sketch for drawing', choices=["color-sketch", "editor"], default="color-sketch") parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last") parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv')) parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False) diff --git a/modules/ui.py b/modules/ui.py index 20dc8c379..6cd6761b8 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -644,7 +644,7 @@ def create_ui(wrap_gradio_gpu_call): with gr.Tabs(elem_id="mode_img2img") as tabs_img2img_mode: with gr.TabItem('img2img', id='img2img'): - init_img = gr.Image(label="Image for img2img", show_label=False, source="upload", interactive=True, type="pil") + init_img = gr.Image(label="Image for img2img", elem_id="img2img_image", show_label=False, source="upload", interactive=True, type="pil", tool=cmd_opts.gradio_img2img_tool) with gr.TabItem('Inpaint', id='inpaint'): init_img_with_mask = gr.Image(label="Image for inpainting with mask", show_label=False, elem_id="img2maskimg", source="upload", interactive=True, type="pil", tool="sketch", image_mode="RGBA") diff --git a/style.css b/style.css index 39586bf18..e8f4cb752 100644 --- a/style.css +++ b/style.css @@ -403,3 +403,7 @@ input[type="range"]{ .red { color: red; } + +#img2img_image div.h-60{ + height: 480px; +} \ No newline at end of file From ef40e4cd4d383a3405e03f1da3f5b5a1820a8f53 Mon Sep 17 00:00:00 2001 From: xpscyho Date: Tue, 4 Oct 2022 15:12:38 -0400 Subject: [PATCH 080/460] Display time taken in mins, secs when relevant Fixes #1656 --- modules/ui.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index 6cd6761b8..de6342a48 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -196,6 +196,11 @@ def wrap_gradio_call(func, extra_outputs=None): res = extra_outputs_array + [f"
<div class='error'>{plaintext_to_html(type(e).__name__+': '+str(e))}</div>
"] elapsed = time.perf_counter() - t + elapsed_m = int(elapsed // 60) + elapsed_s = elapsed % 60 + elapsed_text = f"{elapsed_s:.2f}s" + if (elapsed_m > 0): + elapsed_text = f"{elapsed_m}m "+elapsed_text if run_memmon: mem_stats = {k: -(v//-(1024*1024)) for k, v in shared.mem_mon.stop().items()} @@ -210,7 +215,7 @@ def wrap_gradio_call(func, extra_outputs=None): vram_html = '' # last item is always HTML - res[-1] += f"
<div class='performance'><p class='time'>Time taken: {elapsed:.2f}s</p>{vram_html}</div>"
+    res[-1] += f"<div class='performance'><p class='time'>Time taken: {elapsed_text}</p>{vram_html}</div>
" shared.state.interrupted = False shared.state.job_count = 0 From 82380d9ac18614c87bebba1b4cfd4b147cc76a18 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Tue, 4 Oct 2022 22:28:50 -0300 Subject: [PATCH 081/460] Removing parts no longer needed to fix vram --- modules/devices.py | 3 +-- modules/processing.py | 21 ++++++++------------- 2 files changed, 9 insertions(+), 15 deletions(-) diff --git a/modules/devices.py b/modules/devices.py index 6db4e57c9..0158b11fc 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -1,7 +1,6 @@ import contextlib import torch -import gc from modules import errors @@ -20,8 +19,8 @@ def get_optimal_device(): return cpu + def torch_gc(): - gc.collect() if torch.cuda.is_available(): torch.cuda.empty_cache() torch.cuda.ipc_collect() diff --git a/modules/processing.py b/modules/processing.py index e7f9c85e1..f666ba811 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -345,8 +345,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if state.job_count == -1: state.job_count = p.n_iter - for n in range(p.n_iter): - with torch.no_grad(), precision_scope("cuda"), ema_scope(): + for n in range(p.n_iter): if state.interrupted: break @@ -395,22 +394,19 @@ def process_images(p: StableDiffusionProcessing) -> Processed: import modules.safety as safety x_samples_ddim = modules.safety.censor_batch(x_samples_ddim) - for i, x_sample in enumerate(x_samples_ddim): - with torch.no_grad(), precision_scope("cuda"), ema_scope(): + for i, x_sample in enumerate(x_samples_ddim): x_sample = 255. * np.moveaxis(x_sample.cpu().numpy(), 0, 2) x_sample = x_sample.astype(np.uint8) - if p.restore_faces: - with torch.no_grad(), precision_scope("cuda"), ema_scope(): + if p.restore_faces: if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration: images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration") + devices.torch_gc() + x_sample = modules.face_restoration.restore_faces(x_sample) devices.torch_gc() - devices.torch_gc() - - with torch.no_grad(), precision_scope("cuda"), ema_scope(): image = Image.fromarray(x_sample) if p.color_corrections is not None and i < len(p.color_corrections): @@ -438,13 +434,12 @@ def process_images(p: StableDiffusionProcessing) -> Processed: infotexts.append(infotext(n, i)) output_images.append(image) - del x_samples_ddim + del x_samples_ddim - devices.torch_gc() + devices.torch_gc() - state.nextjob() + state.nextjob() - with torch.no_grad(), precision_scope("cuda"), ema_scope(): p.color_corrections = None index_of_first_image = 0 From bbdbbd36eda870cf0bd49fdf28476c78919a123e Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Wed, 5 Oct 2022 04:43:05 +0100 Subject: [PATCH 082/460] shared.state.interrupt when restart is requested --- modules/ui.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/ui.py b/modules/ui.py index de6342a48..523ab25b3 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1210,6 +1210,7 @@ def create_ui(wrap_gradio_gpu_call): ) def request_restart(): + shared.state.interrupt() settings_interface.gradio_ref.do_restart = True restart_gradio.click( From 67d011b02eddc20202b654dfea56528de3d5edf7 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Wed, 5 Oct 2022 04:44:22 +0100 Subject: [PATCH 083/460] Show generation progress in window title --- javascript/progressbar.js | 15 +++++++++++++++ 1 
file changed, 15 insertions(+) diff --git a/javascript/progressbar.js b/javascript/progressbar.js index 1e297abbe..3e3220c3f 100644 --- a/javascript/progressbar.js +++ b/javascript/progressbar.js @@ -4,6 +4,21 @@ global_progressbars = {} function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_interrupt, id_preview, id_gallery){ var progressbar = gradioApp().getElementById(id_progressbar) var interrupt = gradioApp().getElementById(id_interrupt) + + if(progressbar && progressbar.offsetParent){ + if(progressbar.innerText){ + let newtitle = 'Stable Diffusion - ' + progressbar.innerText + if(document.title != newtitle){ + document.title = newtitle; + } + }else{ + let newtitle = 'Stable Diffusion' + if(document.title != newtitle){ + document.title = newtitle; + } + } + } + if(progressbar!= null && progressbar != global_progressbars[id_progressbar]){ global_progressbars[id_progressbar] = progressbar From 59a2b9e5afc27d2fda72069ca0635070535d18fe Mon Sep 17 00:00:00 2001 From: Greendayle Date: Wed, 5 Oct 2022 20:50:10 +0200 Subject: [PATCH 084/460] deepdanbooru interrogator --- ... deepbooru release project folder here.txt | 0 modules/deepbooru.py | 60 +++++++++++++++++++ modules/ui.py | 24 ++++++-- requirements.txt | 3 + requirements_versions.txt | 3 + style.css | 7 ++- 6 files changed, 91 insertions(+), 6 deletions(-) create mode 100644 models/deepbooru/Put your deepbooru release project folder here.txt create mode 100644 modules/deepbooru.py diff --git a/models/deepbooru/Put your deepbooru release project folder here.txt b/models/deepbooru/Put your deepbooru release project folder here.txt new file mode 100644 index 000000000..e69de29bb diff --git a/modules/deepbooru.py b/modules/deepbooru.py new file mode 100644 index 000000000..958b1c3d8 --- /dev/null +++ b/modules/deepbooru.py @@ -0,0 +1,60 @@ +import os.path +from concurrent.futures import ProcessPoolExecutor + +import numpy as np +import deepdanbooru as dd +import tensorflow as tf + + +def _load_tf_and_return_tags(pil_image, threshold): + this_folder = os.path.dirname(__file__) + model_path = os.path.join(this_folder, '..', 'models', 'deepbooru', 'deepdanbooru-v3-20211112-sgd-e28') + if not os.path.exists(model_path): + return "Download https://github.com/KichangKim/DeepDanbooru/releases/download/v3-20211112-sgd-e28/deepdanbooru-v3-20211112-sgd-e28.zip unpack and put into models/deepbooru" + + tags = dd.project.load_tags_from_project(model_path) + model = dd.project.load_model_from_project( + model_path, compile_model=True + ) + + width = model.input_shape[2] + height = model.input_shape[1] + image = np.array(pil_image) + image = tf.image.resize( + image, + size=(height, width), + method=tf.image.ResizeMethod.AREA, + preserve_aspect_ratio=True, + ) + image = image.numpy() # EagerTensor to np.array + image = dd.image.transform_and_pad_image(image, width, height) + image = image / 255.0 + image_shape = image.shape + image = image.reshape((1, image_shape[0], image_shape[1], image_shape[2])) + + y = model.predict(image)[0] + + result_dict = {} + + for i, tag in enumerate(tags): + result_dict[tag] = y[i] + + + + result_tags_out = [] + result_tags_print = [] + for tag in tags: + if result_dict[tag] >= threshold: + result_tags_out.append(tag) + result_tags_print.append(f'{result_dict[tag]} {tag}') + + print('\n'.join(sorted(result_tags_print, reverse=True))) + + return ', '.join(result_tags_out) + + +def get_deepbooru_tags(pil_image, threshold=0.5): + with ProcessPoolExecutor() as executor: + f = 
executor.submit(_load_tf_and_return_tags, pil_image, threshold) + ret = f.result() # will rethrow any exceptions + return ret \ No newline at end of file diff --git a/modules/ui.py b/modules/ui.py index 20dc8c379..ae98219a6 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -23,6 +23,7 @@ import gradio.utils import gradio.routes from modules import sd_hijack +from modules.deepbooru import get_deepbooru_tags from modules.paths import script_path from modules.shared import opts, cmd_opts import modules.shared as shared @@ -312,6 +313,11 @@ def interrogate(image): return gr_show(True) if prompt is None else prompt +def interrogate_deepbooru(image): + prompt = get_deepbooru_tags(image) + return gr_show(True) if prompt is None else prompt + + def create_seed_inputs(): with gr.Row(): with gr.Box(): @@ -439,15 +445,17 @@ def create_toprow(is_img2img): outputs=[], ) - with gr.Row(): + with gr.Row(scale=1): if is_img2img: - interrogate = gr.Button('Interrogate', elem_id="interrogate") + interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") + deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") else: interrogate = None + deepbooru = None prompt_style_apply = gr.Button('Apply style', elem_id="style_apply") save_style = gr.Button('Create style', elem_id="style_create") - return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, prompt_style_apply, save_style, paste, token_counter, token_button + return prompt, roll, prompt_style, negative_prompt, prompt_style2, submit, interrogate, deepbooru, prompt_style_apply, save_style, paste, token_counter, token_button def setup_progressbar(progressbar, preview, id_part, textinfo=None): @@ -476,7 +484,7 @@ def create_ui(wrap_gradio_gpu_call): import modules.txt2img with gr.Blocks(analytics_enabled=False) as txt2img_interface: - txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False) + txt2img_prompt, roll, txt2img_prompt_style, txt2img_negative_prompt, txt2img_prompt_style2, submit, _, _, txt2img_prompt_style_apply, txt2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=False) dummy_component = gr.Label(visible=False) with gr.Row(elem_id='txt2img_progress_row'): @@ -628,7 +636,7 @@ def create_ui(wrap_gradio_gpu_call): token_button.click(fn=update_token_counter, inputs=[txt2img_prompt, steps], outputs=[token_counter]) with gr.Blocks(analytics_enabled=False) as img2img_interface: - img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True) + img2img_prompt, roll, img2img_prompt_style, img2img_negative_prompt, img2img_prompt_style2, submit, img2img_interrogate, img2img_deepbooru, img2img_prompt_style_apply, img2img_save_style, paste, token_counter, token_button = create_toprow(is_img2img=True) with gr.Row(elem_id='img2img_progress_row'): with gr.Column(scale=1): @@ -785,6 +793,12 @@ def create_ui(wrap_gradio_gpu_call): outputs=[img2img_prompt], ) + img2img_deepbooru.click( + fn=interrogate_deepbooru, + inputs=[init_img], + outputs=[img2img_prompt], + ) + save.click( fn=wrap_gradio_call(save_files), _js="(x, y, z) => [x, y, selected_gallery_index()]", diff --git a/requirements.txt b/requirements.txt index 631fe616a..cab101f88 100644 --- 
a/requirements.txt +++ b/requirements.txt @@ -23,3 +23,6 @@ resize-right torchdiffeq kornia lark +deepdanbooru +tensorflow +tensorflow-io diff --git a/requirements_versions.txt b/requirements_versions.txt index fdff26878..811953c68 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -22,3 +22,6 @@ resize-right==0.0.2 torchdiffeq==0.2.3 kornia==0.6.7 lark==1.1.2 +git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26#egg=deepdanbooru[tensorflow] +tensorflow==2.10.0 +tensorflow-io==0.27.0 diff --git a/style.css b/style.css index 39586bf18..2fd351f91 100644 --- a/style.css +++ b/style.css @@ -103,7 +103,12 @@ #style_apply, #style_create, #interrogate{ margin: 0.75em 0.25em 0.25em 0.25em; - min-width: 3em; + min-width: 5em; +} + +#style_apply, #style_create, #deepbooru{ + margin: 0.75em 0.25em 0.25em 0.25em; + min-width: 5em; } #style_pos_col, #style_neg_col{ From 1506fab29ad54beb9f52236912abc432209c8089 Mon Sep 17 00:00:00 2001 From: Greendayle Date: Wed, 5 Oct 2022 21:15:08 +0200 Subject: [PATCH 085/460] removing problematic tag --- modules/deepbooru.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/modules/deepbooru.py b/modules/deepbooru.py index 958b1c3d8..841cb9c5f 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -38,13 +38,12 @@ def _load_tf_and_return_tags(pil_image, threshold): for i, tag in enumerate(tags): result_dict[tag] = y[i] - - - result_tags_out = [] result_tags_print = [] for tag in tags: if result_dict[tag] >= threshold: + if tag.startswith("rating:"): + continue result_tags_out.append(tag) result_tags_print.append(f'{result_dict[tag]} {tag}') From 17a99baf0c929e5df4dfc4b2a96aa3890a141112 Mon Sep 17 00:00:00 2001 From: Greendayle Date: Wed, 5 Oct 2022 22:05:24 +0200 Subject: [PATCH 086/460] better model search --- modules/deepbooru.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/modules/deepbooru.py b/modules/deepbooru.py index 841cb9c5f..a64fd9cd1 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -9,8 +9,15 @@ import tensorflow as tf def _load_tf_and_return_tags(pil_image, threshold): this_folder = os.path.dirname(__file__) model_path = os.path.join(this_folder, '..', 'models', 'deepbooru', 'deepdanbooru-v3-20211112-sgd-e28') - if not os.path.exists(model_path): - return "Download https://github.com/KichangKim/DeepDanbooru/releases/download/v3-20211112-sgd-e28/deepdanbooru-v3-20211112-sgd-e28.zip unpack and put into models/deepbooru" + + model_good = False + for path_candidate in [model_path, os.path.dirname(model_path)]: + if os.path.exists(os.path.join(path_candidate, 'project.json')): + model_path = path_candidate + model_good = True + if not model_good: + return ("Download https://github.com/KichangKim/DeepDanbooru/releases/download/v3-20211112-sgd-e28/" + "deepdanbooru-v3-20211112-sgd-e28.zip unpack and put into models/deepbooru") tags = dd.project.load_tags_from_project(model_path) model = dd.project.load_model_from_project( From c26732fbee2a57e621ac22bf70decf7496daa4cd Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 5 Oct 2022 23:16:27 +0300 Subject: [PATCH 087/460] added support for AND from https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/ --- modules/processing.py | 2 +- modules/prompt_parser.py | 114 ++++++++++++++++++++++++++++++++++++--- modules/sd_samplers.py | 35 ++++++++---- modules/ui.py | 6 ++- 4 files changed, 138 insertions(+), 19 
deletions(-) diff --git a/modules/processing.py b/modules/processing.py index bb94033b1..d8c6b8d57 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -360,7 +360,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: #c = p.sd_model.get_learned_conditioning(prompts) with devices.autocast(): uc = prompt_parser.get_learned_conditioning(shared.sd_model, len(prompts) * [p.negative_prompt], p.steps) - c = prompt_parser.get_learned_conditioning(shared.sd_model, prompts, p.steps) + c = prompt_parser.get_multicond_learned_conditioning(shared.sd_model, prompts, p.steps) if len(model_hijack.comments) > 0: for comment in model_hijack.comments: diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index a3b124219..f7420daf9 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -97,10 +97,26 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): ScheduledPromptConditioning = namedtuple("ScheduledPromptConditioning", ["end_at_step", "cond"]) -ScheduledPromptBatch = namedtuple("ScheduledPromptBatch", ["shape", "schedules"]) def get_learned_conditioning(model, prompts, steps): + """converts a list of prompts into a list of prompt schedules - each schedule is a list of ScheduledPromptConditioning, specifying the comdition (cond), + and the sampling step at which this condition is to be replaced by the next one. + + Input: + (model, ['a red crown', 'a [blue:green:5] jeweled crown'], 20) + + Output: + [ + [ + ScheduledPromptConditioning(end_at_step=20, cond=tensor([[-0.3886, 0.0229, -0.0523, ..., -0.4901, -0.3066, 0.0674], ..., [ 0.3317, -0.5102, -0.4066, ..., 0.4119, -0.7647, -1.0160]], device='cuda:0')) + ], + [ + ScheduledPromptConditioning(end_at_step=5, cond=tensor([[-0.3886, 0.0229, -0.0522, ..., -0.4901, -0.3067, 0.0673], ..., [-0.0192, 0.3867, -0.4644, ..., 0.1135, -0.3696, -0.4625]], device='cuda:0')), + ScheduledPromptConditioning(end_at_step=20, cond=tensor([[-0.3886, 0.0229, -0.0522, ..., -0.4901, -0.3067, 0.0673], ..., [-0.7352, -0.4356, -0.7888, ..., 0.6994, -0.4312, -1.2593]], device='cuda:0')) + ] + ] + """ res = [] prompt_schedules = get_learned_conditioning_prompt_schedules(prompts, steps) @@ -123,13 +139,75 @@ def get_learned_conditioning(model, prompts, steps): cache[prompt] = cond_schedule res.append(cond_schedule) - return ScheduledPromptBatch((len(prompts),) + res[0][0].cond.shape, res) + return res -def reconstruct_cond_batch(c: ScheduledPromptBatch, current_step): - param = c.schedules[0][0].cond - res = torch.zeros(c.shape, device=param.device, dtype=param.dtype) - for i, cond_schedule in enumerate(c.schedules): +re_AND = re.compile(r"\bAND\b") +re_weight = re.compile(r"^(.*?)(?:\s*:\s*([-+]?\s*(?:\d+|\d*\.\d+)?))?\s*$") + + +def get_multicond_prompt_list(prompts): + res_indexes = [] + + prompt_flat_list = [] + prompt_indexes = {} + + for prompt in prompts: + subprompts = re_AND.split(prompt) + + indexes = [] + for subprompt in subprompts: + text, weight = re_weight.search(subprompt).groups() + + weight = float(weight) if weight is not None else 1.0 + + index = prompt_indexes.get(text, None) + if index is None: + index = len(prompt_flat_list) + prompt_flat_list.append(text) + prompt_indexes[text] = index + + indexes.append((index, weight)) + + res_indexes.append(indexes) + + return res_indexes, prompt_flat_list, prompt_indexes + + +class ComposableScheduledPromptConditioning: + def __init__(self, schedules, weight=1.0): + self.schedules: list[ScheduledPromptConditioning] = schedules + self.weight: float = weight + + 
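As an illustrative aside (names and numbers here are invented for the example), a minimal runnable sketch of how a prompt such as "a forest AND a castle :0.5" is meant to combine during classifier-free guidance, using toy one-element tensors in place of real conditioning vectors; it mirrors the loop added to CFGDenoiser.forward further below:

    import torch

    uncond = torch.tensor([0.0])       # model output for the unconditional/negative prompt
    cond_forest = torch.tensor([1.0])  # model output for "a forest"
    cond_castle = torch.tensor([2.0])  # model output for "a castle"
    cond_scale = 7.5

    # one batch entry: (index into x_out, weight) pairs, as produced by get_multicond_prompt_list
    conds_for_batch = [(0, 1.0), (1, 0.5)]
    x_out = [cond_forest, cond_castle]

    denoised = uncond.clone()
    for cond_index, weight in conds_for_batch:
        denoised += (x_out[cond_index] - uncond) * (weight * cond_scale)

    print(denoised)  # tensor([15.]) = 0 + (1-0)*1.0*7.5 + (2-0)*0.5*7.5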
+class MulticondLearnedConditioning: + def __init__(self, shape, batch): + self.shape: tuple = shape # the shape field is needed to send this object to DDIM/PLMS + self.batch: list[list[ComposableScheduledPromptConditioning]] = batch + + +def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearnedConditioning: + """same as get_learned_conditioning, but returns a list of ScheduledPromptConditioning along with the weight objects for each prompt. + For each prompt, the list is obtained by splitting the prompt using the AND separator. + + https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/ + """ + + res_indexes, prompt_flat_list, prompt_indexes = get_multicond_prompt_list(prompts) + + learned_conditioning = get_learned_conditioning(model, prompt_flat_list, steps) + + res = [] + for indexes in res_indexes: + res.append([ComposableScheduledPromptConditioning(learned_conditioning[i], weight) for i, weight in indexes]) + + return MulticondLearnedConditioning(shape=(len(prompts),), batch=res) + + +def reconstruct_cond_batch(c: list[list[ScheduledPromptConditioning]], current_step): + param = c[0][0].cond + res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype) + for i, cond_schedule in enumerate(c): target_index = 0 for current, (end_at, cond) in enumerate(cond_schedule): if current_step <= end_at: @@ -140,6 +218,30 @@ def reconstruct_cond_batch(c: ScheduledPromptBatch, current_step): return res +def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current_step): + param = c.batch[0][0].schedules[0].cond + + tensors = [] + conds_list = [] + + for batch_no, composable_prompts in enumerate(c.batch): + conds_for_batch = [] + + for cond_index, composable_prompt in enumerate(composable_prompts): + target_index = 0 + for current, (end_at, cond) in enumerate(composable_prompt.schedules): + if current_step <= end_at: + target_index = current + break + + conds_for_batch.append((len(tensors), composable_prompt.weight)) + tensors.append(composable_prompt.schedules[target_index].cond) + + conds_list.append(conds_for_batch) + + return conds_list, torch.stack(tensors).to(device=param.device, dtype=param.dtype) + + re_attention = re.compile(r""" \\\(| \\\)| diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index dbf570d2c..d27c547b3 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -109,9 +109,12 @@ class VanillaStableDiffusionSampler: return 0 def p_sample_ddim_hook(self, x_dec, cond, ts, unconditional_conditioning, *args, **kwargs): - cond = prompt_parser.reconstruct_cond_batch(cond, self.step) + conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) unconditional_conditioning = prompt_parser.reconstruct_cond_batch(unconditional_conditioning, self.step) + assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers' + cond = tensor + if self.mask is not None: img_orig = self.sampler.model.q_sample(self.init_latent, ts) x_dec = img_orig * self.mask + self.nmask * x_dec @@ -183,19 +186,31 @@ class CFGDenoiser(torch.nn.Module): self.step = 0 def forward(self, x, sigma, uncond, cond, cond_scale): - cond = prompt_parser.reconstruct_cond_batch(cond, self.step) + conds_list, tensor = prompt_parser.reconstruct_multicond_batch(cond, self.step) uncond = prompt_parser.reconstruct_cond_batch(uncond, self.step) + batch_size = len(conds_list) + repeats = [len(conds_list[i]) for i in range(batch_size)] + + 
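        # Illustrative sketch of the batch layout built below (values invented for
        # the example): with a two-image batch where image 0 uses "X AND Y:0.5" and
        # image 1 a plain prompt, conds_list could be [[(0, 1.0), (1, 0.5)], [(2, 1.0)]],
        # so repeats == [2, 1] and the single model call sees latents
        # [x0, x0, x1, x0, x1] paired with conditionings [c_X, c_Y, c_plain, uncond0, uncond1].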
x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) + sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) + cond_in = torch.cat([tensor, uncond]) + if shared.batch_cond_uncond: - x_in = torch.cat([x] * 2) - sigma_in = torch.cat([sigma] * 2) - cond_in = torch.cat([uncond, cond]) - uncond, cond = self.inner_model(x_in, sigma_in, cond=cond_in).chunk(2) - denoised = uncond + (cond - uncond) * cond_scale + x_out = self.inner_model(x_in, sigma_in, cond=cond_in) else: - uncond = self.inner_model(x, sigma, cond=uncond) - cond = self.inner_model(x, sigma, cond=cond) - denoised = uncond + (cond - uncond) * cond_scale + x_out = torch.zeros_like(x_in) + for batch_offset in range(0, x_out.shape[0], batch_size): + a = batch_offset + b = a + batch_size + x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=cond_in[a:b]) + + denoised_uncond = x_out[-batch_size:] + denoised = torch.clone(denoised_uncond) + + for i, conds in enumerate(conds_list): + for cond_index, weight in conds: + denoised[i] += (x_out[cond_index] - denoised_uncond[i]) * (weight * cond_scale) if self.mask is not None: denoised = self.init_latent * self.mask + self.nmask * denoised diff --git a/modules/ui.py b/modules/ui.py index 523ab25b3..9620350fc 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -34,7 +34,7 @@ import modules.gfpgan_model import modules.codeformer_model import modules.styles import modules.generation_parameters_copypaste -from modules.prompt_parser import get_learned_conditioning_prompt_schedules +from modules import prompt_parser from modules.images import apply_filename_pattern, get_next_sequence_number import modules.textual_inversion.ui @@ -394,7 +394,9 @@ def connect_reuse_seed(seed: gr.Number, reuse_seed: gr.Button, generation_info: def update_token_counter(text, steps): try: - prompt_schedules = get_learned_conditioning_prompt_schedules([text], steps) + _, prompt_flat_list, _ = prompt_parser.get_multicond_prompt_list([text]) + prompt_schedules = prompt_parser.get_learned_conditioning_prompt_schedules(prompt_flat_list, steps) + except Exception: # a parsing error can happen here during typing, and we don't want to bother the user with # messages related to it in console From 4320f386d9641c7c234589c4cb0c0c6cbeb156ad Mon Sep 17 00:00:00 2001 From: Greendayle Date: Wed, 5 Oct 2022 22:39:32 +0200 Subject: [PATCH 088/460] removing underscores and colons --- modules/deepbooru.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/deepbooru.py b/modules/deepbooru.py index a64fd9cd1..fb5018a6c 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -56,7 +56,7 @@ def _load_tf_and_return_tags(pil_image, threshold): print('\n'.join(sorted(result_tags_print, reverse=True))) - return ', '.join(result_tags_out) + return ', '.join(result_tags_out).replace('_', ' ').replace(':', ' ') def get_deepbooru_tags(pil_image, threshold=0.5): From f8e41a96bb30a04dd5e294c7e1178c1c3b09d481 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Wed, 5 Oct 2022 23:52:05 +0300 Subject: [PATCH 089/460] fix various float parsing errors --- modules/prompt_parser.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index f7420daf9..800b12c75 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -143,8 +143,7 @@ def get_learned_conditioning(model, prompts, steps): re_AND = re.compile(r"\bAND\b") -re_weight = 
re.compile(r"^(.*?)(?:\s*:\s*([-+]?\s*(?:\d+|\d*\.\d+)?))?\s*$") - +re_weight = re.compile(r"^(.*?)(?:\s*:\s*([-+]?(?:\d+\.?|\d*\.\d+)))?\s*$") def get_multicond_prompt_list(prompts): res_indexes = [] From 20f8ec877a99ce2ebf193cb1e2e773cfc77b7c41 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 6 Oct 2022 00:09:32 +0300 Subject: [PATCH 090/460] remove type annotations in new code because presumably they don't work in 3.7 --- modules/prompt_parser.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 800b12c75..ee4c5d02d 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -175,14 +175,14 @@ def get_multicond_prompt_list(prompts): class ComposableScheduledPromptConditioning: def __init__(self, schedules, weight=1.0): - self.schedules: list[ScheduledPromptConditioning] = schedules + self.schedules = schedules # : list[ScheduledPromptConditioning] self.weight: float = weight class MulticondLearnedConditioning: def __init__(self, shape, batch): self.shape: tuple = shape # the shape field is needed to send this object to DDIM/PLMS - self.batch: list[list[ComposableScheduledPromptConditioning]] = batch + self.batch = batch # : list[list[ComposableScheduledPromptConditioning]] def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearnedConditioning: @@ -203,7 +203,7 @@ def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearne return MulticondLearnedConditioning(shape=(len(prompts),), batch=res) -def reconstruct_cond_batch(c: list[list[ScheduledPromptConditioning]], current_step): +def reconstruct_cond_batch(c, current_step): # c: list[list[ScheduledPromptConditioning]] param = c[0][0].cond res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype) for i, cond_schedule in enumerate(c): From 34c358d10d52817f7a889ae4c52096ee654f3fe6 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Wed, 5 Oct 2022 22:11:30 +0100 Subject: [PATCH 091/460] use typing.list in prompt_parser.py for wider python version support --- modules/prompt_parser.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 800b12c75..fdfa21ae6 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -1,6 +1,6 @@ import re from collections import namedtuple - +from typing import List import lark # a prompt like this: "fantasy landscape with a [mountain:lake:0.25] and [an oak:a christmas tree:0.75][ in foreground::0.6][ in background:0.25] [shoddy:masterful:0.5]" @@ -175,14 +175,14 @@ def get_multicond_prompt_list(prompts): class ComposableScheduledPromptConditioning: def __init__(self, schedules, weight=1.0): - self.schedules: list[ScheduledPromptConditioning] = schedules + self.schedules: List[ScheduledPromptConditioning] = schedules self.weight: float = weight class MulticondLearnedConditioning: def __init__(self, shape, batch): self.shape: tuple = shape # the shape field is needed to send this object to DDIM/PLMS - self.batch: list[list[ComposableScheduledPromptConditioning]] = batch + self.batch: List[List[ComposableScheduledPromptConditioning]] = batch def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearnedConditioning: @@ -203,7 +203,7 @@ def get_multicond_learned_conditioning(model, prompts, steps) -> MulticondLearne return MulticondLearnedConditioning(shape=(len(prompts),), batch=res) -def 
reconstruct_cond_batch(c: list[list[ScheduledPromptConditioning]], current_step): +def reconstruct_cond_batch(c: List[List[ScheduledPromptConditioning]], current_step): param = c[0][0].cond res = torch.zeros((len(c),) + param.shape, device=param.device, dtype=param.dtype) for i, cond_schedule in enumerate(c): From 55400c981b7c1389482057a35ed6ea11f08da194 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Thu, 6 Oct 2022 03:11:15 +0100 Subject: [PATCH 092/460] Set gradio-img2img-tool default to 'editor' --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index e52c9b1d1..bab0fe6ee 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -55,7 +55,7 @@ parser.add_argument("--hide-ui-dir-config", action='store_true', help="hide dire parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(script_path, 'config.json')) parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option") parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None) -parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image uploader tool: can be either editor for ctopping, or color-sketch for drawing', choices=["color-sketch", "editor"], default="color-sketch") +parser.add_argument("--gradio-img2img-tool", type=str, help='gradio image uploader tool: can be either editor for ctopping, or color-sketch for drawing', choices=["color-sketch", "editor"], default="editor") parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last") parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(script_path, 'styles.csv')) parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False) From 2499fb4e1910d31ff12c24110f161b20641b8835 Mon Sep 17 00:00:00 2001 From: Raphael Stoeckli Date: Wed, 5 Oct 2022 21:57:18 +0200 Subject: [PATCH 093/460] Add sanitizer for captions in Textual inversion --- modules/textual_inversion/preprocess.py | 28 +++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index f545a9937..4f3df4bd9 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -1,5 +1,8 @@ +from cmath import log import os from PIL import Image, ImageOps +import platform +import sys import tqdm from modules import shared, images @@ -25,6 +28,7 @@ def preprocess(process_src, process_dst, process_flip, process_split, process_ca def save_pic_with_caption(image, index): if process_caption: caption = "-" + shared.interrogator.generate_caption(image) + caption = sanitize_caption(os.path.join(dst, f"{index:05}-{subindex[0]}"), caption, ".png") else: caption = filename caption = os.path.splitext(caption)[0] @@ -75,3 +79,27 @@ def preprocess(process_src, process_dst, process_flip, process_split, process_ca if process_caption: shared.interrogator.send_blip_to_ram() + +def sanitize_caption(base_path, original_caption, suffix): + operating_system = platform.system().lower() + if (operating_system == "windows"): + invalid_path_characters = "\\/:*?\"<>|" + max_path_length = 259 + else: + 
invalid_path_characters = "/" #linux/macos + max_path_length = 1023 + caption = original_caption + for invalid_character in invalid_path_characters: + caption = caption.replace(invalid_character, "") + fixed_path_length = len(base_path) + len(suffix) + if fixed_path_length + len(caption) <= max_path_length: + return caption + caption_tokens = caption.split() + new_caption = "" + for token in caption_tokens: + last_caption = new_caption + new_caption = new_caption + token + " " + if (len(new_caption) + fixed_path_length - 1 > max_path_length): + break + print(f"\nPath will be too long. Truncated caption: {original_caption}\nto: {last_caption}", file=sys.stderr) + return last_caption.strip() From 4288e53fc2ea25fa49715bf5b7f14603553c9e38 Mon Sep 17 00:00:00 2001 From: Raphael Stoeckli Date: Wed, 5 Oct 2022 23:11:32 +0200 Subject: [PATCH 094/460] removed unused import, fixed typo --- modules/textual_inversion/preprocess.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/textual_inversion/preprocess.py b/modules/textual_inversion/preprocess.py index 4f3df4bd9..f1c002a2b 100644 --- a/modules/textual_inversion/preprocess.py +++ b/modules/textual_inversion/preprocess.py @@ -1,4 +1,3 @@ -from cmath import log import os from PIL import Image, ImageOps import platform @@ -13,7 +12,7 @@ def preprocess(process_src, process_dst, process_flip, process_split, process_ca src = os.path.abspath(process_src) dst = os.path.abspath(process_dst) - assert src != dst, 'same directory specified as source and desitnation' + assert src != dst, 'same directory specified as source and destination' os.makedirs(dst, exist_ok=True) From a93c3ffbfd264ed6b5d989922352300c9d3efbe4 Mon Sep 17 00:00:00 2001 From: Jocke Date: Wed, 5 Oct 2022 16:31:48 +0200 Subject: [PATCH 095/460] Outpainting mk2, prevent generation of a completely random image every time even when global seed is static --- scripts/outpainting_mk_2.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/scripts/outpainting_mk_2.py b/scripts/outpainting_mk_2.py index 11613ca36..a6468e09a 100644 --- a/scripts/outpainting_mk_2.py +++ b/scripts/outpainting_mk_2.py @@ -85,8 +85,11 @@ def get_matched_noise(_np_src_image, np_mask_rgb, noise_q=1, color_variation=0.0 src_dist = np.absolute(src_fft) src_phase = src_fft / src_dist + # create a generator with a static seed to make outpainting deterministic / only follow global seed + rng = np.random.default_rng(0) + noise_window = _get_gaussian_window(width, height, mode=1) # start with simple gaussian noise - noise_rgb = np.random.random_sample((width, height, num_channels)) + noise_rgb = rng.random((width, height, num_channels)) noise_grey = (np.sum(noise_rgb, axis=2) / 3.) 
noise_rgb *= color_variation # the colorfulness of the starting noise is blended to greyscale with a parameter for c in range(num_channels): From 6e7057b31b9762a9720282c7da486e4f264dee28 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 6 Oct 2022 12:08:06 +0300 Subject: [PATCH 096/460] support for downloading new commit hash for git repos --- launch.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/launch.py b/launch.py index 57405feab..2f91f586c 100644 --- a/launch.py +++ b/launch.py @@ -86,6 +86,15 @@ def git_clone(url, dir, name, commithash=None): # TODO clone into temporary dir and move if successful if os.path.exists(dir): + if commithash is None: + return + + current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, "Couldn't determine {name}'s hash: {commithash}").strip() + if current_hash == commithash: + return + + run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}") + run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commint for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}") return run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}") From 5f24b7bcf4a074fbdec757617fcd1bc82e76551b Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 6 Oct 2022 12:08:48 +0300 Subject: [PATCH 097/460] option to let users select which samplers they want to hide --- modules/processing.py | 13 ++++++------- modules/sd_samplers.py | 19 +++++++++++++++++-- modules/shared.py | 15 +++++++++------ webui.py | 4 +++- 4 files changed, 35 insertions(+), 16 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index d8c6b8d57..e01c8b3f6 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -11,9 +11,8 @@ import cv2 from skimage import exposure import modules.sd_hijack -from modules import devices, prompt_parser, masking +from modules import devices, prompt_parser, masking, sd_samplers from modules.sd_hijack import model_hijack -from modules.sd_samplers import samplers, samplers_for_img2img from modules.shared import opts, cmd_opts, state import modules.shared as shared import modules.face_restoration @@ -110,7 +109,7 @@ class Processed: self.width = p.width self.height = p.height self.sampler_index = p.sampler_index - self.sampler = samplers[p.sampler_index].name + self.sampler = sd_samplers.samplers[p.sampler_index].name self.cfg_scale = p.cfg_scale self.steps = p.steps self.batch_size = p.batch_size @@ -265,7 +264,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration generation_params = { "Steps": p.steps, - "Sampler": samplers[p.sampler_index].name, + "Sampler": sd_samplers.samplers[p.sampler_index].name, "CFG scale": p.cfg_scale, "Seed": all_seeds[index], "Face restoration": (opts.face_restoration_model if p.restore_faces else None), @@ -478,7 +477,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.firstphase_height_truncated = int(scale * self.height) def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength): - self.sampler = samplers[self.sampler_index].constructor(self.sd_model) + self.sampler = sd_samplers.samplers[self.sampler_index].constructor(self.sd_model) if not self.enable_hr: x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, 
seed_resize_from_w=self.seed_resize_from_w, p=self) @@ -521,7 +520,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): shared.state.nextjob() - self.sampler = samplers[self.sampler_index].constructor(self.sd_model) + self.sampler = sd_samplers.samplers[self.sampler_index].constructor(self.sd_model) noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) # GC now before running the next img2img to prevent running out of memory @@ -556,7 +555,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.nmask = None def init(self, all_prompts, all_seeds, all_subseeds): - self.sampler = samplers_for_img2img[self.sampler_index].constructor(self.sd_model) + self.sampler = sd_samplers.samplers_for_img2img[self.sampler_index].constructor(self.sd_model) crop_region = None if self.image_mask is not None: diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index d27c547b3..2e1f77153 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -32,12 +32,27 @@ samplers_data_k_diffusion = [ if hasattr(k_diffusion.sampling, funcname) ] -samplers = [ +all_samplers = [ *samplers_data_k_diffusion, SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), []), SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), []), ] -samplers_for_img2img = [x for x in samplers if x.name not in ['PLMS', 'DPM fast', 'DPM adaptive']] + +samplers = [] +samplers_for_img2img = [] + + +def set_samplers(): + global samplers, samplers_for_img2img + + hidden = set(opts.hide_samplers) + hidden_img2img = set(opts.hide_samplers + ['PLMS', 'DPM fast', 'DPM adaptive']) + + samplers = [x for x in all_samplers if x.name not in hidden] + samplers_for_img2img = [x for x in all_samplers if x.name not in hidden_img2img] + + +set_samplers() sampler_extra_params = { 'sample_euler': ['s_churn', 's_tmin', 's_tmax', 's_noise'], diff --git a/modules/shared.py b/modules/shared.py index bab0fe6ee..ca2e4c742 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -13,6 +13,7 @@ import modules.memmon import modules.sd_models import modules.styles import modules.devices as devices +from modules import sd_samplers from modules.paths import script_path, sd_path sd_model_file = os.path.join(script_path, 'model.ckpt') @@ -238,14 +239,16 @@ options_templates.update(options_section(('ui', "User interface"), { })) options_templates.update(options_section(('sampler-params', "Sampler parameters"), { - "eta_ddim": OptionInfo(0.0, "eta (noise multiplier) for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), - 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), - 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + "hide_samplers": OptionInfo([], "Hide samplers in user interface (requires restart)", gr.CheckboxGroup, lambda: {"choices": [x.name for x in 
sd_samplers.all_samplers]}), + "eta_ddim": OptionInfo(0.0, "eta (noise multiplier) for DDIM", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + "eta_ancestral": OptionInfo(1.0, "eta (noise multiplier) for ancestral samplers", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + "ddim_discretize": OptionInfo('uniform', "img2img DDIM discretize", gr.Radio, {"choices": ['uniform', 'quad']}), + 's_churn': OptionInfo(0.0, "sigma churn", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + 's_tmin': OptionInfo(0.0, "sigma tmin", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), + 's_noise': OptionInfo(1.0, "sigma noise", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}), })) + class Options: data = None data_labels = options_templates diff --git a/webui.py b/webui.py index 47848ba58..9ef124274 100644 --- a/webui.py +++ b/webui.py @@ -2,7 +2,7 @@ import os import threading import time import importlib -from modules import devices +from modules import devices, sd_samplers from modules.paths import script_path import signal import threading @@ -109,6 +109,8 @@ def webui(): time.sleep(0.5) break + sd_samplers.set_samplers() + print('Reloading Custom Scripts') modules.scripts.reload_scripts(os.path.join(script_path, "scripts")) print('Reloading modules: modules.ui') From 2d3ea42a2d1e909bbccdb6b49561b187c60a9402 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 6 Oct 2022 13:21:12 +0300 Subject: [PATCH 098/460] workaround for a mysterious bug where prompt weights can't be matched --- modules/prompt_parser.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index a7a6aa314..f00256f28 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -156,7 +156,9 @@ def get_multicond_prompt_list(prompts): indexes = [] for subprompt in subprompts: - text, weight = re_weight.search(subprompt).groups() + match = re_weight.search(subprompt) + + text, weight = match.groups() if match is not None else (subprompt, 1.0) weight = float(weight) if weight is not None else 1.0 From 2a532804957e47bc36c67c8f5b104dcfa8e8f3f0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 6 Oct 2022 13:21:32 +0300 Subject: [PATCH 099/460] reorder imports to fix the bug with k-diffusion on some version --- webui.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/webui.py b/webui.py index 9ef124274..480360fe0 100644 --- a/webui.py +++ b/webui.py @@ -2,11 +2,12 @@ import os import threading import time import importlib -from modules import devices, sd_samplers -from modules.paths import script_path import signal import threading +from modules.paths import script_path + +from modules import devices, sd_samplers import modules.codeformer_model as codeformer import modules.extras import modules.face_restoration From c30c06db207a580d76544fd10fc1e03cd58ce85e Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Mon, 3 Oct 2022 12:48:16 +0300 Subject: [PATCH 100/460] update k-diffusion --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/launch.py b/launch.py index 2f91f586c..c2713c645 100644 --- a/launch.py +++ b/launch.py @@ -19,7 +19,7 @@ clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLI stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc") taming_transformers_commit_hash 
= os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6") -k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "a7ec1974d4ccb394c2dca275f42cd97490618924") +k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "567e11f7062ba20ae32b5a8cd07fb0fc4b9410cf") codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af") blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") From c1a068ed0acc788774afc1541ca69342fd1d94ad Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Mon, 3 Oct 2022 12:49:17 +0300 Subject: [PATCH 101/460] Create alternate_sampler_noise_schedules.py --- scripts/alternate_sampler_noise_schedules.py | 53 ++++++++++++++++++++ 1 file changed, 53 insertions(+) create mode 100644 scripts/alternate_sampler_noise_schedules.py diff --git a/scripts/alternate_sampler_noise_schedules.py b/scripts/alternate_sampler_noise_schedules.py new file mode 100644 index 000000000..4f3ed8fb1 --- /dev/null +++ b/scripts/alternate_sampler_noise_schedules.py @@ -0,0 +1,53 @@ +import inspect +from modules.processing import Processed, process_images +import gradio as gr +import modules.scripts as scripts +import k_diffusion.sampling +import torch + + +class Script(scripts.Script): + + def title(self): + return "Alternate Sampler Noise Schedules" + + def ui(self, is_img2img): + noise_scheduler = gr.Dropdown(label="Noise Scheduler", choices=['Default','Karras','Exponential', 'Variance Preserving'], value='Default', type="index") + sched_smin = gr.Slider(value=0.1, label="Sigma min", minimum=0.0, maximum=100.0, step=0.5,) + sched_smax = gr.Slider(value=10.0, label="Sigma max", minimum=0.0, maximum=100.0, step=0.5) + sched_rho = gr.Slider(value=7.0, label="Sigma rho (Karras only)", minimum=7.0, maximum=100.0, step=0.5) + sched_beta_d = gr.Slider(value=19.9, label="Beta distribution (VP only)",minimum=0.0, maximum=40.0, step=0.5) + sched_beta_min = gr.Slider(value=0.1, label="Beta min (VP only)", minimum=0.0, maximum=40.0, step=0.1) + sched_eps_s = gr.Slider(value=0.001, label="Epsilon (VP only)", minimum=0.001, maximum=1.0, step=0.001) + + return [noise_scheduler, sched_smin, sched_smax, sched_rho, sched_beta_d, sched_beta_min, sched_eps_s] + + def run(self, p, noise_scheduler, sched_smin, sched_smax, sched_rho, sched_beta_d, sched_beta_min, sched_eps_s): + + noise_scheduler_func_name = ['-','get_sigmas_karras','get_sigmas_exponential','get_sigmas_vp'][noise_scheduler] + + base_params = { + "sigma_min":sched_smin, + "sigma_max":sched_smax, + "rho":sched_rho, + "beta_d":sched_beta_d, + "beta_min":sched_beta_min, + "eps_s":sched_eps_s, + "device":"cuda" if torch.cuda.is_available() else "cpu" + } + + if hasattr(k_diffusion.sampling,noise_scheduler_func_name): + + sigma_func = getattr(k_diffusion.sampling,noise_scheduler_func_name) + sigma_func_kwargs = {} + + for k,v in base_params.items(): + if k in inspect.signature(sigma_func).parameters: + sigma_func_kwargs[k] = v + + def substitute_noise_scheduler(n): + return sigma_func(n,**sigma_func_kwargs) + + p.sampler_noise_scheduler_override = substitute_noise_scheduler + + return process_images(p) From 71901b3d3bea1d035bf4a7229d19356b4b062151 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Wed, 5 Oct 2022 14:30:57 +0300 Subject: [PATCH 102/460] add karras scheduling variants --- modules/sd_samplers.py | 
13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 2e1f77153..8d6eb7620 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -26,6 +26,17 @@ samplers_k_diffusion = [ ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad']), ] +if opts.show_karras_scheduler_variants: + k_diffusion.sampling.sample_dpm_2_ka = k_diffusion.sampling.sample_dpm_2 + k_diffusion.sampling.sample_dpm_2_ancestral_ka = k_diffusion.sampling.sample_dpm_2_ancestral + k_diffusion.sampling.sample_lms_ka = k_diffusion.sampling.sample_lms + samplers_k_diffusion_ka = [ + ('LMS K Scheduling', 'sample_lms_ka', ['k_lms_ka']), + ('DPM2 K Scheduling', 'sample_dpm_2_ka', ['k_dpm_2_ka']), + ('DPM2 a K Scheduling', 'sample_dpm_2_ancestral_ka', ['k_dpm_2_a_ka']), + ] + samplers_k_diffusion.extend(samplers_k_diffusion_ka) + samplers_data_k_diffusion = [ SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases) for label, funcname, aliases in samplers_k_diffusion @@ -345,6 +356,8 @@ class KDiffusionSampler: if p.sampler_noise_scheduler_override: sigmas = p.sampler_noise_scheduler_override(steps) + elif self.funcname.endswith('ka'): + sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device) else: sigmas = self.model_wrap.get_sigmas(steps) x = x * sigmas[0] From 3ddf80a9db8793188e2fe9488233d2b272cceb33 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Wed, 5 Oct 2022 14:31:51 +0300 Subject: [PATCH 103/460] add variant setting --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/shared.py b/modules/shared.py index ca2e4c742..9e4860a28 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -236,6 +236,7 @@ options_templates.update(options_section(('ui', "User interface"), { "font": OptionInfo("", "Font for image grids that have text"), "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), "js_modal_lightbox_initialy_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), + "show_karras_scheduler_variants": OptionInfo(True, "Show Karras scheduling variants for select samplers. 
Try these variants if your K sampled images suffer from excessive noise."), })) options_templates.update(options_section(('sampler-params', "Sampler parameters"), { From a971e4a767118ec41ec0f129770122babfb16a16 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Thu, 6 Oct 2022 13:34:42 +0300 Subject: [PATCH 104/460] update k-diff once again --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/launch.py b/launch.py index c2713c645..9fe0fd675 100644 --- a/launch.py +++ b/launch.py @@ -19,7 +19,7 @@ clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLI stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc") taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6") -k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "567e11f7062ba20ae32b5a8cd07fb0fc4b9410cf") +k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "f4e99857772fc3a126ba886aadf795a332774878") codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af") blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") From 5993df24a1026225cb8af89237547c1d9101ce69 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 6 Oct 2022 14:12:52 +0300 Subject: [PATCH 105/460] integrate the new samplers PR --- modules/processing.py | 7 ++- modules/sd_samplers.py | 59 ++++++++++---------- modules/shared.py | 1 - scripts/alternate_sampler_noise_schedules.py | 53 ------------------ scripts/img2imgalt.py | 3 +- 5 files changed, 36 insertions(+), 87 deletions(-) delete mode 100644 scripts/alternate_sampler_noise_schedules.py diff --git a/modules/processing.py b/modules/processing.py index e01c8b3f6..e567956ce 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -477,7 +477,7 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): self.firstphase_height_truncated = int(scale * self.height) def sample(self, conditioning, unconditional_conditioning, seeds, subseeds, subseed_strength): - self.sampler = sd_samplers.samplers[self.sampler_index].constructor(self.sd_model) + self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model) if not self.enable_hr: x = create_random_tensors([opt_C, self.height // opt_f, self.width // opt_f], seeds=seeds, subseeds=subseeds, subseed_strength=self.subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) @@ -520,7 +520,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing): shared.state.nextjob() - self.sampler = sd_samplers.samplers[self.sampler_index].constructor(self.sd_model) + self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, self.sampler_index, self.sd_model) + noise = create_random_tensors(samples.shape[1:], seeds=seeds, subseeds=subseeds, subseed_strength=subseed_strength, seed_resize_from_h=self.seed_resize_from_h, seed_resize_from_w=self.seed_resize_from_w, p=self) # GC now before running the next img2img to prevent running out of memory @@ -555,7 +556,7 @@ class StableDiffusionProcessingImg2Img(StableDiffusionProcessing): self.nmask = None def init(self, all_prompts, all_seeds, all_subseeds): - self.sampler = 
sd_samplers.samplers_for_img2img[self.sampler_index].constructor(self.sd_model) + self.sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers_for_img2img, self.sampler_index, self.sd_model) crop_region = None if self.image_mask is not None: diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 8d6eb7620..497df9430 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -13,46 +13,46 @@ from modules.shared import opts, cmd_opts, state import modules.shared as shared -SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases']) +SamplerData = namedtuple('SamplerData', ['name', 'constructor', 'aliases', 'options']) samplers_k_diffusion = [ - ('Euler a', 'sample_euler_ancestral', ['k_euler_a']), - ('Euler', 'sample_euler', ['k_euler']), - ('LMS', 'sample_lms', ['k_lms']), - ('Heun', 'sample_heun', ['k_heun']), - ('DPM2', 'sample_dpm_2', ['k_dpm_2']), - ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a']), - ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast']), - ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad']), + ('Euler a', 'sample_euler_ancestral', ['k_euler_a'], {}), + ('Euler', 'sample_euler', ['k_euler'], {}), + ('LMS', 'sample_lms', ['k_lms'], {}), + ('Heun', 'sample_heun', ['k_heun'], {}), + ('DPM2', 'sample_dpm_2', ['k_dpm_2'], {}), + ('DPM2 a', 'sample_dpm_2_ancestral', ['k_dpm_2_a'], {}), + ('DPM fast', 'sample_dpm_fast', ['k_dpm_fast'], {}), + ('DPM adaptive', 'sample_dpm_adaptive', ['k_dpm_ad'], {}), + ('LMS Karras', 'sample_lms', ['k_lms_ka'], {'scheduler': 'karras'}), + ('DPM2 Karras', 'sample_dpm_2', ['k_dpm_2_ka'], {'scheduler': 'karras'}), + ('DPM2 a Karras', 'sample_dpm_2_ancestral', ['k_dpm_2_a_ka'], {'scheduler': 'karras'}), ] -if opts.show_karras_scheduler_variants: - k_diffusion.sampling.sample_dpm_2_ka = k_diffusion.sampling.sample_dpm_2 - k_diffusion.sampling.sample_dpm_2_ancestral_ka = k_diffusion.sampling.sample_dpm_2_ancestral - k_diffusion.sampling.sample_lms_ka = k_diffusion.sampling.sample_lms - samplers_k_diffusion_ka = [ - ('LMS K Scheduling', 'sample_lms_ka', ['k_lms_ka']), - ('DPM2 K Scheduling', 'sample_dpm_2_ka', ['k_dpm_2_ka']), - ('DPM2 a K Scheduling', 'sample_dpm_2_ancestral_ka', ['k_dpm_2_a_ka']), - ] - samplers_k_diffusion.extend(samplers_k_diffusion_ka) - samplers_data_k_diffusion = [ - SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases) - for label, funcname, aliases in samplers_k_diffusion + SamplerData(label, lambda model, funcname=funcname: KDiffusionSampler(funcname, model), aliases, options) + for label, funcname, aliases, options in samplers_k_diffusion if hasattr(k_diffusion.sampling, funcname) ] all_samplers = [ *samplers_data_k_diffusion, - SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), []), - SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), []), + SamplerData('DDIM', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.ddim.DDIMSampler, model), [], {}), + SamplerData('PLMS', lambda model: VanillaStableDiffusionSampler(ldm.models.diffusion.plms.PLMSSampler, model), [], {}), ] samplers = [] samplers_for_img2img = [] +def create_sampler_with_index(list_of_configs, index, model): + config = list_of_configs[index] + sampler = config.constructor(model) + sampler.config = config + + return sampler + + def set_samplers(): global samplers, samplers_for_img2img @@ -130,6 +130,7 @@ class VanillaStableDiffusionSampler: 
self.step = 0 self.eta = None self.default_eta = 0.0 + self.config = None def number_of_needed_noises(self, p): return 0 @@ -291,6 +292,7 @@ class KDiffusionSampler: self.stop_at = None self.eta = None self.default_eta = 1.0 + self.config = None def callback_state(self, d): store_latent(d["denoised"]) @@ -355,11 +357,12 @@ class KDiffusionSampler: steps = steps or p.steps if p.sampler_noise_scheduler_override: - sigmas = p.sampler_noise_scheduler_override(steps) - elif self.funcname.endswith('ka'): - sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device) + sigmas = p.sampler_noise_scheduler_override(steps) + elif self.config is not None and self.config.options.get('scheduler', None) == 'karras': + sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device) else: - sigmas = self.model_wrap.get_sigmas(steps) + sigmas = self.model_wrap.get_sigmas(steps) + x = x * sigmas[0] extra_params_kwargs = self.initialize(p) diff --git a/modules/shared.py b/modules/shared.py index 9e4860a28..ca2e4c742 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -236,7 +236,6 @@ options_templates.update(options_section(('ui', "User interface"), { "font": OptionInfo("", "Font for image grids that have text"), "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), "js_modal_lightbox_initialy_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), - "show_karras_scheduler_variants": OptionInfo(True, "Show Karras scheduling variants for select samplers. Try these variants if your K sampled images suffer from excessive noise."), })) options_templates.update(options_section(('sampler-params', "Sampler parameters"), { diff --git a/scripts/alternate_sampler_noise_schedules.py b/scripts/alternate_sampler_noise_schedules.py deleted file mode 100644 index 4f3ed8fb1..000000000 --- a/scripts/alternate_sampler_noise_schedules.py +++ /dev/null @@ -1,53 +0,0 @@ -import inspect -from modules.processing import Processed, process_images -import gradio as gr -import modules.scripts as scripts -import k_diffusion.sampling -import torch - - -class Script(scripts.Script): - - def title(self): - return "Alternate Sampler Noise Schedules" - - def ui(self, is_img2img): - noise_scheduler = gr.Dropdown(label="Noise Scheduler", choices=['Default','Karras','Exponential', 'Variance Preserving'], value='Default', type="index") - sched_smin = gr.Slider(value=0.1, label="Sigma min", minimum=0.0, maximum=100.0, step=0.5,) - sched_smax = gr.Slider(value=10.0, label="Sigma max", minimum=0.0, maximum=100.0, step=0.5) - sched_rho = gr.Slider(value=7.0, label="Sigma rho (Karras only)", minimum=7.0, maximum=100.0, step=0.5) - sched_beta_d = gr.Slider(value=19.9, label="Beta distribution (VP only)",minimum=0.0, maximum=40.0, step=0.5) - sched_beta_min = gr.Slider(value=0.1, label="Beta min (VP only)", minimum=0.0, maximum=40.0, step=0.1) - sched_eps_s = gr.Slider(value=0.001, label="Epsilon (VP only)", minimum=0.001, maximum=1.0, step=0.001) - - return [noise_scheduler, sched_smin, sched_smax, sched_rho, sched_beta_d, sched_beta_min, sched_eps_s] - - def run(self, p, noise_scheduler, sched_smin, sched_smax, sched_rho, sched_beta_d, sched_beta_min, sched_eps_s): - - noise_scheduler_func_name = ['-','get_sigmas_karras','get_sigmas_exponential','get_sigmas_vp'][noise_scheduler] - - base_params = { - "sigma_min":sched_smin, - "sigma_max":sched_smax, - "rho":sched_rho, - "beta_d":sched_beta_d, - 
"beta_min":sched_beta_min, - "eps_s":sched_eps_s, - "device":"cuda" if torch.cuda.is_available() else "cpu" - } - - if hasattr(k_diffusion.sampling,noise_scheduler_func_name): - - sigma_func = getattr(k_diffusion.sampling,noise_scheduler_func_name) - sigma_func_kwargs = {} - - for k,v in base_params.items(): - if k in inspect.signature(sigma_func).parameters: - sigma_func_kwargs[k] = v - - def substitute_noise_scheduler(n): - return sigma_func(n,**sigma_func_kwargs) - - p.sampler_noise_scheduler_override = substitute_noise_scheduler - - return process_images(p) diff --git a/scripts/img2imgalt.py b/scripts/img2imgalt.py index 0ef137f7d..f9894cb01 100644 --- a/scripts/img2imgalt.py +++ b/scripts/img2imgalt.py @@ -8,7 +8,6 @@ import gradio as gr from modules import processing, shared, sd_samplers, prompt_parser from modules.processing import Processed -from modules.sd_samplers import samplers from modules.shared import opts, cmd_opts, state import torch @@ -159,7 +158,7 @@ class Script(scripts.Script): combined_noise = ((1 - randomness) * rec_noise + randomness * rand_noise) / ((randomness**2 + (1-randomness)**2) ** 0.5) - sampler = samplers[p.sampler_index].constructor(p.sd_model) + sampler = sd_samplers.create_sampler_with_index(sd_samplers.samplers, p.sampler_index, p.sd_model) sigmas = sampler.model_wrap.get_sigmas(p.steps) From f5490674a8fd84162b4e80c045e675633afb9ee7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 6 Oct 2022 17:41:49 +0300 Subject: [PATCH 106/460] fix bad output for error when updating a git repo --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/launch.py b/launch.py index 9fe0fd675..75edb66a9 100644 --- a/launch.py +++ b/launch.py @@ -89,7 +89,7 @@ def git_clone(url, dir, name, commithash=None): if commithash is None: return - current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, "Couldn't determine {name}'s hash: {commithash}").strip() + current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip() if current_hash == commithash: return From be71115b1a1201d04f0e2a11e718fb31cbd26474 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Thu, 6 Oct 2022 01:09:44 +0100 Subject: [PATCH 107/460] Update shared.py --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/shared.py b/modules/shared.py index ca2e4c742..9f7c6efe5 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -236,6 +236,7 @@ options_templates.update(options_section(('ui', "User interface"), { "font": OptionInfo("", "Font for image grids that have text"), "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), "js_modal_lightbox_initialy_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), + "show_progress_in_title": OptionInfo(False, "Show generation progress in window title."), })) options_templates.update(options_section(('sampler-params', "Sampler parameters"), { From c06298d1d003aa034007978ee7508af636c18124 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Thu, 6 Oct 2022 01:10:38 +0100 Subject: [PATCH 108/460] add check for progress in title setting --- javascript/progressbar.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/progressbar.js b/javascript/progressbar.js index 3e3220c3f..f9e9290e2 100644 --- a/javascript/progressbar.js +++ b/javascript/progressbar.js @@ -5,7 +5,7 @@ function 
check_progressbar(id_part, id_progressbar, id_progressbar_span, id_inte
     var progressbar = gradioApp().getElementById(id_progressbar)
     var interrupt = gradioApp().getElementById(id_interrupt)
-    if(progressbar && progressbar.offsetParent){
+    if(opts.show_progress_in_title && progressbar && progressbar.offsetParent){
         if(progressbar.innerText){
             let newtitle = 'Stable Diffusion - ' + progressbar.innerText
             if(document.title != newtitle){

From fec71e4de24b65b0f205a3c071b71651bbcb0dfc Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Thu, 6 Oct 2022 01:35:07 +0100
Subject: [PATCH 109/460] Default window title progress updates on

---
 modules/shared.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/shared.py b/modules/shared.py
index 9f7c6efe5..5c16f0257 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -236,7 +236,7 @@ options_templates.update(options_section(('ui', "User interface"), {
     "font": OptionInfo("", "Font for image grids that have text"),
     "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
     "js_modal_lightbox_initialy_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),
-    "show_progress_in_title": OptionInfo(False, "Show generation progress in window title."),
+    "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."),
 }))

 options_templates.update(options_section(('sampler-params', "Sampler parameters"), {

From 5d0e6ab8567bda2ee8f5ed31f332ca07c1b84b98 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Thu, 6 Oct 2022 04:04:50 +0100
Subject: [PATCH 110/460] Allow escaping of commas in xy_grid

---
 scripts/xy_grid.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 1237e754d..210829a79 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -168,6 +168,7 @@ re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d
 re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
 re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
+re_non_escaped_comma = re.compile(r"(?<!\\),")

From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Thu, 6 Oct 2022 11:55:21 +0100
Subject: [PATCH 111/460] use csv.reader

---
 scripts/xy_grid.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index 210829a79..1a625898f 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -1,8 +1,9 @@
 from collections import namedtuple
 from copy import copy
-from itertools import permutations
+from itertools import permutations, chain
 import random
-
+import csv
+from io import StringIO

 from PIL import Image
 import numpy as np

@@ -168,8 +169,6 @@ re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d
 re_range_count = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\[(\d+)\s*\])?\s*")
 re_range_count_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\[(\d+(?:.\d*)?)\s*\])?\s*")
-re_non_escaped_comma = re.compile(r"(?<!\\),")

From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Thu, 6 Oct 2022 12:32:17 +0100 Subject: [PATCH 112/460] strip() split comma delimited lines --- scripts/xy_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 1a625898f..ec27e58bc 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -197,7 +197,7 @@ class Script(scripts.Script): if opt.label == 'Nothing': return [0] - valslist = list(chain.from_iterable(csv.reader(StringIO(s)))) + valslist = list(map(str.strip,chain.from_iterable(csv.reader(StringIO(s))))) if opt.type == int: valslist_ext = [] From 82eb8ea452b1e63535c58d15ec6db2ad2342faa8 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Thu, 6 Oct 2022 15:22:51 +0100 Subject: [PATCH 113/460] Update xy_grid.py split vals not 's' from tests --- scripts/xy_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index ec27e58bc..210c7b6e9 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -197,7 +197,7 @@ class Script(scripts.Script): if opt.label == 'Nothing': return [0] - valslist = list(map(str.strip,chain.from_iterable(csv.reader(StringIO(s))))) + valslist = list(map(str.strip,chain.from_iterable(csv.reader(StringIO(vals))))) if opt.type == int: valslist_ext = [] From 0bb458f0ca06a7be27cf1a1003c536d1f06a5bd3 Mon Sep 17 00:00:00 2001 From: Milly Date: Wed, 5 Oct 2022 01:19:50 +0900 Subject: [PATCH 114/460] Removed duplicate image saving codes Use `modules.images.save_image()` instead. --- modules/images.py | 7 ++++--- modules/ui.py | 46 ++++++++++------------------------------------ 2 files changed, 14 insertions(+), 39 deletions(-) diff --git a/modules/images.py b/modules/images.py index c2fadab99..810f1446e 100644 --- a/modules/images.py +++ b/modules/images.py @@ -353,7 +353,7 @@ def get_next_sequence_number(path, basename): return result + 1 -def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix=""): +def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None): if short_filename or prompt is None or seed is None: file_decoration = "" elif opts.save_to_dirs: @@ -377,7 +377,8 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i else: pnginfo = None - save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt) + if save_to_dirs is None: + save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt) if save_to_dirs: dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, seed, prompt).strip('\\ /') @@ -431,4 +432,4 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i with open(f"{fullfn_without_extension}.txt", "w", encoding="utf8") as file: file.write(info + "\n") - + return fullfn diff --git a/modules/ui.py b/modules/ui.py index 9620350fc..4f18126fb 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -35,7 +35,7 @@ import modules.codeformer_model import modules.styles import modules.generation_parameters_copypaste from modules import prompt_parser -from modules.images import apply_filename_pattern, 
get_next_sequence_number +from modules.images import save_image import modules.textual_inversion.ui # this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the bowser will not show any UI @@ -114,20 +114,13 @@ def save_files(js_data, images, index): p = MyObject(data) path = opts.outdir_save save_to_dirs = opts.use_save_to_dirs_for_ui - - if save_to_dirs: - dirname = apply_filename_pattern(opts.directories_filename_pattern or "[prompt_words]", p, p.seed, p.prompt) - path = os.path.join(opts.outdir_save, dirname) - - os.makedirs(path, exist_ok=True) - + extension: str = opts.samples_format + start_index = 0 if index > -1 and opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only images = [images[index]] - infotexts = [data["infotexts"][index]] - else: - infotexts = data["infotexts"] + start_index = index with open(os.path.join(opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file: at_start = file.tell() == 0 @@ -135,37 +128,18 @@ def save_files(js_data, images, index): if at_start: writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"]) - file_decoration = opts.samples_filename_pattern or "[seed]-[prompt_spaces]" - if file_decoration != "": - file_decoration = "-" + file_decoration.lower() - file_decoration = apply_filename_pattern(file_decoration, p, p.seed, p.prompt) - truncated = (file_decoration[:240] + '..') if len(file_decoration) > 240 else file_decoration - filename_base = truncated - extension = opts.samples_format.lower() - - basecount = get_next_sequence_number(path, "") - for i, filedata in enumerate(images): - file_number = f"{basecount+i:05}" - filename = file_number + filename_base + f".{extension}" - filepath = os.path.join(path, filename) - - + for image_index, filedata in enumerate(images, start_index): if filedata.startswith("data:image/png;base64,"): filedata = filedata[len("data:image/png;base64,"):] image = Image.open(io.BytesIO(base64.decodebytes(filedata.encode('utf-8')))) - if opts.enable_pnginfo and extension == 'png': - pnginfo = PngImagePlugin.PngInfo() - pnginfo.add_text('parameters', infotexts[i]) - image.save(filepath, pnginfo=pnginfo) - else: - image.save(filepath, quality=opts.jpeg_quality) - if opts.enable_pnginfo and extension in ("jpg", "jpeg", "webp"): - piexif.insert(piexif.dump({"Exif": { - piexif.ExifIFD.UserComment: piexif.helper.UserComment.dump(infotexts[i], encoding="unicode") - }}), filepath) + is_grid = image_index < p.index_of_first_image + i = 0 if is_grid else (image_index - p.index_of_first_image) + fullfn = save_image(image, path, "", seed=p.all_seeds[i], prompt=p.all_prompts[i], extension=extension, info=p.infotexts[image_index], grid=is_grid, p=p, save_to_dirs=save_to_dirs) + + filename = os.path.relpath(fullfn, path) filenames.append(filename) writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]]) From 1069ec49a35d04c1e85c92534e92a2d6aa59cb75 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 6 Oct 2022 20:16:21 +0300 Subject: [PATCH 115/460] revert back to using list comprehension rather than list and map --- scripts/xy_grid.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 210c7b6e9..6344e612f 100644 --- 
a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -197,7 +197,7 @@ class Script(scripts.Script):
         if opt.label == 'Nothing':
             return [0]

-        valslist = list(map(str.strip,chain.from_iterable(csv.reader(StringIO(vals)))))
+        valslist = [x.strip() for x in chain.from_iterable(csv.reader(StringIO(vals)))]

         if opt.type == int:
             valslist_ext = []

From dbc8a4d35129b08eab30776bbbaf3a2e7ac10a6c Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 6 Oct 2022 20:27:50 +0300
Subject: [PATCH 116/460] add generation parameters to images shown in web ui

---
 modules/processing.py | 8 ++++++--
 1 file changed, 6 insertions(+), 2 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index de818d5b9..8faf90956 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -430,7 +430,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
                 if opts.samples_save and not p.do_not_save_samples:
                     images.save_image(image, p.outpath_samples, "", seeds[i], prompts[i], opts.samples_format, info=infotext(n, i), p=p)

-                infotexts.append(infotext(n, i))
+                text = infotext(n, i)
+                infotexts.append(text)
+                image.info["parameters"] = text
                 output_images.append(image)

         del x_samples_ddim

@@ -447,7 +449,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:
             grid = images.image_grid(output_images, p.batch_size)

             if opts.return_grid:
-                infotexts.insert(0, infotext())
+                text = infotext()
+                infotexts.insert(0, text)
+                grid.info["parameters"] = text
                 output_images.insert(0, grid)
                 index_of_first_image = 1

From cf7c784fcc0c84a8a4edd8d3aca4dda4c7025c43 Mon Sep 17 00:00:00 2001
From: Milly
Date: Fri, 7 Oct 2022 00:19:52 +0900
Subject: [PATCH 117/460] Removed duplicate models_path definition

Use `modules.paths.models_path` instead of `modules.shared.model_path`.
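The idea being consolidated here — every per-model directory derived from one base path — can be sketched in isolation as follows. This is an illustration, not webui code; the loop and the `models_path` fallback to the working directory are assumptions made for a self-contained example.

    import argparse
    import os

    # stand-in for modules.paths.models_path; the real value lives next to the script
    models_path = os.path.join(os.getcwd(), "models")

    parser = argparse.ArgumentParser()
    for name in ("Codeformer", "GFPGAN", "ESRGAN", "LDSR"):
        parser.add_argument(
            f"--{name.lower()}-models-path",
            type=str,
            default=os.path.join(models_path, name),
            help=f"Path to directory with {name} model file(s).",
        )

    args = parser.parse_args([])
    print(args.gfpgan_models_path)  # e.g. ./models/GFPGAN

Keeping a single definition in `modules.paths` avoids exactly the drift this patch removes: two modules each computing their own models directory that could silently disagree.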
--- modules/shared.py | 19 +++++++++---------- 1 file changed, 9 insertions(+), 10 deletions(-) diff --git a/modules/shared.py b/modules/shared.py index 5c16f0257..25bb6e6c9 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -14,11 +14,10 @@ import modules.sd_models import modules.styles import modules.devices as devices from modules import sd_samplers -from modules.paths import script_path, sd_path +from modules.paths import models_path, script_path, sd_path sd_model_file = os.path.join(script_path, 'model.ckpt') default_sd_model_file = sd_model_file -model_path = os.path.join(script_path, 'models') parser = argparse.ArgumentParser() parser.add_argument("--config", type=str, default=os.path.join(sd_path, "configs/stable-diffusion/v1-inference.yaml"), help="path to config which constructs model",) parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",) @@ -36,14 +35,14 @@ parser.add_argument("--always-batch-cond-uncond", action='store_true', help="dis parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.") parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast") parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site (doesn't work for me but you might have better luck)") -parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(model_path, 'Codeformer')) -parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(model_path, 'GFPGAN')) -parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(model_path, 'ESRGAN')) -parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(model_path, 'BSRGAN')) -parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(model_path, 'RealESRGAN')) -parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(model_path, 'ScuNET')) -parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(model_path, 'SwinIR')) -parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(model_path, 'LDSR')) +parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer')) +parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN')) +parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN')) +parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN')) +parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN')) 
+parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET')) +parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR')) +parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR')) parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.") parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization") parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find") From 070b7d60cf5dac6387b3bfc8f3b3977b620e4fd5 Mon Sep 17 00:00:00 2001 From: Milly Date: Wed, 5 Oct 2022 02:13:09 +0900 Subject: [PATCH 118/460] Added styles to Processed So `[styles]` pattern can use in saving image UI. --- modules/images.py | 7 +------ modules/processing.py | 2 ++ 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/modules/images.py b/modules/images.py index 810f1446e..fa0714fd1 100644 --- a/modules/images.py +++ b/modules/images.py @@ -292,12 +292,7 @@ def apply_filename_pattern(x, p, seed, prompt): x = x.replace("[cfg]", str(p.cfg_scale)) x = x.replace("[width]", str(p.width)) x = x.replace("[height]", str(p.height)) - - #currently disabled if using the save button, will work otherwise - # if enabled it will cause a bug because styles is not included in the save_files data dictionary - if hasattr(p, "styles"): - x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"]) or "None", replace_spaces=False)) - + x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"]) or "None", replace_spaces=False)) x = x.replace("[sampler]", sanitize_filename_part(sd_samplers.samplers[p.sampler_index].name, replace_spaces=False)) x = x.replace("[model_hash]", shared.sd_model.sd_model_hash) diff --git a/modules/processing.py b/modules/processing.py index 8faf90956..706dbfa87 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -121,6 +121,7 @@ class Processed: self.denoising_strength = getattr(p, 'denoising_strength', None) self.extra_generation_params = p.extra_generation_params self.index_of_first_image = index_of_first_image + self.styles = p.styles self.eta = p.eta self.ddim_discretize = p.ddim_discretize @@ -165,6 +166,7 @@ class Processed: "extra_generation_params": self.extra_generation_params, "index_of_first_image": self.index_of_first_image, "infotexts": self.infotexts, + "styles": self.styles, } return json.dumps(obj) From 1cc36d170ac15e7f04208df32db27af1b10c867c Mon Sep 17 00:00:00 2001 From: Milly Date: Wed, 5 Oct 2022 02:17:15 +0900 Subject: [PATCH 119/460] Added job_timestamp to Processed So `[job_timestamp]` pattern can use in saving image UI. 
--- modules/images.py | 2 +- modules/processing.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index fa0714fd1..669d76af6 100644 --- a/modules/images.py +++ b/modules/images.py @@ -298,7 +298,7 @@ def apply_filename_pattern(x, p, seed, prompt): x = x.replace("[model_hash]", shared.sd_model.sd_model_hash) x = x.replace("[date]", datetime.date.today().isoformat()) x = x.replace("[datetime]", datetime.datetime.now().strftime("%Y%m%d%H%M%S")) - x = x.replace("[job_timestamp]", shared.state.job_timestamp) + x = x.replace("[job_timestamp]", getattr(p, "job_timestamp", shared.state.job_timestamp)) # Apply [prompt] at last. Because it may contain any replacement word.^M if prompt is not None: diff --git a/modules/processing.py b/modules/processing.py index 706dbfa87..f773a30ef 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -122,6 +122,7 @@ class Processed: self.extra_generation_params = p.extra_generation_params self.index_of_first_image = index_of_first_image self.styles = p.styles + self.job_timestamp = state.job_timestamp self.eta = p.eta self.ddim_discretize = p.ddim_discretize @@ -167,6 +168,7 @@ class Processed: "index_of_first_image": self.index_of_first_image, "infotexts": self.infotexts, "styles": self.styles, + "job_timestamp": self.job_timestamp, } return json.dumps(obj) From 405c8171d1acbb994084d98770bbcb97d01d9406 Mon Sep 17 00:00:00 2001 From: Milly Date: Thu, 6 Oct 2022 00:59:04 +0900 Subject: [PATCH 120/460] Prefer using `Processed.sd_model_hash` attribute when filename pattern --- modules/images.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/images.py b/modules/images.py index 669d76af6..29c5ee249 100644 --- a/modules/images.py +++ b/modules/images.py @@ -295,7 +295,7 @@ def apply_filename_pattern(x, p, seed, prompt): x = x.replace("[styles]", sanitize_filename_part(", ".join([x for x in p.styles if not x == "None"]) or "None", replace_spaces=False)) x = x.replace("[sampler]", sanitize_filename_part(sd_samplers.samplers[p.sampler_index].name, replace_spaces=False)) - x = x.replace("[model_hash]", shared.sd_model.sd_model_hash) + x = x.replace("[model_hash]", getattr(p, "sd_model_hash", shared.sd_model.sd_model_hash)) x = x.replace("[date]", datetime.date.today().isoformat()) x = x.replace("[datetime]", datetime.datetime.now().strftime("%Y%m%d%H%M%S")) x = x.replace("[job_timestamp]", getattr(p, "job_timestamp", shared.state.job_timestamp)) From b34b25b4c941819d34f29be6c4c1ec01e64585b4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Thu, 6 Oct 2022 23:27:01 +0300 Subject: [PATCH 121/460] karras samplers for img2img? 
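For reference, the schedule that `{'scheduler': 'karras'}` selects is the noise-level ramp from Karras et al. (2022). A sketch that closely follows `k_diffusion.sampling.get_sigmas_karras` (a reconstruction for illustration, not the vendored source):

    import torch

    def get_sigmas_karras(n, sigma_min, sigma_max, rho=7.0, device="cpu"):
        # interpolate linearly in sigma**(1/rho) space, then raise back to the rho power
        ramp = torch.linspace(0, 1, n, device=device)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return torch.cat([sigmas, sigmas.new_zeros([1])])  # trailing zero ends sampling

    print(get_sigmas_karras(n=10, sigma_min=0.1, sigma_max=10))

Because the schedule is a pure function of the step count and the sigma range, the img2img branch below can reuse the same `get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, ...)` call that txt2img already uses; only `steps` differs.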
---
 modules/sd_samplers.py | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py
index 497df9430..df17e93ca 100644
--- a/modules/sd_samplers.py
+++ b/modules/sd_samplers.py
@@ -338,9 +338,11 @@ class KDiffusionSampler:
         steps, t_enc = setup_img2img_steps(p, steps)

         if p.sampler_noise_scheduler_override:
-            sigmas = p.sampler_noise_scheduler_override(steps)
+            sigmas = p.sampler_noise_scheduler_override(steps)
+        elif self.config is not None and self.config.options.get('scheduler', None) == 'karras':
+            sigmas = k_diffusion.sampling.get_sigmas_karras(n=steps, sigma_min=0.1, sigma_max=10, device=shared.device)
         else:
-            sigmas = self.model_wrap.get_sigmas(steps)
+            sigmas = self.model_wrap.get_sigmas(steps)

         noise = noise * sigmas[steps - t_enc - 1]
         xi = x + noise

From 2995107fa24cfd72b0a991e18271dcde148c2807 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Thu, 6 Oct 2022 23:44:54 +0300
Subject: [PATCH 122/460] added ctrl+up or ctrl+down hotkeys for attention

---
 README.md | 4 ++++
 javascript/edit-attention.js | 41 ++++++++++++++++++++++++++++++
 2 files changed, 45 insertions(+)
 create mode 100644 javascript/edit-attention.js

diff --git a/README.md b/README.md
index ec3d7532d..a14a63306 100644
--- a/README.md
+++ b/README.md
@@ -16,6 +16,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
 - Attention, specify parts of text that the model should pay more attention to
   - a man in a ((tuxedo)) - will pay more attention to tuxedo
   - a man in a (tuxedo:1.21) - alternative syntax
+  - select text and press ctrl+up or ctrl+down to automatically adjust attention to selected text
 - Loopback, run img2img processing multiple times
 - X/Y plot, a way to draw a 2 dimensional plot of images with different parameters
 - Textual Inversion
@@ -61,6 +62,9 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web
 - Reloading checkpoints on the fly
 - Checkpoint Merger, a tab that allows you to merge two checkpoints into one
 - [Custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Custom-Scripts) with many extensions from community
+- [Composable-Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/), a way to use multiple prompts at once
+  - separate prompts using uppercase `AND`
+  - also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2`

 ## Installation and Running
 Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
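Before the JavaScript itself, a loose Python model of what the new hotkeys do — wrap a bare selection as `(text:1.0)`, or step an existing `(text:weight)` by 0.1. This is an illustration of the logic only, not the shipped code:

    def adjust_attention(prompt, start, end, delta):
        if start == 0 or prompt[start - 1] != "(":  # bare selection: wrap it
            return prompt[:start] + "(" + prompt[start:end] + ":1.0)" + prompt[end:]
        close = prompt.index(")", end)  # selection already sits inside "(sel:weight)"
        weight = round(float(prompt[end + 1:close]) + delta, 12)
        return prompt[:end + 1] + str(weight) + prompt[close:]

    p = "a man in a (tuxedo:1.2) suit"
    s = p.index("tuxedo")
    print(adjust_attention(p, s, s + len("tuxedo"), +0.1))  # a man in a (tuxedo:1.3) suit

The real handler in the diff below additionally restores the selection and refocuses the prompt textbox after rewriting its value.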
diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js new file mode 100644 index 000000000..c67ed5794 --- /dev/null +++ b/javascript/edit-attention.js @@ -0,0 +1,41 @@ +addEventListener('keydown', (event) => { + let target = event.originalTarget; + if (!target.hasAttribute("placeholder")) return; + if (!target.placeholder.toLowerCase().includes("prompt")) return; + + let plus = "ArrowUp" + let minus = "ArrowDown" + if (event.key != plus && event.key != minus) return; + + selectionStart = target.selectionStart; + selectionEnd = target.selectionEnd; + if(selectionStart == selectionEnd) return; + + event.preventDefault(); + + if (selectionStart == 0 || target.value[selectionStart - 1] != "(") { + target.value = target.value.slice(0, selectionStart) + + "(" + target.value.slice(selectionStart, selectionEnd) + ":1.0)" + + target.value.slice(selectionEnd); + + target.focus(); + target.selectionStart = selectionStart + 1; + target.selectionEnd = selectionEnd + 1; + + } else { + end = target.value.slice(selectionEnd + 1).indexOf(")") + 1; + weight = parseFloat(target.value.slice(selectionEnd + 1, selectionEnd + 1 + end)); + if (event.key == minus) weight -= 0.1; + if (event.key == plus) weight += 0.1; + + weight = parseFloat(weight.toPrecision(12)); + + target.value = target.value.slice(0, selectionEnd + 1) + + weight + + target.value.slice(selectionEnd + 1 + end - 1); + + target.focus(); + target.selectionStart = selectionStart; + target.selectionEnd = selectionEnd; + } +}); From f174fb29228a04955fb951b32b0bab79e33ec2b8 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Fri, 7 Oct 2022 05:21:49 +0300 Subject: [PATCH 123/460] add xformers attention --- modules/sd_hijack_optimizations.py | 39 +++++++++++++++++++++++++++++- 1 file changed, 38 insertions(+), 1 deletion(-) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index ea4cfdfcd..da1b76e1c 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -1,7 +1,9 @@ import math import torch from torch import einsum - +import xformers.ops +import functorch +xformers._is_functorch_available=True from ldm.util import default from einops import rearrange @@ -92,6 +94,41 @@ def split_cross_attention_forward(self, x, context=None, mask=None): return self.to_out(r2) +def _maybe_init(self, x): + """ + Initialize the attention operator, if required We expect the head dimension to be exposed here, meaning that x + : B, Head, Length + """ + if self.attention_op is not None: + return + _, M, K = x.shape + try: + self.attention_op = xformers.ops.AttentionOpDispatch( + dtype=x.dtype, + device=x.device, + k=K, + attn_bias_type=type(None), + has_dropout=False, + kv_len=M, + q_len=M, + ).op + except NotImplementedError as err: + raise NotImplementedError(f"Please install xformers with the flash attention / cutlass components.\n{err}") + +def xformers_attention_forward(self, x, context=None, mask=None): + h = self.heads + q_in = self.to_q(x) + context = default(context, x) + k_in = self.to_k(context) + v_in = self.to_v(context) + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) + del q_in, k_in, v_in + self._maybe_init(q) + out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op) + + out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + return self.to_out(out) + def cross_attention_attnblock_forward(self, x): h_ = x h_ = self.norm(h_) From 
2eb911b056ce6ff4434f673366782ed34f2b2f12 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Fri, 7 Oct 2022 05:22:28 +0300 Subject: [PATCH 124/460] Update sd_hijack.py --- modules/sd_hijack.py | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index a6fa890c4..6221ed5ac 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -20,12 +20,17 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At def apply_optimizations(): - ldm.modules.diffusionmodules.model.nonlinearity = silu - if cmd_opts.opt_split_attention_v1: ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 - elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()): - ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward + if cmd_opts.opt_split_attention: + ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward + ldm.modules.diffusionmodules.model.nonlinearity = sd_hijack_optimizations.nonlinearity_hijack + ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward + elif not cmd_opts.disable_opt_xformers_attention: + ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward + ldm.modules.attention.CrossAttention._maybe_init = sd_hijack_optimizations._maybe_init + ldm.modules.attention.CrossAttention.attention_op = None + ldm.modules.diffusionmodules.model.nonlinearity = sd_hijack_optimizations.nonlinearity_hijack ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward From da4ab2707b4cb0611cf181ba248a271d1937433e Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Fri, 7 Oct 2022 05:23:06 +0300 Subject: [PATCH 125/460] Update shared.py --- modules/shared.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/shared.py b/modules/shared.py index 25bb6e6c9..8cc3b2fe2 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -43,6 +43,7 @@ parser.add_argument("--realesrgan-models-path", type=str, help="Path to director parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET')) parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR')) parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR')) +parser.add_argument("--disable-opt-xformers-attention", action='store_true', help="force-disables xformers attention optimization") parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. 
By default, it's on for torch.cuda and off for other torch devices.") parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization") parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find") From cd8bb597c6bcb6c59b538b7a1ab8f2face764fc5 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Fri, 7 Oct 2022 05:23:25 +0300 Subject: [PATCH 126/460] Update requirements.txt --- requirements.txt | 2 ++ 1 file changed, 2 insertions(+) diff --git a/requirements.txt b/requirements.txt index 631fe616a..304a066a3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,3 +23,5 @@ resize-right torchdiffeq kornia lark +functorch +#xformers? From 35d6b231628d18d53d166c3a92fea1523e88d51e Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Fri, 7 Oct 2022 05:31:53 +0300 Subject: [PATCH 127/460] Update sd_hijack.py --- modules/sd_hijack.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 6221ed5ac..a006c0a3b 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -20,17 +20,16 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At def apply_optimizations(): + ldm.modules.diffusionmodules.model.nonlinearity = silu if cmd_opts.opt_split_attention_v1: ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 if cmd_opts.opt_split_attention: ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward - ldm.modules.diffusionmodules.model.nonlinearity = sd_hijack_optimizations.nonlinearity_hijack ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward elif not cmd_opts.disable_opt_xformers_attention: ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward ldm.modules.attention.CrossAttention._maybe_init = sd_hijack_optimizations._maybe_init ldm.modules.attention.CrossAttention.attention_op = None - ldm.modules.diffusionmodules.model.nonlinearity = sd_hijack_optimizations.nonlinearity_hijack ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward From 5303df24282ba06abb34a423f2967354d37d078e Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Fri, 7 Oct 2022 06:01:14 +0300 Subject: [PATCH 128/460] Update sd_hijack.py --- modules/sd_hijack.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index a006c0a3b..ddacb0ad8 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -23,10 +23,10 @@ def apply_optimizations(): ldm.modules.diffusionmodules.model.nonlinearity = silu if cmd_opts.opt_split_attention_v1: ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 - if cmd_opts.opt_split_attention: + elif cmd_opts.opt_split_attention: ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward - elif not cmd_opts.disable_opt_xformers_attention: + elif not 
cmd_opts.disable_opt_xformers_attention and not cmd_opts.opt_split_attention: ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward ldm.modules.attention.CrossAttention._maybe_init = sd_hijack_optimizations._maybe_init ldm.modules.attention.CrossAttention.attention_op = None From 5e3ff846c56dc8e1d5c76ea04a8f2f74d7da07fc Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Fri, 7 Oct 2022 06:38:01 +0300 Subject: [PATCH 129/460] Update sd_hijack.py --- modules/sd_hijack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index ddacb0ad8..cbdb9d3c7 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -26,7 +26,7 @@ def apply_optimizations(): elif cmd_opts.opt_split_attention: ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward - elif not cmd_opts.disable_opt_xformers_attention and not cmd_opts.opt_split_attention: + elif not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip): ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward ldm.modules.attention.CrossAttention._maybe_init = sd_hijack_optimizations._maybe_init ldm.modules.attention.CrossAttention.attention_op = None From bad7cb29cecac51c5c0f39afec332b007ed73133 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 7 Oct 2022 10:17:52 +0300 Subject: [PATCH 130/460] added support for hypernetworks (???) --- modules/hypernetwork.py | 55 ++++++++++++++++++++++++++++++ modules/sd_hijack_optimizations.py | 17 +++++++-- modules/shared.py | 9 ++++- scripts/xy_grid.py | 10 ++++++ 4 files changed, 88 insertions(+), 3 deletions(-) create mode 100644 modules/hypernetwork.py diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py new file mode 100644 index 000000000..9ed1eed9b --- /dev/null +++ b/modules/hypernetwork.py @@ -0,0 +1,55 @@ +import glob +import os +import torch +from modules import devices + + +class HypernetworkModule(torch.nn.Module): + def __init__(self, dim, state_dict): + super().__init__() + + self.linear1 = torch.nn.Linear(dim, dim * 2) + self.linear2 = torch.nn.Linear(dim * 2, dim) + + self.load_state_dict(state_dict, strict=True) + self.to(devices.device) + + def forward(self, x): + return x + (self.linear2(self.linear1(x))) + + +class Hypernetwork: + filename = None + name = None + + def __init__(self, filename): + self.filename = filename + self.name = os.path.splitext(os.path.basename(filename))[0] + self.layers = {} + + state_dict = torch.load(filename, map_location='cpu') + for size, sd in state_dict.items(): + self.layers[size] = (HypernetworkModule(size, sd[0]), HypernetworkModule(size, sd[1])) + + +def load_hypernetworks(path): + res = {} + + for filename in glob.iglob(path + '**/*.pt', recursive=True): + hn = Hypernetwork(filename) + res[hn.name] = hn + + return res + +def apply(self, x, context=None, mask=None, original=None): + + + if CrossAttention.hypernetwork is not None and context.shape[2] in CrossAttention.hypernetwork: + if context.shape[1] == 77 and CrossAttention.noise_cond: + context = context + (torch.randn_like(context) * 0.1) + h_k, h_v = CrossAttention.hypernetwork[context.shape[2]] + k = self.to_k(h_k(context)) + v = self.to_v(h_v(context)) + else: + k = 
self.to_k(context) + v = self.to_v(context) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index ea4cfdfcd..d9cca4851 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -5,6 +5,8 @@ from torch import einsum from ldm.util import default from einops import rearrange +from modules import shared + # see https://github.com/basujindal/stable-diffusion/pull/117 for discussion def split_cross_attention_forward_v1(self, x, context=None, mask=None): @@ -42,8 +44,19 @@ def split_cross_attention_forward(self, x, context=None, mask=None): q_in = self.to_q(x) context = default(context, x) - k_in = self.to_k(context) * self.scale - v_in = self.to_v(context) + + hypernetwork = shared.selected_hypernetwork() + hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None) + + if hypernetwork_layers is not None: + k_in = self.to_k(hypernetwork_layers[0](context)) + v_in = self.to_v(hypernetwork_layers[1](context)) + else: + k_in = self.to_k(context) + v_in = self.to_v(context) + + k_in *= self.scale + del context, x q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) diff --git a/modules/shared.py b/modules/shared.py index 25bb6e6c9..879d8424a 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -13,7 +13,7 @@ import modules.memmon import modules.sd_models import modules.styles import modules.devices as devices -from modules import sd_samplers +from modules import sd_samplers, hypernetwork from modules.paths import models_path, script_path, sd_path sd_model_file = os.path.join(script_path, 'model.ckpt') @@ -76,6 +76,12 @@ parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram config_filename = cmd_opts.ui_settings_file +hypernetworks = hypernetwork.load_hypernetworks(os.path.join(models_path, 'hypernetworks')) + + +def selected_hypernetwork(): + return hypernetworks.get(opts.sd_hypernetwork, None) + class State: interrupted = False @@ -206,6 +212,7 @@ options_templates.update(options_section(('system', "System"), { options_templates.update(options_section(('sd', "Stable Diffusion"), { "sd_model_checkpoint": OptionInfo(None, "Stable Diffusion checkpoint", gr.Dropdown, lambda: {"choices": modules.sd_models.checkpoint_tiles()}), + "sd_hypernetwork": OptionInfo("None", "Stable Diffusion finetune hypernetwork", gr.Dropdown, lambda: {"choices": ["None"] + [x for x in hypernetworks.keys()]}), "img2img_color_correction": OptionInfo(False, "Apply color correction to img2img results to match original colors."), "save_images_before_color_correction": OptionInfo(False, "Save a copy of image before applying color correction to img2img results"), "img2img_fix_steps": OptionInfo(False, "With img2img, do exactly the amount of steps the slider specifies (normally you'd do less with less denoising)."), diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 6344e612f..c0c364df8 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -77,6 +77,11 @@ def apply_checkpoint(p, x, xs): modules.sd_models.reload_model_weights(shared.sd_model, info) +def apply_hypernetwork(p, x, xs): + hn = shared.hypernetworks.get(x, None) + opts.data["sd_hypernetwork"] = hn.name if hn is not None else 'None' + + def format_value_add_label(p, opt, x): if type(x) == float: x = round(x, 8) @@ -122,6 +127,7 @@ axis_options = [ AxisOption("Prompt order", str_permutations, apply_order, format_value_join_list), AxisOption("Sampler", str, apply_sampler, 
format_value), AxisOption("Checkpoint name", str, apply_checkpoint, format_value), + AxisOption("Hypernetwork", str, apply_hypernetwork, format_value), AxisOption("Sigma Churn", float, apply_field("s_churn"), format_value_add_label), AxisOption("Sigma min", float, apply_field("s_tmin"), format_value_add_label), AxisOption("Sigma max", float, apply_field("s_tmax"), format_value_add_label), @@ -193,6 +199,8 @@ class Script(scripts.Script): modules.processing.fix_seed(p) p.batch_size = 1 + initial_hn = opts.sd_hypernetwork + def process_axis(opt, vals): if opt.label == 'Nothing': return [0] @@ -300,4 +308,6 @@ class Script(scripts.Script): # restore checkpoint in case it was changed by axes modules.sd_models.reload_model_weights(shared.sd_model) + opts.data["sd_hypernetwork"] = initial_hn + return processed From d15b3ec0013c10f02f0fb80e8448bac8872a151f Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 7 Oct 2022 10:40:22 +0300 Subject: [PATCH 131/460] support loading VAE --- modules/sd_models.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/modules/sd_models.py b/modules/sd_models.py index 5f9920647..8f794b479 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -134,6 +134,14 @@ def load_model_weights(model, checkpoint_file, sd_model_hash): devices.dtype = torch.float32 if shared.cmd_opts.no_half else torch.float16 + vae_file = os.path.splitext(checkpoint_file)[0] + ".vae.pt" + if os.path.exists(vae_file): + print(f"Loading VAE weights from: {vae_file}") + vae_ckpt = torch.load(vae_file, map_location="cpu") + vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"} + + model.first_stage_model.load_state_dict(vae_dict) + model.sd_model_hash = sd_model_hash model.sd_model_checkpint = checkpoint_file From 97bc0b9504572d2df80598d0b694703bcd626de6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 7 Oct 2022 13:22:50 +0300 Subject: [PATCH 132/460] do not stop working on failed hypernetwork load --- modules/hypernetwork.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py index 9ed1eed9b..c5cf4afa4 100644 --- a/modules/hypernetwork.py +++ b/modules/hypernetwork.py @@ -1,5 +1,8 @@ import glob import os +import sys +import traceback + import torch from modules import devices @@ -36,8 +39,12 @@ def load_hypernetworks(path): res = {} for filename in glob.iglob(path + '**/*.pt', recursive=True): - hn = Hypernetwork(filename) - res[hn.name] = hn + try: + hn = Hypernetwork(filename) + res[hn.name] = hn + except Exception: + print(f"Error loading hypernetwork {filename}", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) return res From f7c787eb7c295c27439f4fbdf78c26b8389560be Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 7 Oct 2022 16:39:51 +0300 Subject: [PATCH 133/460] make it possible to use hypernetworks without opt split attention --- modules/hypernetwork.py | 42 +++++++++++++++++++++++++++++++++-------- modules/sd_hijack.py | 6 ++++-- 2 files changed, 38 insertions(+), 10 deletions(-) diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py index c5cf4afa4..c7b866829 100644 --- a/modules/hypernetwork.py +++ b/modules/hypernetwork.py @@ -4,7 +4,12 @@ import sys import traceback import torch -from modules import devices + +from ldm.util import default +from modules import devices, shared +import torch +from torch import einsum +from einops import rearrange, repeat class 
HypernetworkModule(torch.nn.Module): @@ -48,15 +53,36 @@ def load_hypernetworks(path): return res -def apply(self, x, context=None, mask=None, original=None): +def attention_CrossAttention_forward(self, x, context=None, mask=None): + h = self.heads - if CrossAttention.hypernetwork is not None and context.shape[2] in CrossAttention.hypernetwork: - if context.shape[1] == 77 and CrossAttention.noise_cond: - context = context + (torch.randn_like(context) * 0.1) - h_k, h_v = CrossAttention.hypernetwork[context.shape[2]] - k = self.to_k(h_k(context)) - v = self.to_v(h_v(context)) + q = self.to_q(x) + context = default(context, x) + + hypernetwork = shared.selected_hypernetwork() + hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None) + + if hypernetwork_layers is not None: + k = self.to_k(hypernetwork_layers[0](context)) + v = self.to_v(hypernetwork_layers[1](context)) else: k = self.to_k(context) v = self.to_v(context) + + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + + sim = einsum('b i d, b j d -> b i j', q, k) * self.scale + + if mask is not None: + mask = rearrange(mask, 'b ... -> b (...)') + max_neg_value = -torch.finfo(sim.dtype).max + mask = repeat(mask, 'b j -> (b h) () j', h=h) + sim.masked_fill_(~mask, max_neg_value) + + # attention, what we cannot get enough of + attn = sim.softmax(dim=-1) + + out = einsum('b i j, b j d -> b i d', attn, v) + out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + return self.to_out(out) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index a6fa890c4..d68f89cc2 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -8,7 +8,7 @@ from torch import einsum from torch.nn.functional import silu import modules.textual_inversion.textual_inversion -from modules import prompt_parser, devices, sd_hijack_optimizations, shared +from modules import prompt_parser, devices, sd_hijack_optimizations, shared, hypernetwork from modules.shared import opts, device, cmd_opts import ldm.modules.attention @@ -20,6 +20,8 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At def apply_optimizations(): + undo_optimizations() + ldm.modules.diffusionmodules.model.nonlinearity = silu if cmd_opts.opt_split_attention_v1: @@ -30,7 +32,7 @@ def apply_optimizations(): def undo_optimizations(): - ldm.modules.attention.CrossAttention.forward = attention_CrossAttention_forward + ldm.modules.attention.CrossAttention.forward = hypernetwork.attention_CrossAttention_forward ldm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward From 54fa613c8391e3973cca9d94cdf539061932508b Mon Sep 17 00:00:00 2001 From: Greendayle Date: Fri, 7 Oct 2022 20:37:43 +0200 Subject: [PATCH 134/460] loading tf only in interrogation process --- modules/deepbooru.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/deepbooru.py b/modules/deepbooru.py index fb5018a6c..79dc59bdf 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -1,12 +1,13 @@ import os.path from concurrent.futures import ProcessPoolExecutor -import numpy as np -import deepdanbooru as dd -import tensorflow as tf def _load_tf_and_return_tags(pil_image, threshold): + import deepdanbooru as dd + import tensorflow as tf + import numpy as np + this_folder = os.path.dirname(__file__) model_path = os.path.join(this_folder, '..', 'models', 'deepbooru', 
'deepdanbooru-v3-20211112-sgd-e28') From fa2ea648db81f5723bb5d722f2fe0ebd7dfc319a Mon Sep 17 00:00:00 2001 From: Greendayle Date: Fri, 7 Oct 2022 20:46:38 +0200 Subject: [PATCH 135/460] even more powerfull fix --- modules/deepbooru.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/modules/deepbooru.py b/modules/deepbooru.py index 79dc59bdf..600943368 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -60,8 +60,13 @@ def _load_tf_and_return_tags(pil_image, threshold): return ', '.join(result_tags_out).replace('_', ' ').replace(':', ' ') +def subprocess_init_no_cuda(): + import os + os.environ["CUDA_VISIBLE_DEVICES"] = "-1" + + def get_deepbooru_tags(pil_image, threshold=0.5): - with ProcessPoolExecutor() as executor: - f = executor.submit(_load_tf_and_return_tags, pil_image, threshold) + with ProcessPoolExecutor(initializer=subprocess_init_no_cuda) as executor: + f = executor.submit(_load_tf_and_return_tags, pil_image, threshold, ) ret = f.result() # will rethrow any exceptions return ret \ No newline at end of file From 5f12e7efd92ad802742f96788b4be3249ad02829 Mon Sep 17 00:00:00 2001 From: Greendayle Date: Fri, 7 Oct 2022 20:58:30 +0200 Subject: [PATCH 136/460] linux test --- modules/deepbooru.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/deepbooru.py b/modules/deepbooru.py index 600943368..781b22492 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -1,6 +1,6 @@ import os.path from concurrent.futures import ProcessPoolExecutor - +from multiprocessing import get_context def _load_tf_and_return_tags(pil_image, threshold): @@ -66,7 +66,8 @@ def subprocess_init_no_cuda(): def get_deepbooru_tags(pil_image, threshold=0.5): - with ProcessPoolExecutor(initializer=subprocess_init_no_cuda) as executor: + context = get_context('spawn') + with ProcessPoolExecutor(initializer=subprocess_init_no_cuda, mp_context=context) as executor: f = executor.submit(_load_tf_and_return_tags, pil_image, threshold, ) ret = f.result() # will rethrow any exceptions return ret \ No newline at end of file From 12c4d5c6b5bf9dd50d0601c36af4f99b65316d58 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Fri, 7 Oct 2022 23:22:22 +0300 Subject: [PATCH 137/460] hypernetwork training mk1 --- modules/hypernetwork.py | 88 ------ modules/hypernetwork/hypernetwork.py | 267 +++++++++++++++++++ modules/hypernetwork/ui.py | 43 +++ modules/sd_hijack.py | 4 +- modules/sd_hijack_optimizations.py | 3 +- modules/shared.py | 13 +- modules/textual_inversion/ui.py | 1 - modules/ui.py | 58 +++- scripts/xy_grid.py | 7 +- textual_inversion_templates/hypernetwork.txt | 27 ++ textual_inversion_templates/none.txt | 1 + webui.py | 9 + 12 files changed, 414 insertions(+), 107 deletions(-) delete mode 100644 modules/hypernetwork.py create mode 100644 modules/hypernetwork/hypernetwork.py create mode 100644 modules/hypernetwork/ui.py create mode 100644 textual_inversion_templates/hypernetwork.txt create mode 100644 textual_inversion_templates/none.txt diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py deleted file mode 100644 index c7b866829..000000000 --- a/modules/hypernetwork.py +++ /dev/null @@ -1,88 +0,0 @@ -import glob -import os -import sys -import traceback - -import torch - -from ldm.util import default -from modules import devices, shared -import torch -from torch import einsum -from einops import rearrange, repeat - - -class HypernetworkModule(torch.nn.Module): - def __init__(self, dim, state_dict): - super().__init__() - - 
self.linear1 = torch.nn.Linear(dim, dim * 2) - self.linear2 = torch.nn.Linear(dim * 2, dim) - - self.load_state_dict(state_dict, strict=True) - self.to(devices.device) - - def forward(self, x): - return x + (self.linear2(self.linear1(x))) - - -class Hypernetwork: - filename = None - name = None - - def __init__(self, filename): - self.filename = filename - self.name = os.path.splitext(os.path.basename(filename))[0] - self.layers = {} - - state_dict = torch.load(filename, map_location='cpu') - for size, sd in state_dict.items(): - self.layers[size] = (HypernetworkModule(size, sd[0]), HypernetworkModule(size, sd[1])) - - -def load_hypernetworks(path): - res = {} - - for filename in glob.iglob(path + '**/*.pt', recursive=True): - try: - hn = Hypernetwork(filename) - res[hn.name] = hn - except Exception: - print(f"Error loading hypernetwork {filename}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - - return res - - -def attention_CrossAttention_forward(self, x, context=None, mask=None): - h = self.heads - - q = self.to_q(x) - context = default(context, x) - - hypernetwork = shared.selected_hypernetwork() - hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None) - - if hypernetwork_layers is not None: - k = self.to_k(hypernetwork_layers[0](context)) - v = self.to_v(hypernetwork_layers[1](context)) - else: - k = self.to_k(context) - v = self.to_v(context) - - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) - - sim = einsum('b i d, b j d -> b i j', q, k) * self.scale - - if mask is not None: - mask = rearrange(mask, 'b ... -> b (...)') - max_neg_value = -torch.finfo(sim.dtype).max - mask = repeat(mask, 'b j -> (b h) () j', h=h) - sim.masked_fill_(~mask, max_neg_value) - - # attention, what we cannot get enough of - attn = sim.softmax(dim=-1) - - out = einsum('b i j, b j d -> b i d', attn, v) - out = rearrange(out, '(b h) n d -> b n (h d)', h=h) - return self.to_out(out) diff --git a/modules/hypernetwork/hypernetwork.py b/modules/hypernetwork/hypernetwork.py new file mode 100644 index 000000000..a3d6a47ef --- /dev/null +++ b/modules/hypernetwork/hypernetwork.py @@ -0,0 +1,267 @@ +import datetime +import glob +import html +import os +import sys +import traceback +import tqdm + +import torch + +from ldm.util import default +from modules import devices, shared, processing, sd_models +import torch +from torch import einsum +from einops import rearrange, repeat +import modules.textual_inversion.dataset + + +class HypernetworkModule(torch.nn.Module): + def __init__(self, dim, state_dict=None): + super().__init__() + + self.linear1 = torch.nn.Linear(dim, dim * 2) + self.linear2 = torch.nn.Linear(dim * 2, dim) + + if state_dict is not None: + self.load_state_dict(state_dict, strict=True) + else: + self.linear1.weight.data.fill_(0.0001) + self.linear1.bias.data.fill_(0.0001) + self.linear2.weight.data.fill_(0.0001) + self.linear2.bias.data.fill_(0.0001) + + self.to(devices.device) + + def forward(self, x): + return x + (self.linear2(self.linear1(x))) + + +class Hypernetwork: + filename = None + name = None + + def __init__(self, name=None): + self.filename = None + self.name = name + self.layers = {} + self.step = 0 + self.sd_checkpoint = None + self.sd_checkpoint_name = None + + for size in [320, 640, 768, 1280]: + self.layers[size] = (HypernetworkModule(size), HypernetworkModule(size)) + + def weights(self): + res = [] + + for k, layers in self.layers.items(): + for layer in layers: + layer.train() + 
res += [layer.linear1.weight, layer.linear1.bias, layer.linear2.weight, layer.linear2.bias] + + return res + + def save(self, filename): + state_dict = {} + + for k, v in self.layers.items(): + state_dict[k] = (v[0].state_dict(), v[1].state_dict()) + + state_dict['step'] = self.step + state_dict['name'] = self.name + state_dict['sd_checkpoint'] = self.sd_checkpoint + state_dict['sd_checkpoint_name'] = self.sd_checkpoint_name + + torch.save(state_dict, filename) + + def load(self, filename): + self.filename = filename + if self.name is None: + self.name = os.path.splitext(os.path.basename(filename))[0] + + state_dict = torch.load(filename, map_location='cpu') + + for size, sd in state_dict.items(): + if type(size) == int: + self.layers[size] = (HypernetworkModule(size, sd[0]), HypernetworkModule(size, sd[1])) + + self.name = state_dict.get('name', self.name) + self.step = state_dict.get('step', 0) + self.sd_checkpoint = state_dict.get('sd_checkpoint', None) + self.sd_checkpoint_name = state_dict.get('sd_checkpoint_name', None) + + +def load_hypernetworks(path): + res = {} + + for filename in glob.iglob(path + '**/*.pt', recursive=True): + try: + hn = Hypernetwork() + hn.load(filename) + res[hn.name] = hn + except Exception: + print(f"Error loading hypernetwork {filename}", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + + return res + + +def attention_CrossAttention_forward(self, x, context=None, mask=None): + h = self.heads + + q = self.to_q(x) + context = default(context, x) + + hypernetwork_layers = (shared.hypernetwork.layers if shared.hypernetwork is not None else {}).get(context.shape[2], None) + + if hypernetwork_layers is not None: + hypernetwork_k, hypernetwork_v = hypernetwork_layers + + self.hypernetwork_k = hypernetwork_k + self.hypernetwork_v = hypernetwork_v + + context_k = hypernetwork_k(context) + context_v = hypernetwork_v(context) + else: + context_k = context + context_v = context + + k = self.to_k(context_k) + v = self.to_v(context_v) + + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + + sim = einsum('b i d, b j d -> b i j', q, k) * self.scale + + if mask is not None: + mask = rearrange(mask, 'b ... -> b (...)') + max_neg_value = -torch.finfo(sim.dtype).max + mask = repeat(mask, 'b j -> (b h) () j', h=h) + sim.masked_fill_(~mask, max_neg_value) + + # attention, what we cannot get enough of + attn = sim.softmax(dim=-1) + + out = einsum('b i j, b j d -> b i d', attn, v) + out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + return self.to_out(out) + + +def train_hypernetwork(hypernetwork_name, learn_rate, data_root, log_directory, steps, create_image_every, save_hypernetwork_every, template_file, preview_image_prompt): + assert hypernetwork_name, 'embedding not selected' + + shared.hypernetwork = shared.hypernetworks[hypernetwork_name] + + shared.state.textinfo = "Initializing hypernetwork training..." 
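# A minimal, runnable sketch of the mechanism HypernetworkModule above
# implements (names and shapes here are illustrative, not the webui API):
# each attention width gets a pair of small residual MLPs that transform the
# cross-attention context before the k and v projections, so training only
# ever updates these MLPs and never the Stable Diffusion weights themselves.
import torch

class TinyHypernetModule(torch.nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.linear1 = torch.nn.Linear(dim, dim * 2)
        self.linear2 = torch.nn.Linear(dim * 2, dim)

    def forward(self, x):
        # residual form; the patch initializes weights near zero, so the
        # module starts out close to an identity mapping
        return x + self.linear2(self.linear1(x))

dim = 768                                 # CLIP context width for SD v1
context = torch.randn(2, 77, dim)         # (batch, tokens, dim)
k_mod, v_mod = TinyHypernetModule(dim), TinyHypernetModule(dim)
context_k, context_v = k_mod(context), v_mod(context)  # fed to to_k / to_v
assert context_k.shape == context_v.shape == context.shape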
+ shared.state.job_count = steps + + filename = os.path.join(shared.cmd_opts.hypernetwork_dir, f'{hypernetwork_name}.pt') + + log_directory = os.path.join(log_directory, datetime.datetime.now().strftime("%Y-%m-%d"), hypernetwork_name) + + if save_hypernetwork_every > 0: + hypernetwork_dir = os.path.join(log_directory, "hypernetworks") + os.makedirs(hypernetwork_dir, exist_ok=True) + else: + hypernetwork_dir = None + + if create_image_every > 0: + images_dir = os.path.join(log_directory, "images") + os.makedirs(images_dir, exist_ok=True) + else: + images_dir = None + + cond_model = shared.sd_model.cond_stage_model + + shared.state.textinfo = f"Preparing dataset from {html.escape(data_root)}..." + with torch.autocast("cuda"): + ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, size=512, placeholder_token=hypernetwork_name, model=shared.sd_model, device=devices.device, template_file=template_file) + + hypernetwork = shared.hypernetworks[hypernetwork_name] + weights = hypernetwork.weights() + for weight in weights: + weight.requires_grad = True + + optimizer = torch.optim.AdamW(weights, lr=learn_rate) + + losses = torch.zeros((32,)) + + last_saved_file = "" + last_saved_image = "" + + ititial_step = hypernetwork.step or 0 + if ititial_step > steps: + return hypernetwork, filename + + pbar = tqdm.tqdm(enumerate(ds), total=steps-ititial_step) + for i, (x, text) in pbar: + hypernetwork.step = i + ititial_step + + if hypernetwork.step > steps: + break + + if shared.state.interrupted: + break + + with torch.autocast("cuda"): + c = cond_model([text]) + + x = x.to(devices.device) + loss = shared.sd_model(x.unsqueeze(0), c)[0] + del x + + losses[hypernetwork.step % losses.shape[0]] = loss.item() + + optimizer.zero_grad() + loss.backward() + optimizer.step() + + pbar.set_description(f"loss: {losses.mean():.7f}") + + if hypernetwork.step > 0 and hypernetwork_dir is not None and hypernetwork.step % save_hypernetwork_every == 0: + last_saved_file = os.path.join(hypernetwork_dir, f'{hypernetwork_name}-{hypernetwork.step}.pt') + hypernetwork.save(last_saved_file) + + if hypernetwork.step > 0 and images_dir is not None and hypernetwork.step % create_image_every == 0: + last_saved_image = os.path.join(images_dir, f'{hypernetwork_name}-{hypernetwork.step}.png') + + preview_text = text if preview_image_prompt == "" else preview_image_prompt + + p = processing.StableDiffusionProcessingTxt2Img( + sd_model=shared.sd_model, + prompt=preview_text, + steps=20, + do_not_save_grid=True, + do_not_save_samples=True, + ) + + processed = processing.process_images(p) + image = processed.images[0] + + shared.state.current_image = image + image.save(last_saved_image) + + last_saved_image += f", prompt: {preview_text}" + + shared.state.job_no = hypernetwork.step + + shared.state.textinfo = f""" +
<p>
+Loss: {losses.mean():.7f}<br/>
+Step: {hypernetwork.step}<br/>
+Last prompt: {html.escape(text)}<br/>
+Last saved embedding: {html.escape(last_saved_file)}<br/>
+Last saved image: {html.escape(last_saved_image)}<br/>
+</p>
+""" + + checkpoint = sd_models.select_checkpoint() + + hypernetwork.sd_checkpoint = checkpoint.hash + hypernetwork.sd_checkpoint_name = checkpoint.model_name + hypernetwork.save(filename) + + return hypernetwork, filename + + diff --git a/modules/hypernetwork/ui.py b/modules/hypernetwork/ui.py new file mode 100644 index 000000000..525f978c5 --- /dev/null +++ b/modules/hypernetwork/ui.py @@ -0,0 +1,43 @@ +import html +import os + +import gradio as gr + +import modules.textual_inversion.textual_inversion +import modules.textual_inversion.preprocess +from modules import sd_hijack, shared + + +def create_hypernetwork(name): + fn = os.path.join(shared.cmd_opts.hypernetwork_dir, f"{name}.pt") + assert not os.path.exists(fn), f"file {fn} already exists" + + hypernetwork = modules.hypernetwork.hypernetwork.Hypernetwork(name=name) + hypernetwork.save(fn) + + shared.reload_hypernetworks() + shared.hypernetwork = shared.hypernetworks.get(shared.opts.sd_hypernetwork, None) + + return gr.Dropdown.update(choices=sorted([x for x in shared.hypernetworks.keys()])), f"Created: {fn}", "" + + +def train_hypernetwork(*args): + + initial_hypernetwork = shared.hypernetwork + + try: + sd_hijack.undo_optimizations() + + hypernetwork, filename = modules.hypernetwork.hypernetwork.train_hypernetwork(*args) + + res = f""" +Training {'interrupted' if shared.state.interrupted else 'finished'} at {hypernetwork.step} steps. +Hypernetwork saved to {html.escape(filename)} +""" + return res, "" + except Exception: + raise + finally: + shared.hypernetwork = initial_hypernetwork + sd_hijack.apply_optimizations() + diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index d68f89cc2..ec8c9d4b2 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -8,7 +8,7 @@ from torch import einsum from torch.nn.functional import silu import modules.textual_inversion.textual_inversion -from modules import prompt_parser, devices, sd_hijack_optimizations, shared, hypernetwork +from modules import prompt_parser, devices, sd_hijack_optimizations, shared from modules.shared import opts, device, cmd_opts import ldm.modules.attention @@ -32,6 +32,8 @@ def apply_optimizations(): def undo_optimizations(): + from modules.hypernetwork import hypernetwork + ldm.modules.attention.CrossAttention.forward = hypernetwork.attention_CrossAttention_forward ldm.modules.diffusionmodules.model.nonlinearity = diffusionmodules_model_nonlinearity ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index d9cca4851..3f32e0209 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -45,8 +45,7 @@ def split_cross_attention_forward(self, x, context=None, mask=None): q_in = self.to_q(x) context = default(context, x) - hypernetwork = shared.selected_hypernetwork() - hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None) + hypernetwork_layers = (shared.hypernetwork.layers if shared.hypernetwork is not None else {}).get(context.shape[2], None) if hypernetwork_layers is not None: k_in = self.to_k(hypernetwork_layers[0](context)) diff --git a/modules/shared.py b/modules/shared.py index 879d8424a..c5a893e8d 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -13,7 +13,7 @@ import modules.memmon import modules.sd_models import modules.styles import modules.devices as devices -from modules import sd_samplers, hypernetwork +from modules import 
sd_samplers from modules.paths import models_path, script_path, sd_path sd_model_file = os.path.join(script_path, 'model.ckpt') @@ -28,6 +28,7 @@ parser.add_argument("--no-half", action='store_true', help="do not switch the mo parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)") parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI") parser.add_argument("--embeddings-dir", type=str, default=os.path.join(script_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)") +parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory") parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui") parser.add_argument("--medvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a little speed for low VRM usage") parser.add_argument("--lowvram", action='store_true', help="enable stable diffusion model optimizations for sacrificing a lot of speed for very low VRM usage") @@ -76,11 +77,15 @@ parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram config_filename = cmd_opts.ui_settings_file -hypernetworks = hypernetwork.load_hypernetworks(os.path.join(models_path, 'hypernetworks')) + +def reload_hypernetworks(): + from modules.hypernetwork import hypernetwork + hypernetworks.clear() + hypernetworks.update(hypernetwork.load_hypernetworks(cmd_opts.hypernetwork_dir)) -def selected_hypernetwork(): - return hypernetworks.get(opts.sd_hypernetwork, None) +hypernetworks = {} +hypernetwork = None class State: diff --git a/modules/textual_inversion/ui.py b/modules/textual_inversion/ui.py index f19ac5e02..c57de1f94 100644 --- a/modules/textual_inversion/ui.py +++ b/modules/textual_inversion/ui.py @@ -22,7 +22,6 @@ def preprocess(*args): def train_embedding(*args): - try: sd_hijack.undo_optimizations() diff --git a/modules/ui.py b/modules/ui.py index 4f18126fb..051908c1c 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -37,6 +37,7 @@ import modules.generation_parameters_copypaste from modules import prompt_parser from modules.images import save_image import modules.textual_inversion.ui +import modules.hypernetwork.ui # this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the bowser will not show any UI mimetypes.init() @@ -965,6 +966,18 @@ def create_ui(wrap_gradio_gpu_call): with gr.Column(): create_embedding = gr.Button(value="Create", variant='primary') + with gr.Group(): + gr.HTML(value="
<p style='margin-bottom: 0.7em'>Create a new hypernetwork</p>
") + + new_hypernetwork_name = gr.Textbox(label="Name") + + with gr.Row(): + with gr.Column(scale=3): + gr.HTML(value="") + + with gr.Column(): + create_hypernetwork = gr.Button(value="Create", variant='primary') + with gr.Group(): gr.HTML(value="
<p style='margin-bottom: 0.7em'>Preprocess images</p>
") @@ -986,6 +999,7 @@ def create_ui(wrap_gradio_gpu_call): with gr.Group(): gr.HTML(value="
<p style='margin-bottom: 0.7em'>Train an embedding; must specify a directory with a set of 512x512 images</p>
") train_embedding_name = gr.Dropdown(label='Embedding', choices=sorted(sd_hijack.model_hijack.embedding_db.word_embeddings.keys())) + train_hypernetwork_name = gr.Dropdown(label='Hypernetwork', choices=[x for x in shared.hypernetworks.keys()]) learn_rate = gr.Number(label='Learning rate', value=5.0e-03) dataset_directory = gr.Textbox(label='Dataset directory', placeholder="Path to directory with input images") log_directory = gr.Textbox(label='Log directory', placeholder="Path to directory where to write outputs", value="textual_inversion") @@ -993,15 +1007,12 @@ def create_ui(wrap_gradio_gpu_call): steps = gr.Number(label='Max steps', value=100000, precision=0) create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0) save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0) + preview_image_prompt = gr.Textbox(label='Preview prompt', value="") with gr.Row(): - with gr.Column(scale=2): - gr.HTML(value="") - - with gr.Column(): - with gr.Row(): - interrupt_training = gr.Button(value="Interrupt") - train_embedding = gr.Button(value="Train", variant='primary') + interrupt_training = gr.Button(value="Interrupt") + train_hypernetwork = gr.Button(value="Train Hypernetwork", variant='primary') + train_embedding = gr.Button(value="Train Embedding", variant='primary') with gr.Column(): progressbar = gr.HTML(elem_id="ti_progressbar") @@ -1027,6 +1038,18 @@ def create_ui(wrap_gradio_gpu_call): ] ) + create_hypernetwork.click( + fn=modules.hypernetwork.ui.create_hypernetwork, + inputs=[ + new_hypernetwork_name, + ], + outputs=[ + train_hypernetwork_name, + ti_output, + ti_outcome, + ] + ) + run_preprocess.click( fn=wrap_gradio_gpu_call(modules.textual_inversion.ui.preprocess, extra_outputs=[gr.update()]), _js="start_training_textual_inversion", @@ -1062,12 +1085,33 @@ def create_ui(wrap_gradio_gpu_call): ] ) + train_hypernetwork.click( + fn=wrap_gradio_gpu_call(modules.hypernetwork.ui.train_hypernetwork, extra_outputs=[gr.update()]), + _js="start_training_textual_inversion", + inputs=[ + train_hypernetwork_name, + learn_rate, + dataset_directory, + log_directory, + steps, + create_image_every, + save_embedding_every, + template_file, + preview_image_prompt, + ], + outputs=[ + ti_output, + ti_outcome, + ] + ) + interrupt_training.click( fn=lambda: shared.state.interrupt(), inputs=[], outputs=[], ) + def create_setting_component(key): def fun(): return opts.data[key] if key in opts.data else opts.data_labels[key].default diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index c0c364df8..5b504de6b 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -78,8 +78,7 @@ def apply_checkpoint(p, x, xs): def apply_hypernetwork(p, x, xs): - hn = shared.hypernetworks.get(x, None) - opts.data["sd_hypernetwork"] = hn.name if hn is not None else 'None' + shared.hypernetwork = shared.hypernetworks.get(x, None) def format_value_add_label(p, opt, x): @@ -199,7 +198,7 @@ class Script(scripts.Script): modules.processing.fix_seed(p) p.batch_size = 1 - initial_hn = opts.sd_hypernetwork + initial_hn = shared.hypernetwork def process_axis(opt, vals): if opt.label == 'Nothing': @@ -308,6 +307,6 @@ class Script(scripts.Script): # restore checkpoint in case it was changed by axes modules.sd_models.reload_model_weights(shared.sd_model) - opts.data["sd_hypernetwork"] = initial_hn + shared.hypernetwork = initial_hn return processed diff --git 
a/textual_inversion_templates/hypernetwork.txt b/textual_inversion_templates/hypernetwork.txt new file mode 100644 index 000000000..91e068905 --- /dev/null +++ b/textual_inversion_templates/hypernetwork.txt @@ -0,0 +1,27 @@ +a photo of a [filewords] +a rendering of a [filewords] +a cropped photo of the [filewords] +the photo of a [filewords] +a photo of a clean [filewords] +a photo of a dirty [filewords] +a dark photo of the [filewords] +a photo of my [filewords] +a photo of the cool [filewords] +a close-up photo of a [filewords] +a bright photo of the [filewords] +a cropped photo of a [filewords] +a photo of the [filewords] +a good photo of the [filewords] +a photo of one [filewords] +a close-up photo of the [filewords] +a rendition of the [filewords] +a photo of the clean [filewords] +a rendition of a [filewords] +a photo of a nice [filewords] +a good photo of a [filewords] +a photo of the nice [filewords] +a photo of the small [filewords] +a photo of the weird [filewords] +a photo of the large [filewords] +a photo of a cool [filewords] +a photo of a small [filewords] diff --git a/textual_inversion_templates/none.txt b/textual_inversion_templates/none.txt new file mode 100644 index 000000000..f77af4612 --- /dev/null +++ b/textual_inversion_templates/none.txt @@ -0,0 +1 @@ +picture diff --git a/webui.py b/webui.py index 480360fe0..60f9061f9 100644 --- a/webui.py +++ b/webui.py @@ -74,6 +74,15 @@ def wrap_gradio_gpu_call(func, extra_outputs=None): return modules.ui.wrap_gradio_call(f, extra_outputs=extra_outputs) +def set_hypernetwork(): + shared.hypernetwork = shared.hypernetworks.get(shared.opts.sd_hypernetwork, None) + + +shared.reload_hypernetworks() +shared.opts.onchange("sd_hypernetwork", set_hypernetwork) +set_hypernetwork() + + modules.scripts.load_scripts(os.path.join(script_path, "scripts")) shared.sd_model = modules.sd_models.load_model() From c9cc65b201679ea43c763b0d85e749d40bbc5433 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 04:09:18 +0300 Subject: [PATCH 138/460] switch to the proper way of calling xformers --- modules/sd_hijack_optimizations.py | 28 +++------------------------- 1 file changed, 3 insertions(+), 25 deletions(-) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index da1b76e1c..7fb4a45e3 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -94,39 +94,17 @@ def split_cross_attention_forward(self, x, context=None, mask=None): return self.to_out(r2) -def _maybe_init(self, x): - """ - Initialize the attention operator, if required We expect the head dimension to be exposed here, meaning that x - : B, Head, Length - """ - if self.attention_op is not None: - return - _, M, K = x.shape - try: - self.attention_op = xformers.ops.AttentionOpDispatch( - dtype=x.dtype, - device=x.device, - k=K, - attn_bias_type=type(None), - has_dropout=False, - kv_len=M, - q_len=M, - ).op - except NotImplementedError as err: - raise NotImplementedError(f"Please install xformers with the flash attention / cutlass components.\n{err}") - def xformers_attention_forward(self, x, context=None, mask=None): h = self.heads q_in = self.to_q(x) context = default(context, x) k_in = self.to_k(context) v_in = self.to_v(context) - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in)) del q_in, k_in, v_in - self._maybe_init(q) - out = 
xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None, op=self.attention_op) + out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) - out = rearrange(out, '(b h) n d -> b n (h d)', h=h) + out = rearrange(out, 'b n h d -> b n (h d)', h=h) return self.to_out(out) def cross_attention_attnblock_forward(self, x): From b70eaeb2005a5a9593119e7fd32b8072c2a208d5 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 04:10:35 +0300 Subject: [PATCH 139/460] delete broken and unnecessary aliases --- modules/sd_hijack.py | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index cbdb9d3c7..0e99c3192 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -21,16 +21,14 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At def apply_optimizations(): ldm.modules.diffusionmodules.model.nonlinearity = silu - if cmd_opts.opt_split_attention_v1: + if not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip): + ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward + ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward + elif cmd_opts.opt_split_attention_v1: ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 elif cmd_opts.opt_split_attention: ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward - elif not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip): - ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward - ldm.modules.attention.CrossAttention._maybe_init = sd_hijack_optimizations._maybe_init - ldm.modules.attention.CrossAttention.attention_op = None - ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward def undo_optimizations(): From a958f9b3fdea95c01d360aba1b6fe0ce3ea6b349 Mon Sep 17 00:00:00 2001 From: Jairo Correa Date: Fri, 7 Oct 2022 20:05:47 -0300 Subject: [PATCH 140/460] edit-attention browser compatibility and readme typo --- README.md | 2 +- javascript/edit-attention.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index a14a63306..0516c2cd8 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web - Attention, specify parts of text that the model should pay more attention to - a man in a ((tuxedo)) - will pay more attention to tuxedo - a man in a (tuxedo:1.21) - alternative syntax - - select text and press ctrl+up or ctrl+down to aduotmatically adjust attention to selected text + - select text and press ctrl+up or ctrl+down to automatically adjust attention to selected text - Loopback, run img2img processing multiple times - X/Y plot, a way to draw a 2 dimensional plot of images with different parameters - Textual Inversion diff --git a/javascript/edit-attention.js b/javascript/edit-attention.js index c67ed5794..0280c603f 100644 --- a/javascript/edit-attention.js +++ b/javascript/edit-attention.js @@ -1,5 +1,5 @@ addEventListener('keydown', (event) => { - let target = 
event.originalTarget; + let target = event.originalTarget || event.composedPath()[0]; if (!target.hasAttribute("placeholder")) return; if (!target.placeholder.toLowerCase().includes("prompt")) return; From f2055cb1d4ce45d7aaacc49d8ab5bec7791a8f47 Mon Sep 17 00:00:00 2001 From: brkirch Date: Sat, 8 Oct 2022 01:47:02 -0400 Subject: [PATCH 141/460] Add hypernetwork support to split cross attention v1 * Add hypernetwork support to split_cross_attention_forward_v1 * Fix device check in esrgan_model.py to use devices.device_esrgan instead of shared.device --- modules/esrgan_model.py | 2 +- modules/sd_hijack_optimizations.py | 18 ++++++++++++++---- 2 files changed, 15 insertions(+), 5 deletions(-) diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index d17e730f9..285481242 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -111,7 +111,7 @@ class UpscalerESRGAN(Upscaler): print("Unable to load %s from %s" % (self.model_path, filename)) return None - pretrained_net = torch.load(filename, map_location='cpu' if shared.device.type == 'mps' else None) + pretrained_net = torch.load(filename, map_location='cpu' if devices.device_esrgan.type == 'mps' else None) crt_model = arch.RRDBNet(3, 3, 64, 23, gc=32) pretrained_net = fix_model_layers(crt_model, pretrained_net) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index d9cca4851..3351c7409 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -12,13 +12,22 @@ from modules import shared def split_cross_attention_forward_v1(self, x, context=None, mask=None): h = self.heads - q = self.to_q(x) + q_in = self.to_q(x) context = default(context, x) - k = self.to_k(context) - v = self.to_v(context) + + hypernetwork = shared.selected_hypernetwork() + hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None) + + if hypernetwork_layers is not None: + k_in = self.to_k(hypernetwork_layers[0](context)) + v_in = self.to_v(hypernetwork_layers[1](context)) + else: + k_in = self.to_k(context) + v_in = self.to_v(context) del context, x - q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q, k, v)) + q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> (b h) n d', h=h), (q_in, k_in, v_in)) + del q_in, k_in, v_in r1 = torch.zeros(q.shape[0], q.shape[1], v.shape[2], device=q.device) for i in range(0, q.shape[0], 2): @@ -31,6 +40,7 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None): r1[i:end] = einsum('b i j, b j d -> b i d', s2, v[i:end]) del s2 + del q, k, v r2 = rearrange(r1, '(b h) n d -> b n (h d)', h=h) del r1 From e21e4732531299ef4895baccdb7a6493a3886924 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 8 Oct 2022 05:34:17 +0100 Subject: [PATCH 142/460] Context Menus --- javascript/contextMenus.js | 165 +++++++++++++++++++++++++++++++++++++ 1 file changed, 165 insertions(+) create mode 100644 javascript/contextMenus.js diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js new file mode 100644 index 000000000..99d1d3f7d --- /dev/null +++ b/javascript/contextMenus.js @@ -0,0 +1,165 @@ + +contextMenuInit = function(){ + let eventListenerApplied=false; + let menuSpecs = new Map(); + + const uid = function(){ + return Date.now().toString(36) + Math.random().toString(36).substr(2); + } + + function showContextMenu(event,element,menuEntries){ + let posx = event.clientX + document.body.scrollLeft + 
document.documentElement.scrollLeft; + let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop; + + let oldMenu = gradioApp().querySelector('#context-menu') + if(oldMenu){ + oldMenu.remove() + } + + let tabButton = gradioApp().querySelector('button') + let baseStyle = window.getComputedStyle(tabButton) + + const contextMenu = document.createElement('nav') + contextMenu.id = "context-menu" + contextMenu.style.background = baseStyle.background + contextMenu.style.color = baseStyle.color + contextMenu.style.fontFamily = baseStyle.fontFamily + contextMenu.style.top = posy+'px' + contextMenu.style.left = posx+'px' + + + + const contextMenuList = document.createElement('ul') + contextMenuList.className = 'context-menu-items'; + contextMenu.append(contextMenuList); + + menuEntries.forEach(function(entry){ + let contextMenuEntry = document.createElement('a') + contextMenuEntry.innerHTML = entry['name'] + contextMenuEntry.addEventListener("click", function(e) { + entry['func'](); + }) + contextMenuList.append(contextMenuEntry); + + }) + + gradioApp().getRootNode().appendChild(contextMenu) + + let menuWidth = contextMenu.offsetWidth + 4; + let menuHeight = contextMenu.offsetHeight + 4; + + let windowWidth = window.innerWidth; + let windowHeight = window.innerHeight; + + if ( (windowWidth - posx) < menuWidth ) { + contextMenu.style.left = windowWidth - menuWidth + "px"; + } + + if ( (windowHeight - posy) < menuHeight ) { + contextMenu.style.top = windowHeight - menuHeight + "px"; + } + + } + + function appendContextMenuOption(targetEmementSelector,entryName,entryFunction){ + + currentItems = menuSpecs.get(targetEmementSelector) + + if(!currentItems){ + currentItems = [] + menuSpecs.set(targetEmementSelector,currentItems); + } + let newItem = {'id':targetEmementSelector+'_'+uid(), + 'name':entryName, + 'func':entryFunction, + 'isNew':true} + + currentItems.push(newItem) + return newItem['id'] + } + + function removeContextMenuOption(uid){ + + } + + function addContextMenuEventListener(){ + if(eventListenerApplied){ + return; + } + gradioApp().addEventListener("click", function(e) { + let source = e.composedPath()[0] + if(source.id && source.indexOf('check_progress')>-1){ + return + } + + let oldMenu = gradioApp().querySelector('#context-menu') + if(oldMenu){ + oldMenu.remove() + } + }); + gradioApp().addEventListener("contextmenu", function(e) { + let oldMenu = gradioApp().querySelector('#context-menu') + if(oldMenu){ + oldMenu.remove() + } + menuSpecs.forEach(function(v,k) { + if(e.composedPath()[0].matches(k)){ + showContextMenu(e,e.composedPath()[0],v) + e.preventDefault() + return + } + }) + }); + eventListenerApplied=true + + } + + return [appendContextMenuOption, removeContextMenuOption, addContextMenuEventListener] +} + +initResponse = contextMenuInit() +appendContextMenuOption = initResponse[0] +removeContextMenuOption = initResponse[1] +addContextMenuEventListener = initResponse[2] + + +//Start example Context Menu Items +generateOnRepeatId = appendContextMenuOption('#txt2img_generate','Generate forever',function(){ + let genbutton = gradioApp().querySelector('#txt2img_generate'); + let interruptbutton = gradioApp().querySelector('#txt2img_interrupt'); + if(!interruptbutton.offsetParent){ + genbutton.click(); + } + clearInterval(window.generateOnRepeatInterval) + window.generateOnRepeatInterval = setInterval(function(){ + if(!interruptbutton.offsetParent){ + genbutton.click(); + } + }, + 500)} +) + +cancelGenerateForever = function(){ + 
clearInterval(window.generateOnRepeatInterval) + let interruptbutton = gradioApp().querySelector('#txt2img_interrupt'); + if(interruptbutton.offsetParent){ + interruptbutton.click(); + } +} + +appendContextMenuOption('#txt2img_interrupt','Cancel generate forever',cancelGenerateForever) +appendContextMenuOption('#txt2img_generate','Cancel generate forever',cancelGenerateForever) + +appendContextMenuOption('#roll','Roll three', + function(){ + let rollbutton = gradioApp().querySelector('#roll'); + setTimeout(function(){rollbutton.click()},100) + setTimeout(function(){rollbutton.click()},200) + setTimeout(function(){rollbutton.click()},300) + } +) +//End example Context Menu Items + +onUiUpdate(function(){ + addContextMenuEventListener() +}); \ No newline at end of file From 83749bfc72923b946abb825ebf4fdcc8b6035c8e Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 8 Oct 2022 05:35:03 +0100 Subject: [PATCH 143/460] context menu styling --- style.css | 29 ++++++++++++++++++++++++++++- 1 file changed, 28 insertions(+), 1 deletion(-) diff --git a/style.css b/style.css index da0729a25..50c5e557c 100644 --- a/style.css +++ b/style.css @@ -410,4 +410,31 @@ input[type="range"]{ #img2img_image div.h-60{ height: 480px; -} \ No newline at end of file +} + +#context-menu{ + z-index:9999; + position:absolute; + display:block; + padding:0px 0; + border:2px solid #a55000; + border-radius:8px; + box-shadow:1px 1px 2px #CE6400; + width: 200px; +} + +.context-menu-items{ + list-style: none; + margin: 0; + padding: 0; +} + +.context-menu-items a{ + display:block; + padding:5px; + cursor:pointer; +} + +.context-menu-items a:hover{ + background: #a55000; +} From 21679435e531e729a4aea494e6cb9b7152ecdf75 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 8 Oct 2022 05:46:42 +0100 Subject: [PATCH 144/460] implement removal --- javascript/contextMenus.js | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/javascript/contextMenus.js b/javascript/contextMenus.js index 99d1d3f7d..2d82269fc 100644 --- a/javascript/contextMenus.js +++ b/javascript/contextMenus.js @@ -79,7 +79,13 @@ contextMenuInit = function(){ } function removeContextMenuOption(uid){ - + menuSpecs.forEach(function(v,k) { + let index = -1 + v.forEach(function(e,ei){if(e['id']==uid){index=ei}}) + if(index>=0){ + v.splice(index, 1); + } + }) } function addContextMenuEventListener(){ @@ -148,7 +154,8 @@ cancelGenerateForever = function(){ } appendContextMenuOption('#txt2img_interrupt','Cancel generate forever',cancelGenerateForever) -appendContextMenuOption('#txt2img_generate','Cancel generate forever',cancelGenerateForever) +appendContextMenuOption('#txt2img_generate', 'Cancel generate forever',cancelGenerateForever) + appendContextMenuOption('#roll','Roll three', function(){ @@ -162,4 +169,4 @@ appendContextMenuOption('#roll','Roll three', onUiUpdate(function(){ addContextMenuEventListener() -}); \ No newline at end of file +}); From 87db6f01cc6b118fe0c82c36c6686d72d060c417 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 8 Oct 2022 10:15:29 +0300 Subject: [PATCH 145/460] add info about cross attention javascript shortcut code --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 0516c2cd8..d6e1d50bd 100644 --- a/README.md +++ b/README.md @@ -16,7 +16,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web - Attention, specify parts of text 
that the model should pay more attention to - a man in a ((tuxedo)) - will pay more attention to tuxedo - a man in a (tuxedo:1.21) - alternative syntax - - select text and press ctrl+up or ctrl+down to automatically adjust attention to selected text + - select text and press ctrl+up or ctrl+down to automatically adjust attention to selected text (code contributed by anonymous user) - Loopback, run img2img processing multiple times - X/Y plot, a way to draw a 2 dimensional plot of images with different parameters - Textual Inversion From 5d54f35c583bd5a3b0ee271a862827f1ca81ef09 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 11:55:02 +0300 Subject: [PATCH 146/460] add xformers attnblock and hypernetwork support --- modules/sd_hijack_optimizations.py | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index 7fb4a45e3..c78d58382 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -98,8 +98,14 @@ def xformers_attention_forward(self, x, context=None, mask=None): h = self.heads q_in = self.to_q(x) context = default(context, x) - k_in = self.to_k(context) - v_in = self.to_v(context) + hypernetwork = shared.selected_hypernetwork() + hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None) + if hypernetwork_layers is not None: + k_in = self.to_k(hypernetwork_layers[0](context)) + v_in = self.to_v(hypernetwork_layers[1](context)) + else: + k_in = self.to_k(context) + v_in = self.to_v(context) q, k, v = map(lambda t: rearrange(t, 'b n (h d) -> b n h d', h=h), (q_in, k_in, v_in)) del q_in, k_in, v_in out = xformers.ops.memory_efficient_attention(q, k, v, attn_bias=None) @@ -169,3 +175,13 @@ def cross_attention_attnblock_forward(self, x): h3 += x return h3 + + def xformers_attnblock_forward(self, x): + h_ = x + h_ = self.norm(h_) + q1 = self.q(h_).contiguous() + k1 = self.k(h_).contiguous() + v = self.v(h_).contiguous() + out = xformers.ops.memory_efficient_attention(q1, k1, v) + out = self.proj_out(out) + return x+out From 76a616fa6b814c681eaf6edc87eb3001b8c2b6be Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 11:55:38 +0300 Subject: [PATCH 147/460] Update sd_hijack_optimizations.py --- modules/sd_hijack_optimizations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index c78d58382..ee58c7e4e 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -176,7 +176,7 @@ def cross_attention_attnblock_forward(self, x): return h3 - def xformers_attnblock_forward(self, x): +def xformers_attnblock_forward(self, x): h_ = x h_ = self.norm(h_) q1 = self.q(h_).contiguous() From 91d66f5520df416db718103d460550ad495e952d Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 11:56:01 +0300 Subject: [PATCH 148/460] use new attnblock for xformers path --- modules/sd_hijack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 0e99c3192..3da8c8ce2 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -23,7 +23,7 @@ def apply_optimizations(): ldm.modules.diffusionmodules.model.nonlinearity = silu if not 
cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip): ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward - ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward + ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward elif cmd_opts.opt_split_attention_v1: ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 elif cmd_opts.opt_split_attention: From 616b7218f7c469d25c138634472017a7e18e742e Mon Sep 17 00:00:00 2001 From: leko Date: Fri, 7 Oct 2022 23:09:21 +0800 Subject: [PATCH 149/460] fix: handles when state_dict does not exist --- modules/sd_models.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 8f794b479..9409d0707 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -122,7 +122,11 @@ def load_model_weights(model, checkpoint_file, sd_model_hash): pl_sd = torch.load(checkpoint_file, map_location="cpu") if "global_step" in pl_sd: print(f"Global Step: {pl_sd['global_step']}") - sd = pl_sd["state_dict"] + + if "state_dict" in pl_sd: + sd = pl_sd["state_dict"] + else: + sd = pl_sd model.load_state_dict(sd, strict=False) From 706d5944a075a6523ea7f00165d630efc085ca22 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 8 Oct 2022 13:38:57 +0300 Subject: [PATCH 150/460] let user choose his own prompt token count limit --- modules/processing.py | 6 ++++++ modules/sd_hijack.py | 13 +++++++------ modules/shared.py | 5 +++-- 3 files changed, 16 insertions(+), 8 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index f773a30ef..d814d5acd 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -123,6 +123,7 @@ class Processed: self.index_of_first_image = index_of_first_image self.styles = p.styles self.job_timestamp = state.job_timestamp + self.max_prompt_tokens = opts.max_prompt_tokens self.eta = p.eta self.ddim_discretize = p.ddim_discretize @@ -141,6 +142,7 @@ class Processed: self.all_subseeds = all_subseeds or [self.subseed] self.infotexts = infotexts or [info] + def js(self): obj = { "prompt": self.prompt, @@ -169,6 +171,7 @@ class Processed: "infotexts": self.infotexts, "styles": self.styles, "job_timestamp": self.job_timestamp, + "max_prompt_tokens": self.max_prompt_tokens, } return json.dumps(obj) @@ -266,6 +269,8 @@ def fix_seed(p): def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0): index = position_in_batch + iteration * p.batch_size + max_tokens = getattr(p, 'max_prompt_tokens', opts.max_prompt_tokens) + generation_params = { "Steps": p.steps, "Sampler": sd_samplers.samplers[p.sampler_index].name, @@ -281,6 +286,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"), "Denoising strength": getattr(p, 'denoising_strength', None), "Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta), + "Max tokens": (None if max_tokens == shared.vanilla_max_prompt_tokens else max_tokens) } generation_params.update(p.extra_generation_params) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index d68f89cc2..340329c0b 100644 --- a/modules/sd_hijack.py +++ 
b/modules/sd_hijack.py @@ -18,7 +18,6 @@ attention_CrossAttention_forward = ldm.modules.attention.CrossAttention.forward diffusionmodules_model_nonlinearity = ldm.modules.diffusionmodules.model.nonlinearity diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.AttnBlock.forward - def apply_optimizations(): undo_optimizations() @@ -83,7 +82,7 @@ class StableDiffusionModelHijack: layer.padding_mode = 'circular' if enable else 'zeros' def tokenize(self, text): - max_length = self.clip.max_length - 2 + max_length = opts.max_prompt_tokens - 2 _, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text]) return remade_batch_tokens[0], token_count, max_length @@ -94,7 +93,6 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): self.wrapped = wrapped self.hijack: StableDiffusionModelHijack = hijack self.tokenizer = wrapped.tokenizer - self.max_length = wrapped.max_length self.token_mults = {} tokens_with_parens = [(k, v) for k, v in self.tokenizer.get_vocab().items() if '(' in k or ')' in k or '[' in k or ']' in k] @@ -116,7 +114,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): def tokenize_line(self, line, used_custom_terms, hijack_comments): id_start = self.wrapped.tokenizer.bos_token_id id_end = self.wrapped.tokenizer.eos_token_id - maxlen = self.wrapped.max_length + maxlen = opts.max_prompt_tokens if opts.enable_emphasis: parsed = prompt_parser.parse_prompt_attention(line) @@ -191,7 +189,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): def process_text_old(self, text): id_start = self.wrapped.tokenizer.bos_token_id id_end = self.wrapped.tokenizer.eos_token_id - maxlen = self.wrapped.max_length + maxlen = self.wrapped.max_length # you get to stay at 77 used_custom_terms = [] remade_batch_tokens = [] overflowing_words = [] @@ -268,8 +266,11 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): if len(used_custom_terms) > 0: self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms])) + position_ids_array = [min(x, 75) for x in range(len(remade_batch_tokens[0])-1)] + [76] + position_ids = torch.asarray(position_ids_array, device=devices.device).expand((1, -1)) + tokens = torch.asarray(remade_batch_tokens).to(device) - outputs = self.wrapped.transformer(input_ids=tokens) + outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids) z = outputs.last_hidden_state # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise diff --git a/modules/shared.py b/modules/shared.py index 879d8424a..864e772cf 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -118,8 +118,8 @@ prompt_styles = modules.styles.StyleDatabase(styles_filename) interrogator = modules.interrogate.InterrogateModels("interrogate") face_restorers = [] -# This was moved to webui.py with the other model "setup" calls. -# modules.sd_models.list_models() + +vanilla_max_prompt_tokens = 77 def realesrgan_models_names(): @@ -221,6 +221,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "filter_nsfw": OptionInfo(False, "Filter NSFW content"), + "max_prompt_tokens": OptionInfo(vanilla_max_prompt_tokens, f"Max prompt token count. 
Two tokens are reserved for for start and end. Default is {vanilla_max_prompt_tokens}. Setting this to a different value will result in different pictures for same seed.", gr.Number, {"precision": 0}), "random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}), })) From 786d9f63aaa4515df82eb2cf357ea92f3dae1e29 Mon Sep 17 00:00:00 2001 From: Trung Ngo Date: Tue, 4 Oct 2022 22:56:30 -0500 Subject: [PATCH 151/460] Add button to skip the current iteration --- javascript/hints.js | 1 + javascript/progressbar.js | 20 ++++++++++++++------ modules/img2img.py | 4 ++++ modules/processing.py | 4 ++++ modules/shared.py | 5 +++++ modules/ui.py | 8 ++++++++ style.css | 14 ++++++++++++-- webui.py | 1 + 8 files changed, 49 insertions(+), 8 deletions(-) diff --git a/javascript/hints.js b/javascript/hints.js index 8adcd983e..8e352e94a 100644 --- a/javascript/hints.js +++ b/javascript/hints.js @@ -35,6 +35,7 @@ titles = { "Denoising strength": "Determines how little respect the algorithm should have for image's content. At 0, nothing will change, and at 1 you'll get an unrelated image. With values below 1.0, processing will take less steps than the Sampling Steps slider specifies.", "Denoising strength change factor": "In loopback mode, on each loop the denoising strength is multiplied by this value. <1 means decreasing variety so your sequence will converge on a fixed picture. >1 means increasing variety so your sequence will become more and more chaotic.", + "Skip": "Stop processing current image and continue processing.", "Interrupt": "Stop processing images and return any results accumulated so far.", "Save": "Write image to a directory (default - log/images) and generation parameters into csv file.", diff --git a/javascript/progressbar.js b/javascript/progressbar.js index f9e9290e2..4395a2159 100644 --- a/javascript/progressbar.js +++ b/javascript/progressbar.js @@ -1,8 +1,9 @@ // code related to showing and updating progressbar shown as the image is being made global_progressbars = {} -function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_interrupt, id_preview, id_gallery){ +function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_skip, id_interrupt, id_preview, id_gallery){ var progressbar = gradioApp().getElementById(id_progressbar) + var skip = id_skip ? 
gradioApp().getElementById(id_skip) : null var interrupt = gradioApp().getElementById(id_interrupt) if(opts.show_progress_in_title && progressbar && progressbar.offsetParent){ @@ -32,30 +33,37 @@ function check_progressbar(id_part, id_progressbar, id_progressbar_span, id_inte var progressDiv = gradioApp().querySelectorAll('#' + id_progressbar_span).length > 0; if(!progressDiv){ + if (skip) { + skip.style.display = "none" + } interrupt.style.display = "none" } } - window.setTimeout(function(){ requestMoreProgress(id_part, id_progressbar_span, id_interrupt) }, 500) + window.setTimeout(function() { requestMoreProgress(id_part, id_progressbar_span, id_skip, id_interrupt) }, 500) }); mutationObserver.observe( progressbar, { childList:true, subtree:true }) } } onUiUpdate(function(){ - check_progressbar('txt2img', 'txt2img_progressbar', 'txt2img_progress_span', 'txt2img_interrupt', 'txt2img_preview', 'txt2img_gallery') - check_progressbar('img2img', 'img2img_progressbar', 'img2img_progress_span', 'img2img_interrupt', 'img2img_preview', 'img2img_gallery') - check_progressbar('ti', 'ti_progressbar', 'ti_progress_span', 'ti_interrupt', 'ti_preview', 'ti_gallery') + check_progressbar('txt2img', 'txt2img_progressbar', 'txt2img_progress_span', 'txt2img_skip', 'txt2img_interrupt', 'txt2img_preview', 'txt2img_gallery') + check_progressbar('img2img', 'img2img_progressbar', 'img2img_progress_span', 'img2img_skip', 'img2img_interrupt', 'img2img_preview', 'img2img_gallery') + check_progressbar('ti', 'ti_progressbar', 'ti_progress_span', '', 'ti_interrupt', 'ti_preview', 'ti_gallery') }) -function requestMoreProgress(id_part, id_progressbar_span, id_interrupt){ +function requestMoreProgress(id_part, id_progressbar_span, id_skip, id_interrupt){ btn = gradioApp().getElementById(id_part+"_check_progress"); if(btn==null) return; btn.click(); var progressDiv = gradioApp().querySelectorAll('#' + id_progressbar_span).length > 0; + var skip = id_skip ? 
gradioApp().getElementById(id_skip) : null var interrupt = gradioApp().getElementById(id_interrupt) if(progressDiv && interrupt){ + if (skip) { + skip.style.display = "block" + } interrupt.style.display = "block" } } diff --git a/modules/img2img.py b/modules/img2img.py index da212d72b..e60b7e0ff 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -32,6 +32,10 @@ def process_batch(p, input_dir, output_dir, args): for i, image in enumerate(images): state.job = f"{i+1} out of {len(images)}" + if state.skipped: + state.skipped = False + state.interrupted = False + continue if state.interrupted: break diff --git a/modules/processing.py b/modules/processing.py index d814d5acd..6805039c1 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -355,6 +355,10 @@ def process_images(p: StableDiffusionProcessing) -> Processed: state.job_count = p.n_iter for n in range(p.n_iter): + if state.skipped: + state.skipped = False + state.interrupted = False + if state.interrupted: break diff --git a/modules/shared.py b/modules/shared.py index 864e772cf..7f802bd97 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -84,6 +84,7 @@ def selected_hypernetwork(): class State: + skipped = False interrupted = False job = "" job_no = 0 @@ -96,6 +97,10 @@ class State: current_image_sampling_step = 0 textinfo = None + def skip(self): + self.skipped = True + self.interrupted = True + def interrupt(self): self.interrupted = True diff --git a/modules/ui.py b/modules/ui.py index 4f18126fb..e3e62fdd5 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -191,6 +191,7 @@ def wrap_gradio_call(func, extra_outputs=None): # last item is always HTML res[-1] += f"
<div class='performance'><p class='time'>Time taken: {elapsed_text}</p>{vram_html}</div>
" + shared.state.skipped = False shared.state.interrupted = False shared.state.job_count = 0 @@ -411,9 +412,16 @@ def create_toprow(is_img2img): with gr.Column(scale=1): with gr.Row(): + skip = gr.Button('Skip', elem_id=f"{id_part}_skip") interrupt = gr.Button('Interrupt', elem_id=f"{id_part}_interrupt") submit = gr.Button('Generate', elem_id=f"{id_part}_generate", variant='primary') + skip.click( + fn=lambda: shared.state.skip(), + inputs=[], + outputs=[], + ) + interrupt.click( fn=lambda: shared.state.interrupt(), inputs=[], diff --git a/style.css b/style.css index 50c5e557c..6904fc50e 100644 --- a/style.css +++ b/style.css @@ -393,10 +393,20 @@ input[type="range"]{ #txt2img_interrupt, #img2img_interrupt{ position: absolute; - width: 100%; + width: 50%; height: 72px; background: #b4c0cc; - border-radius: 8px; + border-radius: 0px; + display: none; +} + +#txt2img_skip, #img2img_skip{ + position: absolute; + width: 50%; + right: 0px; + height: 72px; + background: #b4c0cc; + border-radius: 0px; display: none; } diff --git a/webui.py b/webui.py index 480360fe0..3b4cf5e9d 100644 --- a/webui.py +++ b/webui.py @@ -58,6 +58,7 @@ def wrap_gradio_gpu_call(func, extra_outputs=None): shared.state.current_latent = None shared.state.current_image = None shared.state.current_image_sampling_step = 0 + shared.state.skipped = False shared.state.interrupted = False shared.state.textinfo = None From 00117a07efbbe8482add12262a179326541467de Mon Sep 17 00:00:00 2001 From: Trung Ngo Date: Sat, 8 Oct 2022 05:33:21 -0500 Subject: [PATCH 152/460] check specifically for skipped --- modules/img2img.py | 2 -- modules/processing.py | 3 +-- modules/sd_samplers.py | 4 ++-- modules/shared.py | 1 - 4 files changed, 3 insertions(+), 7 deletions(-) diff --git a/modules/img2img.py b/modules/img2img.py index e60b7e0ff..241267745 100644 --- a/modules/img2img.py +++ b/modules/img2img.py @@ -34,8 +34,6 @@ def process_batch(p, input_dir, output_dir, args): state.job = f"{i+1} out of {len(images)}" if state.skipped: state.skipped = False - state.interrupted = False - continue if state.interrupted: break diff --git a/modules/processing.py b/modules/processing.py index 6805039c1..3657fe69b 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -357,7 +357,6 @@ def process_images(p: StableDiffusionProcessing) -> Processed: for n in range(p.n_iter): if state.skipped: state.skipped = False - state.interrupted = False if state.interrupted: break @@ -385,7 +384,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: with devices.autocast(): samples_ddim = p.sample(conditioning=c, unconditional_conditioning=uc, seeds=seeds, subseeds=subseeds, subseed_strength=p.subseed_strength) - if state.interrupted: + if state.interrupted or state.skipped: # if we are interruped, sample returns just noise # use the image collected previously in sampler loop diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index df17e93ca..13a8b3221 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -106,7 +106,7 @@ def extended_tdqm(sequence, *args, desc=None, **kwargs): seq = sequence if cmd_opts.disable_console_progressbars else tqdm.tqdm(sequence, *args, desc=state.job, file=shared.progress_print_out, **kwargs) for x in seq: - if state.interrupted: + if state.interrupted or state.skipped: break yield x @@ -254,7 +254,7 @@ def extended_trange(sampler, count, *args, **kwargs): seq = range(count) if cmd_opts.disable_console_progressbars else tqdm.trange(count, *args, desc=state.job, file=shared.progress_print_out, 
**kwargs) for x in seq: - if state.interrupted: + if state.interrupted or state.skipped: break if sampler.stop_at is not None and x > sampler.stop_at: diff --git a/modules/shared.py b/modules/shared.py index 7f802bd97..ca4626282 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -99,7 +99,6 @@ class State: def skip(self): self.skipped = True - self.interrupted = True def interrupt(self): self.interrupted = True From 4999eb2ef9b30e8c42ca7e4a94d4bbffe4d1f015 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 8 Oct 2022 14:25:47 +0300 Subject: [PATCH 153/460] do not let user choose his own prompt token count limit --- README.md | 1 + modules/processing.py | 5 ----- modules/sd_hijack.py | 25 ++++++++++++------------- modules/shared.py | 3 --- 4 files changed, 13 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index d6e1d50bd..ef9b5e313 100644 --- a/README.md +++ b/README.md @@ -65,6 +65,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web - [Composable-Diffusion](https://energy-based-model.github.io/Compositional-Visual-Generation-with-Composable-Diffusion-Models/), a way to use multiple prompts at once - separate prompts using uppercase `AND` - also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2` +- No token limit for prompts (original stable diffusion lets you use up to 75 tokens) ## Installation and Running Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs. 
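The skip feature in the patches above comes down to a pair of flags on the shared State object: interrupt() abandons the whole job, while skip() (after the refinement in the follow-up commit) marks only the current image, and the generation loop consumes the flag and carries on. A minimal runnable sketch of that control flow, with simplified stand-in names rather than the real modules/shared.py, modules/processing.py and modules/sd_samplers.py code:

    class State:
        skipped = False       # drop only the current image
        interrupted = False   # abort the whole job

        def skip(self):
            self.skipped = True

        def interrupt(self):
            self.interrupted = True

    state = State()

    def sample(steps, on_step=None):
        # the samplers check both flags every step and bail out early
        for step in range(steps):
            if state.interrupted or state.skipped:
                return "noise"  # partial latents, as when sampling is cut short
            if on_step:
                on_step(step)
        return "image"

    def process_images(n_iter=3, steps=10):
        results = []
        for n in range(n_iter):
            if state.skipped:
                state.skipped = False   # consume the flag, move on to the next image
            if state.interrupted:
                break                   # an interrupt abandons the remaining iterations
            press_skip = lambda s, n=n: state.skip() if (n, s) == (1, 5) else None
            results.append(sample(steps, on_step=press_skip))
        return results

    print(process_images())  # ['image', 'noise', 'image'] - only the second image is lost

Keeping skip() from also setting interrupted - the one-line change in the second patch - is exactly what lets the rest of the batch keep rendering.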
diff --git a/modules/processing.py b/modules/processing.py index 3657fe69b..d5162ddc0 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -123,7 +123,6 @@ class Processed: self.index_of_first_image = index_of_first_image self.styles = p.styles self.job_timestamp = state.job_timestamp - self.max_prompt_tokens = opts.max_prompt_tokens self.eta = p.eta self.ddim_discretize = p.ddim_discretize @@ -171,7 +170,6 @@ class Processed: "infotexts": self.infotexts, "styles": self.styles, "job_timestamp": self.job_timestamp, - "max_prompt_tokens": self.max_prompt_tokens, } return json.dumps(obj) @@ -269,8 +267,6 @@ def fix_seed(p): def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0): index = position_in_batch + iteration * p.batch_size - max_tokens = getattr(p, 'max_prompt_tokens', opts.max_prompt_tokens) - generation_params = { "Steps": p.steps, "Sampler": sd_samplers.samplers[p.sampler_index].name, @@ -286,7 +282,6 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"), "Denoising strength": getattr(p, 'denoising_strength', None), "Eta": (None if p.sampler is None or p.sampler.eta == p.sampler.default_eta else p.sampler.eta), - "Max tokens": (None if max_tokens == shared.vanilla_max_prompt_tokens else max_tokens) } generation_params.update(p.extra_generation_params) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 340329c0b..2c1332c9f 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -36,6 +36,13 @@ def undo_optimizations(): ldm.modules.diffusionmodules.model.AttnBlock.forward = diffusionmodules_model_AttnBlock_forward +def get_target_prompt_token_count(token_count): + if token_count < 75: + return 75 + + return math.ceil(token_count / 10) * 10 + + class StableDiffusionModelHijack: fixes = None comments = [] @@ -84,7 +91,7 @@ class StableDiffusionModelHijack: def tokenize(self, text): max_length = opts.max_prompt_tokens - 2 _, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text]) - return remade_batch_tokens[0], token_count, max_length + return remade_batch_tokens[0], token_count, get_target_prompt_token_count(token_count) class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): @@ -114,7 +121,6 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): def tokenize_line(self, line, used_custom_terms, hijack_comments): id_start = self.wrapped.tokenizer.bos_token_id id_end = self.wrapped.tokenizer.eos_token_id - maxlen = opts.max_prompt_tokens if opts.enable_emphasis: parsed = prompt_parser.parse_prompt_attention(line) @@ -146,19 +152,12 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): used_custom_terms.append((embedding.name, embedding.checksum())) i += embedding_length_in_tokens - if len(remade_tokens) > maxlen - 2: - vocab = {v: k for k, v in self.wrapped.tokenizer.get_vocab().items()} - ovf = remade_tokens[maxlen - 2:] - overflowing_words = [vocab.get(int(x), "") for x in ovf] - overflowing_text = self.wrapped.tokenizer.convert_tokens_to_string(''.join(overflowing_words)) - hijack_comments.append(f"Warning: too many input tokens; some ({len(overflowing_words)}) have been truncated:\n{overflowing_text}\n") - token_count = len(remade_tokens) - remade_tokens = remade_tokens + [id_end] * (maxlen - 2 - len(remade_tokens)) - remade_tokens = [id_start] + remade_tokens[0:maxlen - 2] + [id_end] + 
prompt_target_length = get_target_prompt_token_count(token_count) + tokens_to_add = prompt_target_length - len(remade_tokens) + 1 - multipliers = multipliers + [1.0] * (maxlen - 2 - len(multipliers)) - multipliers = [1.0] + multipliers[0:maxlen - 2] + [1.0] + remade_tokens = [id_start] + remade_tokens + [id_end] * tokens_to_add + multipliers = [1.0] + multipliers + [1.0] * tokens_to_add return remade_tokens, fixes, multipliers, token_count diff --git a/modules/shared.py b/modules/shared.py index ca4626282..475d7e526 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -123,8 +123,6 @@ interrogator = modules.interrogate.InterrogateModels("interrogate") face_restorers = [] -vanilla_max_prompt_tokens = 77 - def realesrgan_models_names(): import modules.realesrgan_model @@ -225,7 +223,6 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "filter_nsfw": OptionInfo(False, "Filter NSFW content"), - "max_prompt_tokens": OptionInfo(vanilla_max_prompt_tokens, f"Max prompt token count. Two tokens are reserved for for start and end. Default is {vanilla_max_prompt_tokens}. Setting this to a different value will result in different pictures for same seed.", gr.Number, {"precision": 0}), "random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}), })) From 4201fd14f5769a4cf6723d2bc5495c3c84a2cd00 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 14:42:34 +0300 Subject: [PATCH 154/460] install xformers --- launch.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/launch.py b/launch.py index 75edb66a9..f3fbe16a5 100644 --- a/launch.py +++ b/launch.py @@ -124,6 +124,9 @@ if not is_installed("gfpgan"): if not is_installed("clip"): run_pip(f"install {clip_package}", "clip") +if not is_installed("xformers"): + run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers") + os.makedirs(dir_repos, exist_ok=True) git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash) From 3f166be1b60ff2ab33a6d2646809ec3f48796303 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 14:42:50 +0300 Subject: [PATCH 155/460] Update requirements.txt --- requirements.txt | 1 - 1 file changed, 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 304a066a3..81641d68f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -24,4 +24,3 @@ torchdiffeq kornia lark functorch -#xformers? 
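For reference, the token-limit removal above replaces the old hard cut at 75 tokens with a bucketed target length: short prompts still pad out to a 75-token body, longer ones round up to the next multiple of 10, and the padded sequence is wrapped in start/end markers. A small sketch of that shaping logic; the BOS/EOS ids are the usual CLIP tokenizer values and are used here purely for illustration:

    import math

    def get_target_prompt_token_count(token_count):
        # prompts under the old limit keep the old 75-token size;
        # anything longer rounds up to the next multiple of 10
        if token_count < 75:
            return 75
        return math.ceil(token_count / 10) * 10

    def pad_prompt(tokens, id_start=49406, id_end=49407):
        target = get_target_prompt_token_count(len(tokens))
        tokens_to_add = target - len(tokens) + 1
        return [id_start] + tokens + [id_end] * tokens_to_add

    print(len(pad_prompt(list(range(40)))))   # 77:  75-token body plus start/end markers
    print(len(pad_prompt(list(range(120)))))  # 122: 120-token body plus start/end markers

The clamped position ids seen in the surrounding hunks ([min(x, 75) ...] + [76]) are what keep the pretrained text model inside the positional range it was trained on despite the longer sequences.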
From 77f4237d1c3af1756e7dab2699e3dcebad5619d6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 8 Oct 2022 15:25:59 +0300 Subject: [PATCH 156/460] fix bugs related to variable prompt lengths --- modules/sd_hijack.py | 14 +++++++++----- modules/sd_samplers.py | 35 ++++++++++++++++++++++++++++------- 2 files changed, 37 insertions(+), 12 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 2c1332c9f..7e7fde0f9 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -89,7 +89,6 @@ class StableDiffusionModelHijack: layer.padding_mode = 'circular' if enable else 'zeros' def tokenize(self, text): - max_length = opts.max_prompt_tokens - 2 _, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text]) return remade_batch_tokens[0], token_count, get_target_prompt_token_count(token_count) @@ -174,7 +173,8 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): if line in cache: remade_tokens, fixes, multipliers = cache[line] else: - remade_tokens, fixes, multipliers, token_count = self.tokenize_line(line, used_custom_terms, hijack_comments) + remade_tokens, fixes, multipliers, current_token_count = self.tokenize_line(line, used_custom_terms, hijack_comments) + token_count = max(current_token_count, token_count) cache[line] = (remade_tokens, fixes, multipliers) @@ -265,15 +265,19 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): if len(used_custom_terms) > 0: self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in used_custom_terms])) - position_ids_array = [min(x, 75) for x in range(len(remade_batch_tokens[0])-1)] + [76] + target_token_count = get_target_prompt_token_count(token_count) + 2 + + position_ids_array = [min(x, 75) for x in range(target_token_count-1)] + [76] position_ids = torch.asarray(position_ids_array, device=devices.device).expand((1, -1)) - tokens = torch.asarray(remade_batch_tokens).to(device) + remade_batch_tokens_of_same_length = [x + [self.wrapped.tokenizer.eos_token_id] * (target_token_count - len(x)) for x in remade_batch_tokens] + tokens = torch.asarray(remade_batch_tokens_of_same_length).to(device) outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids) z = outputs.last_hidden_state # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise - batch_multipliers = torch.asarray(batch_multipliers).to(device) + batch_multipliers_of_same_length = [x + [1.0] * (target_token_count - len(x)) for x in batch_multipliers] + batch_multipliers = torch.asarray(batch_multipliers_of_same_length).to(device) original_mean = z.mean() z *= batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape) new_mean = z.mean() diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index 13a8b3221..eade0dbbd 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -142,6 +142,16 @@ class VanillaStableDiffusionSampler: assert all([len(conds) == 1 for conds in conds_list]), 'composition via AND is not supported for DDIM/PLMS samplers' cond = tensor + # for DDIM, shapes must match, we can't just process cond and uncond independently; + # filling unconditional_conditioning with repeats of the last vector to match length is + # not 100% correct but should work well enough + if unconditional_conditioning.shape[1] < cond.shape[1]: + last_vector = unconditional_conditioning[:, -1:] + last_vector_repeated = last_vector.repeat([1, cond.shape[1] - 
unconditional_conditioning.shape[1], 1]) + unconditional_conditioning = torch.hstack([unconditional_conditioning, last_vector_repeated]) + elif unconditional_conditioning.shape[1] > cond.shape[1]: + unconditional_conditioning = unconditional_conditioning[:, :cond.shape[1]] + if self.mask is not None: img_orig = self.sampler.model.q_sample(self.init_latent, ts) x_dec = img_orig * self.mask + self.nmask * x_dec @@ -221,18 +231,29 @@ class CFGDenoiser(torch.nn.Module): x_in = torch.cat([torch.stack([x[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [x]) sigma_in = torch.cat([torch.stack([sigma[i] for _ in range(n)]) for i, n in enumerate(repeats)] + [sigma]) - cond_in = torch.cat([tensor, uncond]) - if shared.batch_cond_uncond: - x_out = self.inner_model(x_in, sigma_in, cond=cond_in) + if tensor.shape[1] == uncond.shape[1]: + cond_in = torch.cat([tensor, uncond]) + + if shared.batch_cond_uncond: + x_out = self.inner_model(x_in, sigma_in, cond=cond_in) + else: + x_out = torch.zeros_like(x_in) + for batch_offset in range(0, x_out.shape[0], batch_size): + a = batch_offset + b = a + batch_size + x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=cond_in[a:b]) else: x_out = torch.zeros_like(x_in) - for batch_offset in range(0, x_out.shape[0], batch_size): + batch_size = batch_size*2 if shared.batch_cond_uncond else batch_size + for batch_offset in range(0, tensor.shape[0], batch_size): a = batch_offset - b = a + batch_size - x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=cond_in[a:b]) + b = min(a + batch_size, tensor.shape[0]) + x_out[a:b] = self.inner_model(x_in[a:b], sigma_in[a:b], cond=tensor[a:b]) - denoised_uncond = x_out[-batch_size:] + x_out[-uncond.shape[0]:] = self.inner_model(x_in[-uncond.shape[0]:], sigma_in[-uncond.shape[0]:], cond=uncond) + + denoised_uncond = x_out[-uncond.shape[0]:] denoised = torch.clone(denoised_uncond) for i, conds in enumerate(conds_list): From 7001bffe0247804793dfabb69ac96d832572ccd0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 8 Oct 2022 15:43:25 +0300 Subject: [PATCH 157/460] fix AND broken for long prompts --- modules/prompt_parser.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index f00256f28..156660736 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -239,6 +239,15 @@ def reconstruct_multicond_batch(c: MulticondLearnedConditioning, current_step): conds_list.append(conds_for_batch) + # if prompts have wildly different lengths above the limit we'll get tensors fo different shapes + # and won't be able to torch.stack them. So this fixes that. 
+ token_count = max([x.shape[0] for x in tensors]) + for i in range(len(tensors)): + if tensors[i].shape[0] != token_count: + last_vector = tensors[i][-1:] + last_vector_repeated = last_vector.repeat([token_count - tensors[i].shape[0], 1]) + tensors[i] = torch.vstack([tensors[i], last_vector_repeated]) + return conds_list, torch.stack(tensors).to(device=param.device, dtype=param.dtype) From 772db721a52da374d627b60994222051f26c27a7 Mon Sep 17 00:00:00 2001 From: ddPn08 Date: Fri, 7 Oct 2022 23:02:07 +0900 Subject: [PATCH 158/460] fix glob path in hypernetwork.py --- modules/hypernetwork.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py index c7b866829..7f0622428 100644 --- a/modules/hypernetwork.py +++ b/modules/hypernetwork.py @@ -43,7 +43,7 @@ class Hypernetwork: def load_hypernetworks(path): res = {} - for filename in glob.iglob(path + '**/*.pt', recursive=True): + for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True): try: hn = Hypernetwork(filename) res[hn.name] = hn From 32e428ff19c28c87bb2ed362316b928b372e3a70 Mon Sep 17 00:00:00 2001 From: guaneec Date: Sat, 8 Oct 2022 16:01:34 +0800 Subject: [PATCH 159/460] Remove duplicate event listeners --- javascript/imageviewer.js | 3 +++ 1 file changed, 3 insertions(+) diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index 3a0baac8c..4c0e8f4bb 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -86,6 +86,9 @@ function showGalleryImage(){ if(fullImg_preview != null){ fullImg_preview.forEach(function function_name(e) { + if (e.dataset.modded) + return; + e.dataset.modded = true; if(e && e.parentElement.tagName == 'DIV'){ e.style.cursor='pointer' From 5f85a74b00c0154bfd559dc67edfa7e30342b7c9 Mon Sep 17 00:00:00 2001 From: MrCheeze Date: Fri, 7 Oct 2022 17:48:34 -0400 Subject: [PATCH 160/460] fix bug where when using prompt composition, hijack_comments generated before the final AND will be dropped --- modules/processing.py | 1 + modules/sd_hijack.py | 5 ++++- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index d5162ddc0..8240ee270 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -313,6 +313,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: os.makedirs(p.outpath_grids, exist_ok=True) modules.sd_hijack.model_hijack.apply_circular(p.tiling) + modules.sd_hijack.model_hijack.clear_comments() comments = {} diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 7e7fde0f9..ba808a397 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -88,6 +88,9 @@ class StableDiffusionModelHijack: for layer in [layer for layer in self.layers if type(layer) == torch.nn.Conv2d]: layer.padding_mode = 'circular' if enable else 'zeros' + def clear_comments(self): + self.comments = [] + def tokenize(self, text): _, remade_batch_tokens, _, _, _, token_count = self.clip.process_text([text]) return remade_batch_tokens[0], token_count, get_target_prompt_token_count(token_count) @@ -260,7 +263,7 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): batch_multipliers, remade_batch_tokens, used_custom_terms, hijack_comments, hijack_fixes, token_count = self.process_text(text) self.hijack.fixes = hijack_fixes - self.hijack.comments = hijack_comments + self.hijack.comments += hijack_comments if len(used_custom_terms) > 0: self.hijack.comments.append("Used embeddings: " + ", ".join([f'{word} [{checksum}]' for word, checksum in 
used_custom_terms])) From d0e85873ac72416d32dee8720dc9e93ab3d3e236 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 16:13:26 +0300 Subject: [PATCH 161/460] check for OS and env variable --- launch.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/launch.py b/launch.py index f3fbe16a5..a2089b3bc 100644 --- a/launch.py +++ b/launch.py @@ -4,6 +4,7 @@ import os import sys import importlib.util import shlex +import platform dir_repos = "repositories" dir_tmp = "tmp" @@ -31,6 +32,7 @@ def extract_arg(args, name): args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test') +args, xformers = extract_arg(args, '--xformers') def repo_dir(name): @@ -124,8 +126,11 @@ if not is_installed("gfpgan"): if not is_installed("clip"): run_pip(f"install {clip_package}", "clip") -if not is_installed("xformers"): - run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers") +if not is_installed("xformers") and xformers: + if platform.system() == "Windows": + run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers") + elif: + run_pip("install xformers", "xformers") os.makedirs(dir_repos, exist_ok=True) From 26b459a3799c5cdf71ca8ed5315a99f69c69f02c Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 16:20:04 +0300 Subject: [PATCH 162/460] default to split attention if cuda is available and xformers is not --- modules/sd_hijack.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 3da8c8ce2..04adcf035 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -21,12 +21,12 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At def apply_optimizations(): ldm.modules.diffusionmodules.model.nonlinearity = silu - if not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip): + if not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip or shared.xformers_available): ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward elif cmd_opts.opt_split_attention_v1: ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 - elif cmd_opts.opt_split_attention: + elif cmd_opts.opt_split_attention or torch.cuda.is_available(): ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward From ddfa9a97865c732193023a71521c5b7b53d8571b Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 16:20:41 +0300 Subject: [PATCH 163/460] add xformers_available shared variable --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 8cc3b2fe2..6ed4b8021 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -74,7 +74,7 @@ device = devices.device batch_cond_uncond = cmd_opts.always_batch_cond_uncond or not 
(cmd_opts.lowvram or cmd_opts.medvram) parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram - +xformers_available = False config_filename = cmd_opts.ui_settings_file From 69d0053583757ce2942d62de81e8b89e6be07840 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 16:21:40 +0300 Subject: [PATCH 164/460] update sd_hijack_opt to respect new env variables --- modules/sd_hijack_optimizations.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index ee58c7e4e..be09ec8f4 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -1,9 +1,14 @@ import math import torch from torch import einsum -import xformers.ops -import functorch -xformers._is_functorch_available=True +try: + import xformers.ops + import functorch + xformers._is_functorch_available = True + shared.xformers_available = True +except: + print('Cannot find xformers, defaulting to split attention. Try setting --xformers in your webui-user file if you wish to install it.') + continue from ldm.util import default from einops import rearrange From ca5f0f149c29c344a6badd055b15b5e5fcd6e938 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 16:22:38 +0300 Subject: [PATCH 165/460] Update launch.py --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/launch.py b/launch.py index a2089b3bc..a592e1ba7 100644 --- a/launch.py +++ b/launch.py @@ -129,7 +129,7 @@ if not is_installed("clip"): if not is_installed("xformers") and xformers: if platform.system() == "Windows": run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers") - elif: + elif platform.system() == "Linux": run_pip("install xformers", "xformers") os.makedirs(dir_repos, exist_ok=True) From 7ffea1507813540b8cd9e73feb7bf23de1ac4e27 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 16:24:06 +0300 Subject: [PATCH 166/460] Update requirements_versions.txt --- requirements_versions.txt | 1 + 1 file changed, 1 insertion(+) diff --git a/requirements_versions.txt b/requirements_versions.txt index fdff26878..fec3e9d5b 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -22,3 +22,4 @@ resize-right==0.0.2 torchdiffeq==0.2.3 kornia==0.6.7 lark==1.1.2 +functorch==0.2.1 From 970de9ee6891ff586821d0d80dde01c2f6c681b3 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 16:29:43 +0300 Subject: [PATCH 167/460] Update sd_hijack.py --- modules/sd_hijack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 04adcf035..5b30539fe 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -21,7 +21,7 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At def apply_optimizations(): ldm.modules.diffusionmodules.model.nonlinearity = silu - if not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip or shared.xformers_available): + if not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip) and shared.xformers_available: 
ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward elif cmd_opts.opt_split_attention_v1: From 7ff1170a2e11b6f00f587407326db0b9f8f51adf Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 8 Oct 2022 16:33:39 +0300 Subject: [PATCH 168/460] emergency fix for xformers (continue + shared) --- modules/sd_hijack_optimizations.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index e43e2c7a3..05023b6fd 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -1,19 +1,19 @@ import math import torch from torch import einsum -try: - import xformers.ops - import functorch - xformers._is_functorch_available = True - shared.xformers_available = True -except: - print('Cannot find xformers, defaulting to split attention. Try setting --xformers in your webui-user file if you wish to install it.') - continue + from ldm.util import default from einops import rearrange from modules import shared +try: + import xformers.ops + import functorch + xformers._is_functorch_available = True + shared.xformers_available = True +except Exception: + print('Cannot find xformers, defaulting to split attention. Try adding --xformers commandline argument to your webui-user file if you wish to install it.') # see https://github.com/basujindal/stable-diffusion/pull/117 for discussion def split_cross_attention_forward_v1(self, x, context=None, mask=None): From dc1117233ef8f9b25ff1ac40b158f20b70ba2fcb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 8 Oct 2022 17:02:18 +0300 Subject: [PATCH 169/460] simplify xfrmers options: --xformers to enable and that's it --- launch.py | 2 +- modules/sd_hijack.py | 2 +- modules/sd_hijack_optimizations.py | 20 +++++++++++++------- modules/shared.py | 2 +- 4 files changed, 16 insertions(+), 10 deletions(-) diff --git a/launch.py b/launch.py index a592e1ba7..61f62096c 100644 --- a/launch.py +++ b/launch.py @@ -32,7 +32,7 @@ def extract_arg(args, name): args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test') -args, xformers = extract_arg(args, '--xformers') +xformers = '--xformers' in args def repo_dir(name): diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 5d93f7f6a..91e98c16b 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -22,7 +22,7 @@ def apply_optimizations(): undo_optimizations() ldm.modules.diffusionmodules.model.nonlinearity = silu - if not cmd_opts.disable_opt_xformers_attention and not (cmd_opts.opt_split_attention or torch.version.hip) and shared.xformers_available: + if cmd_opts.xformers and shared.xformers_available and not torch.version.hip: ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward elif cmd_opts.opt_split_attention_v1: diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index 05023b6fd..d23d733b0 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -1,4 +1,7 @@ import math +import sys +import traceback + import torch from torch import einsum @@ -7,13 +10,16 @@ from einops import rearrange from modules import shared -try: - import xformers.ops - import functorch - 
xformers._is_functorch_available = True - shared.xformers_available = True -except Exception: - print('Cannot find xformers, defaulting to split attention. Try adding --xformers commandline argument to your webui-user file if you wish to install it.') +if shared.cmd_opts.xformers: + try: + import xformers.ops + import functorch + xformers._is_functorch_available = True + shared.xformers_available = True + except Exception: + print("Cannot import xformers", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + # see https://github.com/basujindal/stable-diffusion/pull/117 for discussion def split_cross_attention_forward_v1(self, x, context=None, mask=None): diff --git a/modules/shared.py b/modules/shared.py index d68df7511..02cb27228 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -43,7 +43,7 @@ parser.add_argument("--realesrgan-models-path", type=str, help="Path to director parser.add_argument("--scunet-models-path", type=str, help="Path to directory with ScuNET model file(s).", default=os.path.join(models_path, 'ScuNET')) parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR')) parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR')) -parser.add_argument("--disable-opt-xformers-attention", action='store_true', help="force-disables xformers attention optimization") +parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers") parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. By default, it's on for torch.cuda and off for other torch devices.") parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization") parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find") From 27032c47df9c07ac21dd5b89fa7dc247bb8705b6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 8 Oct 2022 17:10:05 +0300 Subject: [PATCH 170/460] restore old opt_split_attention/disable_opt_split_attention logic --- modules/sd_hijack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 91e98c16b..335a2bcfb 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -27,7 +27,7 @@ def apply_optimizations(): ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward elif cmd_opts.opt_split_attention_v1: ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 - elif cmd_opts.opt_split_attention or torch.cuda.is_available(): + elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()): ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward From 4f33289d0fc5aa3a197f4a4c926d03d44f0d597e Mon Sep 17 00:00:00 2001 From: Milly Date: Sat, 8 Oct 2022 22:48:15 +0900 Subject: [PATCH 171/460] Fixed typo --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index e3e62fdd5..ffd75f6ac 100644 --- 
a/modules/ui.py +++ b/modules/ui.py @@ -946,7 +946,7 @@ def create_ui(wrap_gradio_gpu_call): custom_name = gr.Textbox(label="Custom Name (Optional)") interp_amount = gr.Slider(minimum=0.0, maximum=1.0, step=0.05, label='Interpolation Amount', value=0.3) interp_method = gr.Radio(choices=["Weighted Sum", "Sigmoid", "Inverse Sigmoid"], value="Weighted Sum", label="Interpolation Method") - save_as_half = gr.Checkbox(value=False, label="Safe as float16") + save_as_half = gr.Checkbox(value=False, label="Save as float16") modelmerger_merge = gr.Button(elem_id="modelmerger_merge", label="Merge", variant='primary') with gr.Column(variant='panel'): From cfc33f99d47d1f45af15499e5965834089d11858 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 8 Oct 2022 17:28:58 +0300 Subject: [PATCH 172/460] why did you do this --- modules/sd_hijack.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 335a2bcfb..ed271976b 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -28,7 +28,7 @@ def apply_optimizations(): elif cmd_opts.opt_split_attention_v1: ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()): - ldm.modules.attention_CrossAttention_forward = sd_hijack_optimizations.split_cross_attention_forward + ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward From 7e639cd49855ef59e087ae9a9122756a937007eb Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 17:22:20 +0300 Subject: [PATCH 173/460] check for 3.10 --- launch.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/launch.py b/launch.py index 61f62096c..1d65a779c 100644 --- a/launch.py +++ b/launch.py @@ -126,7 +126,7 @@ if not is_installed("gfpgan"): if not is_installed("clip"): run_pip(f"install {clip_package}", "clip") -if not is_installed("xformers") and xformers: +if not is_installed("xformers") and xformers and platform.python_version().startswith("3.10"): if platform.system() == "Windows": run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers") elif platform.system() == "Linux": From 017b6b8744f0771e498656ec043e12d5cc6969a7 Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 17:27:21 +0300 Subject: [PATCH 174/460] check for ampere --- modules/sd_hijack.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index ed271976b..5e266d5e5 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -22,9 +22,10 @@ def apply_optimizations(): undo_optimizations() ldm.modules.diffusionmodules.model.nonlinearity = silu - if cmd_opts.xformers and shared.xformers_available and not torch.version.hip: - ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward - ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward + if cmd_opts.xformers and shared.xformers_available and torch.version.cuda: + if torch.cuda.get_device_capability(shared.device) == 
(8, 6): + ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward + ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward elif cmd_opts.opt_split_attention_v1: ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()): From cc0258aea7b6605be3648900063cfa96ed7c5ffa Mon Sep 17 00:00:00 2001 From: C43H66N12O12S2 <36072735+C43H66N12O12S2@users.noreply.github.com> Date: Sat, 8 Oct 2022 17:44:53 +0300 Subject: [PATCH 175/460] check for ampere without destroying the optimizations. again. --- modules/sd_hijack.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 5e266d5e5..a3e374f09 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -22,10 +22,9 @@ def apply_optimizations(): undo_optimizations() ldm.modules.diffusionmodules.model.nonlinearity = silu - if cmd_opts.xformers and shared.xformers_available and torch.version.cuda: - if torch.cuda.get_device_capability(shared.device) == (8, 6): - ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward - ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward + if cmd_opts.xformers and shared.xformers_available and torch.version.cuda and torch.cuda.get_device_capability(shared.device) == (8, 6): + ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward + ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward elif cmd_opts.opt_split_attention_v1: ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()): From 34acad1628e98a5e0cbd459fa69ded915864f53d Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Fri, 7 Oct 2022 22:56:00 +0100 Subject: [PATCH 176/460] Add GZipMiddleware to root demo --- webui.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/webui.py b/webui.py index 3b4cf5e9d..18de8e165 100644 --- a/webui.py +++ b/webui.py @@ -5,6 +5,8 @@ import importlib import signal import threading +from fastapi.middleware.gzip import GZipMiddleware + from modules.paths import script_path from modules import devices, sd_samplers @@ -93,7 +95,7 @@ def webui(): demo = modules.ui.create_ui(wrap_gradio_gpu_call=wrap_gradio_gpu_call) - demo.launch( + app,local_url,share_url = demo.launch( share=cmd_opts.share, server_name="0.0.0.0" if cmd_opts.listen else None, server_port=cmd_opts.port, @@ -102,6 +104,8 @@ def webui(): inbrowser=cmd_opts.autolaunch, prevent_thread_lock=True ) + + app.add_middleware(GZipMiddleware,minimum_size=1000) while 1: time.sleep(0.5) From a5550f0213c3f145b1c984816ebcef92c48853ee Mon Sep 17 00:00:00 2001 From: Artem Zagidulin Date: Wed, 5 Oct 2022 19:10:39 +0300 Subject: [PATCH 177/460] alternate prompt --- modules/prompt_parser.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/modules/prompt_parser.py b/modules/prompt_parser.py index 156660736..919d5d31a 100644 --- a/modules/prompt_parser.py +++ b/modules/prompt_parser.py @@ -13,13 +13,14 @@ import lark schedule_parser = lark.Lark(r""" !start: 
(prompt | /[][():]/+)* -prompt: (emphasized | scheduled | plain | WHITESPACE)* +prompt: (emphasized | scheduled | alternate | plain | WHITESPACE)* !emphasized: "(" prompt ")" | "(" prompt ":" prompt ")" | "[" prompt "]" scheduled: "[" [prompt ":"] prompt ":" [WHITESPACE] NUMBER "]" +alternate: "[" prompt ("|" prompt)+ "]" WHITESPACE: /\s+/ -plain: /([^\\\[\]():]|\\.)+/ +plain: /([^\\\[\]():|]|\\.)+/ %import common.SIGNED_NUMBER -> NUMBER """) @@ -59,6 +60,8 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): tree.children[-1] *= steps tree.children[-1] = min(steps, int(tree.children[-1])) l.append(tree.children[-1]) + def alternate(self, tree): + l.extend(range(1, steps+1)) CollectSteps().visit(tree) return sorted(set(l)) @@ -67,6 +70,8 @@ def get_learned_conditioning_prompt_schedules(prompts, steps): def scheduled(self, args): before, after, _, when = args yield before or () if step <= when else after + def alternate(self, args): + yield next(args[(step - 1)%len(args)]) def start(self, args): def flatten(x): if type(x) == str: From 01f8cb44474e454903c11718e6a4f33dbde34bb8 Mon Sep 17 00:00:00 2001 From: Greendayle Date: Sat, 8 Oct 2022 18:02:56 +0200 Subject: [PATCH 178/460] made deepdanbooru optional, added to readme, automatic download of deepbooru model --- README.md | 2 ++ launch.py | 4 ++++ modules/deepbooru.py | 20 ++++++++++---------- modules/shared.py | 1 + modules/ui.py | 19 ++++++++++++------- requirements.txt | 3 --- requirements_versions.txt | 3 --- 7 files changed, 29 insertions(+), 23 deletions(-) diff --git a/README.md b/README.md index ef9b5e313..6cd7a1f9d 100644 --- a/README.md +++ b/README.md @@ -66,6 +66,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web - separate prompts using uppercase `AND` - also supports weights for prompts: `a cat :1.2 AND a dog AND a penguin :2.2` - No token limit for prompts (original stable diffusion lets you use up to 75 tokens) +- DeepDanbooru integration, creates danbooru style tags for anime prompts (add --deepdanbooru to commandline args) ## Installation and Running Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Dependencies) are met and follow the instructions available for both [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended) and [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs. @@ -123,4 +124,5 @@ The documentation was moved from this README over to the project's [wiki](https: - Noise generation for outpainting mk2 - https://github.com/parlance-zz/g-diffuser-bot - CLIP interrogator idea and borrowing some code - https://github.com/pharmapsychotic/clip-interrogator - Initial Gradio script - posted on 4chan by an Anonymous user. Thank you Anonymous user. 
+- DeepDanbooru - interrogator for anime diffusors https://github.com/KichangKim/DeepDanbooru - (You) diff --git a/launch.py b/launch.py index 61f62096c..d46426eb3 100644 --- a/launch.py +++ b/launch.py @@ -33,6 +33,7 @@ def extract_arg(args, name): args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test') xformers = '--xformers' in args +deepdanbooru = '--deepdanbooru' in args def repo_dir(name): @@ -132,6 +133,9 @@ if not is_installed("xformers") and xformers: elif platform.system() == "Linux": run_pip("install xformers", "xformers") +if not is_installed("deepdanbooru") and deepdanbooru: + run_pip("install git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru") + os.makedirs(dir_repos, exist_ok=True) git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash) diff --git a/modules/deepbooru.py b/modules/deepbooru.py index 781b22492..7e3c06182 100644 --- a/modules/deepbooru.py +++ b/modules/deepbooru.py @@ -9,16 +9,16 @@ def _load_tf_and_return_tags(pil_image, threshold): import numpy as np this_folder = os.path.dirname(__file__) - model_path = os.path.join(this_folder, '..', 'models', 'deepbooru', 'deepdanbooru-v3-20211112-sgd-e28') - - model_good = False - for path_candidate in [model_path, os.path.dirname(model_path)]: - if os.path.exists(os.path.join(path_candidate, 'project.json')): - model_path = path_candidate - model_good = True - if not model_good: - return ("Download https://github.com/KichangKim/DeepDanbooru/releases/download/v3-20211112-sgd-e28/" - "deepdanbooru-v3-20211112-sgd-e28.zip unpack and put into models/deepbooru") + model_path = os.path.abspath(os.path.join(this_folder, '..', 'models', 'deepbooru')) + if not os.path.exists(os.path.join(model_path, 'project.json')): + # there is no point importing these every time + import zipfile + from basicsr.utils.download_util import load_file_from_url + load_file_from_url(r"https://github.com/KichangKim/DeepDanbooru/releases/download/v3-20211112-sgd-e28/deepdanbooru-v3-20211112-sgd-e28.zip", + model_path) + with zipfile.ZipFile(os.path.join(model_path, "deepdanbooru-v3-20211112-sgd-e28.zip"), "r") as zip_ref: + zip_ref.extractall(model_path) + os.remove(os.path.join(model_path, "deepdanbooru-v3-20211112-sgd-e28.zip")) tags = dd.project.load_tags_from_project(model_path) model = dd.project.load_model_from_project( diff --git a/modules/shared.py b/modules/shared.py index 02cb27228..c87b726e7 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -44,6 +44,7 @@ parser.add_argument("--scunet-models-path", type=str, help="Path to directory wi parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR')) parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR')) parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers") +parser.add_argument("--deepdanbooru", action='store_true', help="enable deepdanbooru interrogator") parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. 
By default, it's on for torch.cuda and off for other torch devices.") parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization") parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find") diff --git a/modules/ui.py b/modules/ui.py index 30583fe93..c5c11c3c9 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -23,9 +23,10 @@ import gradio.utils import gradio.routes from modules import sd_hijack -from modules.deepbooru import get_deepbooru_tags from modules.paths import script_path from modules.shared import opts, cmd_opts +if cmd_opts.deepdanbooru: + from modules.deepbooru import get_deepbooru_tags import modules.shared as shared from modules.sd_samplers import samplers, samplers_for_img2img from modules.sd_hijack import model_hijack @@ -437,7 +438,10 @@ def create_toprow(is_img2img): with gr.Row(scale=1): if is_img2img: interrogate = gr.Button('Interrogate\nCLIP', elem_id="interrogate") - deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") + if cmd_opts.deepdanbooru: + deepbooru = gr.Button('Interrogate\nDeepBooru', elem_id="deepbooru") + else: + deepbooru = None else: interrogate = None deepbooru = None @@ -782,11 +786,12 @@ def create_ui(wrap_gradio_gpu_call): outputs=[img2img_prompt], ) - img2img_deepbooru.click( - fn=interrogate_deepbooru, - inputs=[init_img], - outputs=[img2img_prompt], - ) + if cmd_opts.deepdanbooru: + img2img_deepbooru.click( + fn=interrogate_deepbooru, + inputs=[init_img], + outputs=[img2img_prompt], + ) save.click( fn=wrap_gradio_call(save_files), diff --git a/requirements.txt b/requirements.txt index cd3953c6c..81641d68f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -23,7 +23,4 @@ resize-right torchdiffeq kornia lark -deepdanbooru -tensorflow -tensorflow-io functorch diff --git a/requirements_versions.txt b/requirements_versions.txt index 2d256a54f..fec3e9d5b 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -22,7 +22,4 @@ resize-right==0.0.2 torchdiffeq==0.2.3 kornia==0.6.7 lark==1.1.2 -git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26#egg=deepdanbooru[tensorflow] -tensorflow==2.10.0 -tensorflow-io==0.27.0 functorch==0.2.1 From f9c5da159245bb1e7603b3c8b9e0703bcb1c2ff5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 8 Oct 2022 19:05:19 +0300 Subject: [PATCH 179/460] add fallback for xformers_attnblock_forward --- modules/sd_hijack_optimizations.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index d23d733b0..dba21192b 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -211,6 +211,7 @@ def cross_attention_attnblock_forward(self, x): return h3 def xformers_attnblock_forward(self, x): + try: h_ = x h_ = self.norm(h_) q1 = self.q(h_).contiguous() @@ -218,4 +219,6 @@ def xformers_attnblock_forward(self, x): v = self.v(h_).contiguous() out = xformers.ops.memory_efficient_attention(q1, k1, v) out = self.proj_out(out) - return x+out + return x + out + except NotImplementedError: + return cross_attention_attnblock_forward(self, x) From 3061cdb7b610d4ba7f1ea695d9d6364b591e5bc7 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 8 Oct 2022 19:22:15 +0300 Subject: [PATCH 180/460] add --force-enable-xformers option 
and also add messages to console regarding cross attention optimizations --- modules/sd_hijack.py | 6 +++++- modules/shared.py | 1 + 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index a3e374f09..307cc67dd 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -22,12 +22,16 @@ def apply_optimizations(): undo_optimizations() ldm.modules.diffusionmodules.model.nonlinearity = silu - if cmd_opts.xformers and shared.xformers_available and torch.version.cuda and torch.cuda.get_device_capability(shared.device) == (8, 6): + + if cmd_opts.force_enable_xformers or (cmd_opts.xformers and shared.xformers_available and torch.version.cuda and torch.cuda.get_device_capability(shared.device) == (8, 6)): + print("Applying xformers cross attention optimization.") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.xformers_attention_forward ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.xformers_attnblock_forward elif cmd_opts.opt_split_attention_v1: + print("Applying v1 cross attention optimization.") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward_v1 elif not cmd_opts.disable_opt_split_attention and (cmd_opts.opt_split_attention or torch.cuda.is_available()): + print("Applying cross attention optimization.") ldm.modules.attention.CrossAttention.forward = sd_hijack_optimizations.split_cross_attention_forward ldm.modules.diffusionmodules.model.AttnBlock.forward = sd_hijack_optimizations.cross_attention_attnblock_forward diff --git a/modules/shared.py b/modules/shared.py index 02cb27228..8f9412262 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -44,6 +44,7 @@ parser.add_argument("--scunet-models-path", type=str, help="Path to directory wi parser.add_argument("--swinir-models-path", type=str, help="Path to directory with SwinIR model file(s).", default=os.path.join(models_path, 'SwinIR')) parser.add_argument("--ldsr-models-path", type=str, help="Path to directory with LDSR model file(s).", default=os.path.join(models_path, 'LDSR')) parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers") +parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work") parser.add_argument("--opt-split-attention", action='store_true', help="force-enables cross-attention layer optimization. 
By default, it's on for torch.cuda and off for other torch devices.") parser.add_argument("--disable-opt-split-attention", action='store_true', help="force-disables cross-attention layer optimization") parser.add_argument("--opt-split-attention-v1", action='store_true', help="enable older version of split attention optimization that does not consume all the VRAM it can find") From 15c4278f1a18b8104e135dd82690d10cff39a2e7 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 8 Oct 2022 17:50:01 +0100 Subject: [PATCH 181/460] TI preprocess wording MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I had to check the code to work out what splitting was 🤷🏿 --- modules/ui.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index ffd75f6ac..d52d74c6d 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -980,9 +980,9 @@ def create_ui(wrap_gradio_gpu_call): process_dst = gr.Textbox(label='Destination directory') with gr.Row(): - process_flip = gr.Checkbox(label='Flip') - process_split = gr.Checkbox(label='Split into two') - process_caption = gr.Checkbox(label='Add caption') + process_flip = gr.Checkbox(label='Create flipped copies') + process_split = gr.Checkbox(label='Split oversized images into two') + process_caption = gr.Checkbox(label='Use CLIP caption as filename') with gr.Row(): with gr.Column(scale=3): From b458fa48fe5734a872bca83061d702609cb52940 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sat, 8 Oct 2022 17:56:28 +0100 Subject: [PATCH 182/460] Update ui.py --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index d52d74c6d..b09359aae 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -982,7 +982,7 @@ def create_ui(wrap_gradio_gpu_call): with gr.Row(): process_flip = gr.Checkbox(label='Create flipped copies') process_split = gr.Checkbox(label='Split oversized images into two') - process_caption = gr.Checkbox(label='Use CLIP caption as filename') + process_caption = gr.Checkbox(label='Use BLIP caption as filename') with gr.Row(): with gr.Column(scale=3): From 1371d7608b402d6f15c200ec2f5fde4579836a05 Mon Sep 17 00:00:00 2001 From: Fampai Date: Sat, 8 Oct 2022 14:28:22 -0400 Subject: [PATCH 183/460] Added ability to ignore last n layers in FrozenCLIPEmbedder --- modules/sd_hijack.py | 11 +++++++++-- modules/shared.py | 1 + 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/modules/sd_hijack.py b/modules/sd_hijack.py index 307cc67dd..f12a9696f 100644 --- a/modules/sd_hijack.py +++ b/modules/sd_hijack.py @@ -281,8 +281,15 @@ class FrozenCLIPEmbedderWithCustomWords(torch.nn.Module): remade_batch_tokens_of_same_length = [x + [self.wrapped.tokenizer.eos_token_id] * (target_token_count - len(x)) for x in remade_batch_tokens] tokens = torch.asarray(remade_batch_tokens_of_same_length).to(device) - outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids) - z = outputs.last_hidden_state + + tmp = -opts.CLIP_ignore_last_layers + if (opts.CLIP_ignore_last_layers == 0): + outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids) + z = outputs.last_hidden_state + else: + outputs = self.wrapped.transformer(input_ids=tokens, position_ids=position_ids, output_hidden_states=tmp) + z = outputs.hidden_states[tmp] + z = self.wrapped.transformer.text_model.final_layer_norm(z) # restoring original mean is likely not correct, but it 
seems to work well to prevent artifacts that happen otherwise batch_multipliers_of_same_length = [x + [1.0] * (target_token_count - len(x)) for x in batch_multipliers] diff --git a/modules/shared.py b/modules/shared.py index 8f9412262..af8dc7447 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -225,6 +225,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion"), { "use_old_emphasis_implementation": OptionInfo(False, "Use old emphasis implementation. Can be useful to reproduce old seeds."), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "filter_nsfw": OptionInfo(False, "Filter NSFW content"), + 'CLIP_ignore_last_layers': OptionInfo(0, "Ignore last layers of CLIP model", gr.Slider, {"minimum": 0, "maximum": 5, "step": 1}), "random_artist_categories": OptionInfo([], "Allowed categories for random artists selection when using the Roll button", gr.CheckboxGroup, {"choices": artist_db.categories()}), })) From e6e42f98df2c928c4f49351ad6b466387ce87d42 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 8 Oct 2022 19:25:10 +0300 Subject: [PATCH 184/460] make --force-enable-xformers work without needing --xformers --- modules/sd_hijack_optimizations.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index dba21192b..c4396bb9b 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -10,7 +10,7 @@ from einops import rearrange from modules import shared -if shared.cmd_opts.xformers: +if shared.cmd_opts.xformers or shared.cmd_opts.force_enable_xformers: try: import xformers.ops import functorch From 3b2141c5fb6a3c2b8ab4b1e759a97ead77260129 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 8 Oct 2022 22:21:15 +0300 Subject: [PATCH 185/460] add 'Ignore last layers of CLIP model' option as a parameter to the infotext --- modules/processing.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 8240ee270..515fc91a3 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -123,6 +123,7 @@ class Processed: self.index_of_first_image = index_of_first_image self.styles = p.styles self.job_timestamp = state.job_timestamp + self.clip_skip = opts.CLIP_ignore_last_layers self.eta = p.eta self.ddim_discretize = p.ddim_discretize @@ -141,7 +142,6 @@ class Processed: self.all_subseeds = all_subseeds or [self.subseed] self.infotexts = infotexts or [info] - def js(self): obj = { "prompt": self.prompt, @@ -170,6 +170,7 @@ class Processed: "infotexts": self.infotexts, "styles": self.styles, "job_timestamp": self.job_timestamp, + "clip_skip": self.clip_skip, } return json.dumps(obj) @@ -267,6 +268,8 @@ def fix_seed(p): def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration=0, position_in_batch=0): index = position_in_batch + iteration * p.batch_size + clip_skip = getattr(p, 'clip_skip', opts.CLIP_ignore_last_layers) + generation_params = { "Steps": p.steps, "Sampler": sd_samplers.samplers[p.sampler_index].name, @@ -282,6 +285,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration "Seed resize from": (None if p.seed_resize_from_w == 0 or p.seed_resize_from_h == 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"), "Denoising strength": getattr(p, 'denoising_strength', None), "Eta": (None if p.sampler is None or 
p.sampler.eta == p.sampler.default_eta else p.sampler.eta), + "Clip skip": None if clip_skip==0 else clip_skip, } generation_params.update(p.extra_generation_params) From 610a7f4e1480c0ffeedb2a07dc27ae86bf03c3a8 Mon Sep 17 00:00:00 2001 From: Edouard Leurent Date: Sat, 8 Oct 2022 16:49:43 +0100 Subject: [PATCH 186/460] Break after finding the local directory of stable diffusion Otherwise, we may override it with one of the next two path (. or ..) if it is present there, and then the local paths of other modules (taming transformers, codeformers, etc.) wont be found in sd_path/../. Fix https://github.com/AUTOMATIC1111/stable-diffusion-webui/issues/1085 --- modules/paths.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/paths.py b/modules/paths.py index 606f7d666..0519caa0a 100644 --- a/modules/paths.py +++ b/modules/paths.py @@ -12,6 +12,7 @@ possible_sd_paths = [os.path.join(script_path, 'repositories/stable-diffusion'), for possible_sd_path in possible_sd_paths: if os.path.exists(os.path.join(possible_sd_path, 'ldm/models/diffusion/ddpm.py')): sd_path = os.path.abspath(possible_sd_path) + break assert sd_path is not None, "Couldn't find Stable Diffusion in any of: " + str(possible_sd_paths) From 432782163ae53e605470bcefc9a6f796c4556912 Mon Sep 17 00:00:00 2001 From: Aidan Holland Date: Sat, 8 Oct 2022 15:12:24 -0400 Subject: [PATCH 187/460] chore: Fix typos --- README.md | 2 +- javascript/imageviewer.js | 2 +- modules/interrogate.py | 4 ++-- modules/processing.py | 2 +- modules/scunet_model_arch.py | 4 ++-- modules/sd_models.py | 4 ++-- modules/sd_samplers.py | 4 ++-- modules/shared.py | 6 +++--- modules/swinir_model_arch.py | 2 +- modules/ui.py | 4 ++-- 10 files changed, 17 insertions(+), 17 deletions(-) diff --git a/README.md b/README.md index ef9b5e313..63dd0c187 100644 --- a/README.md +++ b/README.md @@ -34,7 +34,7 @@ Check the [custom scripts](https://github.com/AUTOMATIC1111/stable-diffusion-web - Sampling method selection - Interrupt processing at any time - 4GB video card support (also reports of 2GB working) -- Correct seeds for batches +- Correct seeds for batches - Prompt length validation - get length of prompt in tokens as you type - get a warning after generation if some text was truncated diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js index 4c0e8f4bb..6a00c0da4 100644 --- a/javascript/imageviewer.js +++ b/javascript/imageviewer.js @@ -95,7 +95,7 @@ function showGalleryImage(){ e.addEventListener('click', function (evt) { if(!opts.js_modal_lightbox) return; - modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initialy_zoomed) + modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed) showModal(evt) },true); } diff --git a/modules/interrogate.py b/modules/interrogate.py index eed87144f..635e266e7 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -140,11 +140,11 @@ class InterrogateModels: res = caption - cilp_image = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(shared.device) + clip_image = self.clip_preprocess(pil_image).unsqueeze(0).type(self.dtype).to(shared.device) precision_scope = torch.autocast if shared.cmd_opts.precision == "autocast" else contextlib.nullcontext with torch.no_grad(), precision_scope("cuda"): - image_features = self.clip_model.encode_image(cilp_image).type(self.dtype) + image_features = self.clip_model.encode_image(clip_image).type(self.dtype) image_features /= image_features.norm(dim=-1, keepdim=True) diff --git 
a/modules/processing.py b/modules/processing.py index 515fc91a3..31220881e 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -386,7 +386,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if state.interrupted or state.skipped: - # if we are interruped, sample returns just noise + # if we are interrupted, sample returns just noise # use the image collected previously in sampler loop samples_ddim = shared.state.current_latent diff --git a/modules/scunet_model_arch.py b/modules/scunet_model_arch.py index 972a2639a..43ca8d36f 100644 --- a/modules/scunet_model_arch.py +++ b/modules/scunet_model_arch.py @@ -40,7 +40,7 @@ class WMSA(nn.Module): Returns: attn_mask: should be (1 1 w p p), """ - # supporting sqaure. + # supporting square. attn_mask = torch.zeros(h, w, p, p, p, p, dtype=torch.bool, device=self.relative_position_params.device) if self.type == 'W': return attn_mask @@ -65,7 +65,7 @@ class WMSA(nn.Module): x = rearrange(x, 'b (w1 p1) (w2 p2) c -> b w1 w2 p1 p2 c', p1=self.window_size, p2=self.window_size) h_windows = x.size(1) w_windows = x.size(2) - # sqaure validation + # square validation # assert h_windows == w_windows x = rearrange(x, 'b w1 w2 p1 p2 c -> b (w1 w2) (p1 p2) c', p1=self.window_size, p2=self.window_size) diff --git a/modules/sd_models.py b/modules/sd_models.py index 9409d0707..a09866ce6 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -147,7 +147,7 @@ def load_model_weights(model, checkpoint_file, sd_model_hash): model.first_stage_model.load_state_dict(vae_dict) model.sd_model_hash = sd_model_hash - model.sd_model_checkpint = checkpoint_file + model.sd_model_checkpoint = checkpoint_file def load_model(): @@ -175,7 +175,7 @@ def reload_model_weights(sd_model, info=None): from modules import lowvram, devices, sd_hijack checkpoint_info = info or select_checkpoint() - if sd_model.sd_model_checkpint == checkpoint_info.filename: + if sd_model.sd_model_checkpoint == checkpoint_info.filename: return if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: diff --git a/modules/sd_samplers.py b/modules/sd_samplers.py index eade0dbbd..6e743f7e9 100644 --- a/modules/sd_samplers.py +++ b/modules/sd_samplers.py @@ -181,7 +181,7 @@ class VanillaStableDiffusionSampler: self.initialize(p) - # existing code fails with cetain step counts, like 9 + # existing code fails with certain step counts, like 9 try: self.sampler.make_schedule(ddim_num_steps=steps, ddim_eta=self.eta, ddim_discretize=p.ddim_discretize, verbose=False) except Exception: @@ -204,7 +204,7 @@ class VanillaStableDiffusionSampler: steps = steps or p.steps - # existing code fails with cetin step counts, like 9 + # existing code fails with certain step counts, like 9 try: samples_ddim, _ = self.sampler.sample(S=steps, conditioning=conditioning, batch_size=int(x.shape[0]), shape=x[0].shape, verbose=False, unconditional_guidance_scale=p.cfg_scale, unconditional_conditioning=unconditional_conditioning, x_T=x, eta=self.eta) except Exception: diff --git a/modules/shared.py b/modules/shared.py index af8dc7447..2dc092d68 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -141,9 +141,9 @@ class OptionInfo: self.section = None -def options_section(section_identifer, options_dict): +def options_section(section_identifier, options_dict): for k, v in options_dict.items(): - v.section = section_identifer + v.section = section_identifier return options_dict @@ -246,7 +246,7 @@ options_templates.update(options_section(('ui', "User interface"), { "add_model_hash_to_info": OptionInfo(True, 
"Add model hash to generation information"), "font": OptionInfo("", "Font for image grids that have text"), "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), - "js_modal_lightbox_initialy_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), + "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), "show_progress_in_title": OptionInfo(True, "Show generation progress in window title."), })) diff --git a/modules/swinir_model_arch.py b/modules/swinir_model_arch.py index 461fb354c..863f42db6 100644 --- a/modules/swinir_model_arch.py +++ b/modules/swinir_model_arch.py @@ -166,7 +166,7 @@ class SwinTransformerBlock(nn.Module): Args: dim (int): Number of input channels. - input_resolution (tuple[int]): Input resulotion. + input_resolution (tuple[int]): Input resolution. num_heads (int): Number of attention heads. window_size (int): Window size. shift_size (int): Shift size for SW-MSA. diff --git a/modules/ui.py b/modules/ui.py index b09359aae..b51af1214 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -38,7 +38,7 @@ from modules import prompt_parser from modules.images import save_image import modules.textual_inversion.ui -# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the bowser will not show any UI +# this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI mimetypes.init() mimetypes.add_type('application/javascript', '.js') @@ -102,7 +102,7 @@ def save_files(js_data, images, index): import csv filenames = [] - #quick dictionary to class object conversion. Its neccesary due apply_filename_pattern requiring it + #quick dictionary to class object conversion. Its necessary due apply_filename_pattern requiring it class MyObject: def __init__(self, d=None): if d is not None: From 050a6a798cec90ae2f881c2ddd3f0221e69907dc Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sat, 8 Oct 2022 23:26:48 +0300 Subject: [PATCH 188/460] support loading .yaml config with same name as model support EMA weights in processing (????) 
--- modules/processing.py | 2 +- modules/sd_models.py | 30 +++++++++++++++++++++++------- 2 files changed, 24 insertions(+), 8 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 31220881e..4fea6d567 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -347,7 +347,7 @@ def process_images(p: StableDiffusionProcessing) -> Processed: infotexts = [] output_images = [] - with torch.no_grad(): + with torch.no_grad(), p.sd_model.ema_scope(): with devices.autocast(): p.init(all_prompts, all_seeds, all_subseeds) diff --git a/modules/sd_models.py b/modules/sd_models.py index a09866ce6..cb3982b16 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -14,7 +14,7 @@ from modules.paths import models_path model_dir = "Stable-diffusion" model_path = os.path.abspath(os.path.join(models_path, model_dir)) -CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name']) +CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name', 'config']) checkpoints_list = {} try: @@ -63,14 +63,20 @@ def list_models(): if os.path.exists(cmd_ckpt): h = model_hash(cmd_ckpt) title, short_model_name = modeltitle(cmd_ckpt, h) - checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name) + checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name, shared.cmd_opts.config) shared.opts.data['sd_model_checkpoint'] = title elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file: print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr) for filename in model_list: h = model_hash(filename) title, short_model_name = modeltitle(filename, h) - checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name) + + basename, _ = os.path.splitext(filename) + config = basename + ".yaml" + if not os.path.exists(config): + config = shared.cmd_opts.config + + checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name, config) def get_closet_checkpoint_match(searchString): @@ -116,7 +122,10 @@ def select_checkpoint(): return checkpoint_info -def load_model_weights(model, checkpoint_file, sd_model_hash): +def load_model_weights(model, checkpoint_info): + checkpoint_file = checkpoint_info.filename + sd_model_hash = checkpoint_info.hash + print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}") pl_sd = torch.load(checkpoint_file, map_location="cpu") @@ -148,15 +157,19 @@ def load_model_weights(model, checkpoint_file, sd_model_hash): model.sd_model_hash = sd_model_hash model.sd_model_checkpoint = checkpoint_file + model.sd_checkpoint_info = checkpoint_info def load_model(): from modules import lowvram, sd_hijack checkpoint_info = select_checkpoint() - sd_config = OmegaConf.load(shared.cmd_opts.config) + if checkpoint_info.config != shared.cmd_opts.config: + print(f"Loading config from: {shared.cmd_opts.config}") + + sd_config = OmegaConf.load(checkpoint_info.config) sd_model = instantiate_from_config(sd_config.model) - load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash) + load_model_weights(sd_model, checkpoint_info) if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: lowvram.setup_for_low_vram(sd_model, shared.cmd_opts.medvram) @@ -178,6 +191,9 @@ def reload_model_weights(sd_model, info=None): if sd_model.sd_model_checkpoint == checkpoint_info.filename: return + if sd_model.sd_checkpoint_info.config != checkpoint_info.config: + return load_model() + if 
shared.cmd_opts.lowvram or shared.cmd_opts.medvram: lowvram.send_everything_to_cpu() else: @@ -185,7 +201,7 @@ def reload_model_weights(sd_model, info=None): sd_hijack.model_hijack.undo_hijack(sd_model) - load_model_weights(sd_model, checkpoint_info.filename, checkpoint_info.hash) + load_model_weights(sd_model, checkpoint_info) sd_hijack.model_hijack.hijack(sd_model) From 5841990b0df04906da7321beef6f7f7902b7d57b Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sun, 9 Oct 2022 05:38:38 +0100 Subject: [PATCH 189/460] Update textual_inversion.py --- .../textual_inversion/textual_inversion.py | 25 ++++++++++++++++--- 1 file changed, 22 insertions(+), 3 deletions(-) diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index cd9f34984..f63160208 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -7,6 +7,9 @@ import tqdm import html import datetime +from PIL import Image, PngImagePlugin +import base64 +from io import BytesIO from modules import shared, devices, sd_hijack, processing, sd_models import modules.textual_inversion.dataset @@ -80,7 +83,15 @@ class EmbeddingDatabase: def process_file(path, filename): name = os.path.splitext(filename)[0] - data = torch.load(path, map_location="cpu") + data = [] + + if filename.upper().endswith('.PNG'): + embed_image = Image.open(path) + if 'sd-embedding' in embed_image.text: + embeddingData = base64.b64decode(embed_image.text['sd-embedding']) + data = torch.load(BytesIO(embeddingData), map_location="cpu") + else: + data = torch.load(path, map_location="cpu") # textual inversion embeddings if 'string_to_param' in data: @@ -156,7 +167,7 @@ def create_embedding(name, num_vectors_per_token, init_text='*'): return fn -def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps, create_image_every, save_embedding_every, template_file): +def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps, create_image_every, save_embedding_every, template_file, save_image_with_stored_embedding): assert embedding_name, 'embedding not selected' shared.state.textinfo = "Initializing textual inversion training..." 
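# Note: process_file above is the read half of a round trip; the hunk below
# adds the write half. A minimal standalone sketch of both sides, assuming
# the chunk value is stored as ASCII base64 (the patch passes raw base64
# bytes to add_text, which Pillow also accepts):
import base64
from io import BytesIO

import torch
from PIL import Image, PngImagePlugin

def save_png_with_embedding(image, embedding_file, out_path):
    # stash the raw .pt bytes as a base64 'sd-embedding' text chunk
    info = PngImagePlugin.PngInfo()
    with open(embedding_file, "rb") as f:
        info.add_text("sd-embedding", base64.b64encode(f.read()).decode("ascii"))
    image.save(out_path, "PNG", pnginfo=info)

def load_embedding_from_png(png_path):
    # reverse of the above: pull the chunk, un-base64 it, torch.load the bytes
    data = base64.b64decode(Image.open(png_path).text["sd-embedding"])
    return torch.load(BytesIO(data), map_location="cpu")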
@@ -244,7 +255,15 @@ def train_embedding(embedding_name, learn_rate, data_root, log_directory, steps, image = processed.images[0] shared.state.current_image = image - image.save(last_saved_image) + + if save_image_with_stored_embedding: + info = PngImagePlugin.PngInfo() + info.add_text("sd-embedding", base64.b64encode(open(last_saved_file,'rb').read())) + image.save(last_saved_image, "PNG", pnginfo=info) + else: + image.save(last_saved_image) + + last_saved_image += f", prompt: {text}" From cd8673bd9b2e59bddefee8d307340d643695fe11 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sun, 9 Oct 2022 05:40:57 +0100 Subject: [PATCH 190/460] add embed embedding to ui --- modules/ui.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index b51af1214..a59832041 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1001,7 +1001,8 @@ def create_ui(wrap_gradio_gpu_call): steps = gr.Number(label='Max steps', value=100000, precision=0) create_image_every = gr.Number(label='Save an image to log directory every N steps, 0 to disable', value=500, precision=0) save_embedding_every = gr.Number(label='Save a copy of embedding to log directory every N steps, 0 to disable', value=500, precision=0) - + save_image_with_stored_embedding = gr.Checkbox(label='Save images with embedding in PNG chunks', value=True) + with gr.Row(): with gr.Column(scale=2): gr.HTML(value="") @@ -1063,6 +1064,7 @@ def create_ui(wrap_gradio_gpu_call): create_image_every, save_embedding_every, template_file, + save_image_with_stored_embedding, ], outputs=[ ti_output, From c77c89cc83c618472ad352cf8a28fde28c3a1377 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 9 Oct 2022 10:23:31 +0300 Subject: [PATCH 191/460] make main model loading and model merger use the same code --- modules/extras.py | 6 +++--- modules/sd_models.py | 14 +++++++++----- 2 files changed, 12 insertions(+), 8 deletions(-) diff --git a/modules/extras.py b/modules/extras.py index 1d9e64e55..ef6e6de7a 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -169,9 +169,9 @@ def run_modelmerger(primary_model_name, secondary_model_name, interp_method, int print(f"Loading {secondary_model_info.filename}...") secondary_model = torch.load(secondary_model_info.filename, map_location='cpu') - - theta_0 = primary_model['state_dict'] - theta_1 = secondary_model['state_dict'] + + theta_0 = sd_models.get_state_dict_from_checkpoint(primary_model) + theta_1 = sd_models.get_state_dict_from_checkpoint(secondary_model) theta_funcs = { "Weighted Sum": weighted_sum, diff --git a/modules/sd_models.py b/modules/sd_models.py index cb3982b16..18fb8c2ed 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -122,6 +122,13 @@ def select_checkpoint(): return checkpoint_info +def get_state_dict_from_checkpoint(pl_sd): + if "state_dict" in pl_sd: + return pl_sd["state_dict"] + + return pl_sd + + def load_model_weights(model, checkpoint_info): checkpoint_file = checkpoint_info.filename sd_model_hash = checkpoint_info.hash @@ -131,11 +138,8 @@ def load_model_weights(model, checkpoint_info): pl_sd = torch.load(checkpoint_file, map_location="cpu") if "global_step" in pl_sd: print(f"Global Step: {pl_sd['global_step']}") - - if "state_dict" in pl_sd: - sd = pl_sd["state_dict"] - else: - sd = pl_sd + + sd = get_state_dict_from_checkpoint(pl_sd) model.load_state_dict(sd, strict=False) From 4e569fd888f8e3c5632a072d51abbb6e4d17abd6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> 
Date: Sun, 9 Oct 2022 10:31:47 +0300 Subject: [PATCH 192/460] fixed incorrect message about loading config; thanks anon! --- modules/sd_models.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 18fb8c2ed..2101b18da 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -169,7 +169,7 @@ def load_model(): checkpoint_info = select_checkpoint() if checkpoint_info.config != shared.cmd_opts.config: - print(f"Loading config from: {shared.cmd_opts.config}") + print(f"Loading config from: {checkpoint_info.config}") sd_config = OmegaConf.load(checkpoint_info.config) sd_model = instantiate_from_config(sd_config.model) From 5ab7e88d9b0bb0125af9f7237242a00a93360ce5 Mon Sep 17 00:00:00 2001 From: aoirusann <82883326+aoirusann@users.noreply.github.com> Date: Sat, 8 Oct 2022 13:09:29 +0800 Subject: [PATCH 193/460] Add `Download` & `Download as zip` --- modules/ui.py | 39 ++++++++++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index b51af1214..fe7f10a73 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -98,9 +98,10 @@ def send_gradio_gallery_to_image(x): return image_from_url_text(x[0]) -def save_files(js_data, images, index): +def save_files(js_data, images, do_make_zip, index): import csv filenames = [] + fullfns = [] #quick dictionary to class object conversion. Its necessary due apply_filename_pattern requiring it class MyObject: @@ -141,10 +142,22 @@ def save_files(js_data, images, index): filename = os.path.relpath(fullfn, path) filenames.append(filename) + fullfns.append(fullfn) writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]]) - return '', '', plaintext_to_html(f"Saved: {filenames[0]}") + # Make Zip + if do_make_zip: + zip_filepath = os.path.join(path, "images.zip") + + from zipfile import ZipFile + with ZipFile(zip_filepath, "w") as zip_file: + for i in range(len(fullfns)): + with open(fullfns[i], mode="rb") as f: + zip_file.writestr(filenames[i], f.read()) + fullfns.insert(0, zip_filepath) + + return fullfns, '', '', plaintext_to_html(f"Saved: {filenames[0]}") def wrap_gradio_call(func, extra_outputs=None): @@ -521,6 +534,12 @@ def create_ui(wrap_gradio_gpu_call): button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder' open_txt2img_folder = gr.Button(folder_symbol, elem_id=button_id) + with gr.Row(): + do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False) + + with gr.Row(): + download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False) + with gr.Group(): html_info = gr.HTML() generation_info = gr.Textbox(visible=False) @@ -570,13 +589,15 @@ def create_ui(wrap_gradio_gpu_call): save.click( fn=wrap_gradio_call(save_files), - _js="(x, y, z) => [x, y, selected_gallery_index()]", + _js="(x, y, z, w) => [x, y, z, selected_gallery_index()]", inputs=[ generation_info, txt2img_gallery, + do_make_zip, html_info, ], outputs=[ + download_files, html_info, html_info, html_info, @@ -701,6 +722,12 @@ def create_ui(wrap_gradio_gpu_call): button_id = "hidden_element" if shared.cmd_opts.hide_ui_dir_config else 'open_folder' open_img2img_folder = gr.Button(folder_symbol, elem_id=button_id) + with gr.Row(): + do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False) + + with gr.Row(): + download_files = gr.File(None, file_count="multiple", interactive=False, 
show_label=False) + with gr.Group(): html_info = gr.HTML() generation_info = gr.Textbox(visible=False) @@ -776,13 +803,15 @@ def create_ui(wrap_gradio_gpu_call): save.click( fn=wrap_gradio_call(save_files), - _js="(x, y, z) => [x, y, selected_gallery_index()]", + _js="(x, y, z, w) => [x, y, z, selected_gallery_index()]", inputs=[ generation_info, img2img_gallery, - html_info + do_make_zip, + html_info, ], outputs=[ + download_files, html_info, html_info, html_info, From 14192c5b207b16b1ec7a4c9c4ea538d1a6811a4d Mon Sep 17 00:00:00 2001 From: aoirusann Date: Sun, 9 Oct 2022 13:01:10 +0800 Subject: [PATCH 194/460] Support `Download` for txt files. --- modules/images.py | 39 +++++++++++++++++++++++++++++++++++++-- modules/ui.py | 5 ++++- 2 files changed, 41 insertions(+), 3 deletions(-) diff --git a/modules/images.py b/modules/images.py index 29c5ee249..c0a906762 100644 --- a/modules/images.py +++ b/modules/images.py @@ -349,6 +349,38 @@ def get_next_sequence_number(path, basename): def save_image(image, path, basename, seed=None, prompt=None, extension='png', info=None, short_filename=False, no_prompt=False, grid=False, pnginfo_section_name='parameters', p=None, existing_info=None, forced_filename=None, suffix="", save_to_dirs=None): + '''Save an image. + + Args: + image (`PIL.Image`): + The image to be saved. + path (`str`): + The directory to save the image. Note, the option `save_to_dirs` will make the image to be saved into a sub directory. + basename (`str`): + The base filename which will be applied to `filename pattern`. + seed, prompt, short_filename, + extension (`str`): + Image file extension, default is `png`. + pngsectionname (`str`): + Specify the name of the section which `info` will be saved in. + info (`str` or `PngImagePlugin.iTXt`): + PNG info chunks. + existing_info (`dict`): + Additional PNG info. `existing_info == {pngsectionname: info, ...}` + no_prompt: + TODO I don't know its meaning. + p (`StableDiffusionProcessing`) + forced_filename (`str`): + If specified, `basename` and filename pattern will be ignored. + save_to_dirs (bool): + If true, the image will be saved into a subdirectory of `path`. + + Returns: (fullfn, txt_fullfn) + fullfn (`str`): + The full path of the saved imaged. + txt_fullfn (`str` or None): + If a text file is saved for this image, this will be its full path. Otherwise None. 
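        Example (illustrative call, mirroring the save button's usage elsewhere in this patch):
            fullfn, txt_fullfn = save_image(image, path, "", seed=p.all_seeds[i],
                prompt=p.all_prompts[i], info=p.infotexts[i], p=p)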
+ ''' if short_filename or prompt is None or seed is None: file_decoration = "" elif opts.save_to_dirs: @@ -424,7 +456,10 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i piexif.insert(exif_bytes(), fullfn_without_extension + ".jpg") if opts.save_txt and info is not None: - with open(f"{fullfn_without_extension}.txt", "w", encoding="utf8") as file: + txt_fullfn = f"{fullfn_without_extension}.txt" + with open(txt_fullfn, "w", encoding="utf8") as file: file.write(info + "\n") + else: + txt_fullfn = None - return fullfn + return fullfn, txt_fullfn diff --git a/modules/ui.py b/modules/ui.py index fe7f10a73..debd8873b 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -138,11 +138,14 @@ def save_files(js_data, images, do_make_zip, index): is_grid = image_index < p.index_of_first_image i = 0 if is_grid else (image_index - p.index_of_first_image) - fullfn = save_image(image, path, "", seed=p.all_seeds[i], prompt=p.all_prompts[i], extension=extension, info=p.infotexts[image_index], grid=is_grid, p=p, save_to_dirs=save_to_dirs) + fullfn, txt_fullfn = save_image(image, path, "", seed=p.all_seeds[i], prompt=p.all_prompts[i], extension=extension, info=p.infotexts[image_index], grid=is_grid, p=p, save_to_dirs=save_to_dirs) filename = os.path.relpath(fullfn, path) filenames.append(filename) fullfns.append(fullfn) + if txt_fullfn: + filenames.append(os.path.basename(txt_fullfn)) + fullfns.append(txt_fullfn) writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]]) From 122d42687b97ec4df4c2a8c335d2de385cd1f1a1 Mon Sep 17 00:00:00 2001 From: Fampai Date: Sat, 8 Oct 2022 22:37:35 -0400 Subject: [PATCH 195/460] Fix VRAM Issue by only loading in hypernetwork when selected in settings --- modules/hypernetwork.py | 27 +++++++++++++++++---------- modules/sd_hijack_optimizations.py | 6 +++--- modules/shared.py | 7 ++----- webui.py | 3 +++ 4 files changed, 25 insertions(+), 18 deletions(-) diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py index 7f0622428..19f1c2270 100644 --- a/modules/hypernetwork.py +++ b/modules/hypernetwork.py @@ -40,27 +40,34 @@ class Hypernetwork: self.layers[size] = (HypernetworkModule(size, sd[0]), HypernetworkModule(size, sd[1])) -def load_hypernetworks(path): +def list_hypernetworks(path): res = {} - for filename in glob.iglob(os.path.join(path, '**/*.pt'), recursive=True): - try: - hn = Hypernetwork(filename) - res[hn.name] = hn - except Exception: - print(f"Error loading hypernetwork {filename}", file=sys.stderr) - print(traceback.format_exc(), file=sys.stderr) - + name = os.path.splitext(os.path.basename(filename))[0] + res[name] = filename return res +def load_hypernetwork(filename): + print(f"Loading hypernetwork {filename}") + path = shared.hypernetworks.get(filename, None) + if (path is not None): + try: + shared.loaded_hypernetwork = Hypernetwork(path) + except Exception: + print(f"Error loading hypernetwork {path}", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + else: + shared.loaded_hypernetwork = None + + def attention_CrossAttention_forward(self, x, context=None, mask=None): h = self.heads q = self.to_q(x) context = default(context, x) - hypernetwork = shared.selected_hypernetwork() + hypernetwork = shared.loaded_hypernetwork hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None) if hypernetwork_layers is not None: diff --git 
a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py index c4396bb9b..634fb4b24 100644 --- a/modules/sd_hijack_optimizations.py +++ b/modules/sd_hijack_optimizations.py @@ -28,7 +28,7 @@ def split_cross_attention_forward_v1(self, x, context=None, mask=None): q_in = self.to_q(x) context = default(context, x) - hypernetwork = shared.selected_hypernetwork() + hypernetwork = shared.loaded_hypernetwork hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None) if hypernetwork_layers is not None: @@ -68,7 +68,7 @@ def split_cross_attention_forward(self, x, context=None, mask=None): q_in = self.to_q(x) context = default(context, x) - hypernetwork = shared.selected_hypernetwork() + hypernetwork = shared.loaded_hypernetwork hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None) if hypernetwork_layers is not None: @@ -132,7 +132,7 @@ def xformers_attention_forward(self, x, context=None, mask=None): h = self.heads q_in = self.to_q(x) context = default(context, x) - hypernetwork = shared.selected_hypernetwork() + hypernetwork = shared.loaded_hypernetwork hypernetwork_layers = (hypernetwork.layers if hypernetwork is not None else {}).get(context.shape[2], None) if hypernetwork_layers is not None: k_in = self.to_k(hypernetwork_layers[0](context)) diff --git a/modules/shared.py b/modules/shared.py index b2c76a323..9dce6cb7b 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -79,11 +79,8 @@ parallel_processing_allowed = not cmd_opts.lowvram and not cmd_opts.medvram xformers_available = False config_filename = cmd_opts.ui_settings_file -hypernetworks = hypernetwork.load_hypernetworks(os.path.join(models_path, 'hypernetworks')) - - -def selected_hypernetwork(): - return hypernetworks.get(opts.sd_hypernetwork, None) +hypernetworks = hypernetwork.list_hypernetworks(os.path.join(models_path, 'hypernetworks')) +loaded_hypernetwork = None class State: diff --git a/webui.py b/webui.py index 18de8e165..270584f77 100644 --- a/webui.py +++ b/webui.py @@ -82,6 +82,9 @@ modules.scripts.load_scripts(os.path.join(script_path, "scripts")) shared.sd_model = modules.sd_models.load_model() shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights(shared.sd_model))) +loaded_hypernetwork = modules.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork) +shared.opts.onchange("sd_hypernetwork", wrap_queued_call(lambda: modules.hypernetwork.load_hypernetwork(shared.opts.sd_hypernetwork))) + def webui(): # make the program just exit at ctrl+c without waiting for anything From 03e570886f430f39020e504aba057a95f2e62484 Mon Sep 17 00:00:00 2001 From: frostydad <64224601+Cyberes@users.noreply.github.com> Date: Sat, 8 Oct 2022 18:13:13 -0600 Subject: [PATCH 196/460] Fix incorrect sampler name in output --- modules/processing.py | 9 ++++++++- scripts/xy_grid.py | 16 +++++++++------- 2 files changed, 17 insertions(+), 8 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index 4fea6d567..6b8664a07 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1,3 +1,4 @@ + import json import math import os @@ -46,6 +47,12 @@ def apply_color_correction(correction, image): return image +def get_correct_sampler(p): + if isinstance(p, modules.processing.StableDiffusionProcessingTxt2Img): + return sd_samplers.samplers + elif isinstance(p, modules.processing.StableDiffusionProcessingImg2Img): + return sd_samplers.samplers_for_img2img + 
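# Note: the dispatch above matters because txt2img and img2img expose
# different sampler lists, so the same numeric index can name different
# samplers. create_infotext (below) uses it so the "Sampler:" field of the
# infotext stays truthful for img2img runs. Usage sketch (illustrative):
#
#     sampler_name = get_correct_sampler(p)[p.sampler_index].name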
class StableDiffusionProcessing: def __init__(self, sd_model=None, outpath_samples=None, outpath_grids=None, prompt="", styles=None, seed=-1, subseed=-1, subseed_strength=0, seed_resize_from_h=-1, seed_resize_from_w=-1, seed_enable_extras=True, sampler_index=0, batch_size=1, n_iter=1, steps=50, cfg_scale=7.0, width=512, height=512, restore_faces=False, tiling=False, do_not_save_samples=False, do_not_save_grid=False, extra_generation_params=None, overlay_images=None, negative_prompt=None, eta=None): self.sd_model = sd_model @@ -272,7 +279,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration generation_params = { "Steps": p.steps, - "Sampler": sd_samplers.samplers[p.sampler_index].name, + "Sampler": get_correct_sampler(p)[p.sampler_index].name, "CFG scale": p.cfg_scale, "Seed": all_seeds[index], "Face restoration": (opts.face_restoration_model if p.restore_faces else None), diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index c0c364df8..26ae2199d 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -11,7 +11,7 @@ import modules.scripts as scripts import gradio as gr from modules import images -from modules.processing import process_images, Processed +from modules.processing import process_images, Processed, get_correct_sampler from modules.shared import opts, cmd_opts, state import modules.shared as shared import modules.sd_samplers @@ -56,15 +56,17 @@ def apply_order(p, x, xs): p.prompt = prompt_tmp + p.prompt -samplers_dict = {} -for i, sampler in enumerate(modules.sd_samplers.samplers): - samplers_dict[sampler.name.lower()] = i - for alias in sampler.aliases: - samplers_dict[alias.lower()] = i +def build_samplers_dict(p): + samplers_dict = {} + for i, sampler in enumerate(get_correct_sampler(p)): + samplers_dict[sampler.name.lower()] = i + for alias in sampler.aliases: + samplers_dict[alias.lower()] = i + return samplers_dict def apply_sampler(p, x, xs): - sampler_index = samplers_dict.get(x.lower(), None) + sampler_index = build_samplers_dict(p).get(x.lower(), None) if sampler_index is None: raise RuntimeError(f"Unknown sampler: {x}") From ef93acdc731b7a2b3c13651b6de1bce58af989d4 Mon Sep 17 00:00:00 2001 From: frostydad <64224601+Cyberes@users.noreply.github.com> Date: Sat, 8 Oct 2022 18:15:35 -0600 Subject: [PATCH 197/460] remove line break --- modules/processing.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index 6b8664a07..7fa1144e6 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -1,4 +1,3 @@ - import json import math import os From 1ffeb42d38d9276dc28918189d32f60d593a162c Mon Sep 17 00:00:00 2001 From: Nicolas Noullet Date: Sun, 9 Oct 2022 00:18:45 +0200 Subject: [PATCH 198/460] Fix typo --- modules/shared.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/shared.py b/modules/shared.py index 9dce6cb7b..dffa0094b 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -238,7 +238,7 @@ options_templates.update(options_section(('interrogate', "Interrogate Options"), options_templates.update(options_section(('ui', "User interface"), { "show_progressbar": OptionInfo(True, "Show progressbar"), - "show_progress_every_n_steps": OptionInfo(0, "Show show image creation progress every N sampling steps. Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}), + "show_progress_every_n_steps": OptionInfo(0, "Show image creation progress every N sampling steps. 
Set 0 to disable.", gr.Slider, {"minimum": 0, "maximum": 32, "step": 1}), "return_grid": OptionInfo(True, "Show grid in results for web"), "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), From e2930f9821c197da94e208b5ae73711002844efc Mon Sep 17 00:00:00 2001 From: Tony Beeman Date: Fri, 7 Oct 2022 17:46:39 -0700 Subject: [PATCH 199/460] Fix for Prompts_from_file showing extra textbox. --- modules/scripts.py | 30 ++++++++++++++++++++++++++---- scripts/prompts_from_file.py | 4 ++++ 2 files changed, 30 insertions(+), 4 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index 45230f9a1..d8f87927e 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -1,4 +1,5 @@ import os +from pydoc import visiblename import sys import traceback @@ -31,6 +32,15 @@ class Script: def show(self, is_img2img): return True + + # Called when the ui for this script has been shown. + # Useful for hiding some controls, since the scripts module sets visibility to + # everything to true. The parameters will be the parameters returned by the ui method + # The return value should be gradio updates, similar to what you would return + # from a Gradio event handler. + def on_show(self, *args): + return [ui.gr_show(True)] * len(args) + # This is where the additional processing is implemented. The parameters include # self, the model object "p" (a StableDiffusionProcessing class, see # processing.py), and the parameters returned by the ui method. @@ -125,20 +135,32 @@ class ScriptRunner: inputs += controls script.args_to = len(inputs) - def select_script(script_index): + def select_script(*args): + script_index = args[0] + on_show_updates = [] if 0 < script_index <= len(self.scripts): script = self.scripts[script_index-1] args_from = script.args_from args_to = script.args_to + script_args = args[args_from:args_to] + on_show_updates = wrap_call(script.on_show, script.filename, "on_show", *script_args) else: args_from = 0 args_to = 0 - return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))] + ret = [ ui.gr_show(True)] # always show the dropdown + for i in range(1, len(inputs)): + if (args_from <= i < args_to): + ret.append( on_show_updates[i - args_from] ) + else: + ret.append(ui.gr_show(False)) + return ret + + # return [ui.gr_show(True if (i == 0) else on_show_updates[i - args_from] if args_from <= i < args_to else False) for i in range(len(inputs))] dropdown.change( fn=select_script, - inputs=[dropdown], + inputs=inputs, outputs=inputs ) @@ -198,4 +220,4 @@ def reload_scripts(basedir): load_scripts(basedir) scripts_txt2img = ScriptRunner() - scripts_img2img = ScriptRunner() + scripts_img2img = ScriptRunner() \ No newline at end of file diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 513d9a1c5..110889a66 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -10,6 +10,7 @@ from modules.processing import Processed, process_images from PIL import Image from modules.shared import opts, cmd_opts, state +g_txt_mode = False class Script(scripts.Script): def title(self): @@ -29,6 +30,9 @@ class Script(scripts.Script): checkbox_txt.change(fn=lambda x: [gr.File.update(visible = not x), gr.TextArea.update(visible = x)], inputs=[checkbox_txt], outputs=[file, prompt_txt]) return [checkbox_txt, file, prompt_txt] + def on_show(self, checkbox_txt, file, prompt_txt): + return [ gr.Checkbox.update(visible = 
True), gr.File.update(visible = not checkbox_txt), gr.TextArea.update(visible = checkbox_txt) ] + def run(self, p, checkbox_txt, data: bytes, prompt_txt: str): if (checkbox_txt): lines = [x.strip() for x in prompt_txt.splitlines()] From 86cb16886f8f48169cee4658ad0c5e5443beed2a Mon Sep 17 00:00:00 2001 From: Tony Beeman Date: Fri, 7 Oct 2022 23:51:50 -0700 Subject: [PATCH 200/460] Pull Request Code Review Fixes --- modules/scripts.py | 1 - scripts/prompts_from_file.py | 2 -- 2 files changed, 3 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index d8f87927e..8dfd4de94 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -1,5 +1,4 @@ import os -from pydoc import visiblename import sys import traceback diff --git a/scripts/prompts_from_file.py b/scripts/prompts_from_file.py index 110889a66..b24f1a806 100644 --- a/scripts/prompts_from_file.py +++ b/scripts/prompts_from_file.py @@ -10,8 +10,6 @@ from modules.processing import Processed, process_images from PIL import Image from modules.shared import opts, cmd_opts, state -g_txt_mode = False - class Script(scripts.Script): def title(self): return "Prompts from file or textbox" From cbf6dad02d04d98e5a2d5e870777ab99b5796b2d Mon Sep 17 00:00:00 2001 From: Tony Beeman Date: Sat, 8 Oct 2022 10:40:30 -0700 Subject: [PATCH 201/460] Handle case where on_show returns the wrong number of arguments --- modules/scripts.py | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index 8dfd4de94..7d89979d7 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -143,6 +143,8 @@ class ScriptRunner: args_to = script.args_to script_args = args[args_from:args_to] on_show_updates = wrap_call(script.on_show, script.filename, "on_show", *script_args) + if (len(on_show_updates) != (args_to - args_from)): + print("Error in custom script (" + script.filename + "): on_show() method should return the same number of arguments as ui().", file=sys.stderr) else: args_from = 0 args_to = 0 @@ -150,13 +152,14 @@ class ScriptRunner: ret = [ ui.gr_show(True)] # always show the dropdown for i in range(1, len(inputs)): if (args_from <= i < args_to): - ret.append( on_show_updates[i - args_from] ) + if (i - args_from) < len(on_show_updates): + ret.append( on_show_updates[i - args_from] ) + else: + ret.append(ui.gr_show(True)) else: ret.append(ui.gr_show(False)) return ret - # return [ui.gr_show(True if (i == 0) else on_show_updates[i - args_from] if args_from <= i < args_to else False) for i in range(len(inputs))] - dropdown.change( fn=select_script, inputs=inputs, From ab4fe4f44c3d2675a351269fe2ff1ddeac557aa6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 9 Oct 2022 11:59:41 +0300 Subject: [PATCH 202/460] hide filenames for save button by default --- modules/ui.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 8071b1cb6..e1ab26658 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -162,7 +162,7 @@ def save_files(js_data, images, do_make_zip, index): zip_file.writestr(filenames[i], f.read()) fullfns.insert(0, zip_filepath) - return fullfns, '', '', plaintext_to_html(f"Saved: {filenames[0]}") + return gr.File.update(value=fullfns, visible=True), '', '', plaintext_to_html(f"Saved: {filenames[0]}") def wrap_gradio_call(func, extra_outputs=None): @@ -553,7 +553,7 @@ def create_ui(wrap_gradio_gpu_call): do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False) with gr.Row(): - download_files = gr.File(None, 
file_count="multiple", interactive=False, show_label=False) + download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False) with gr.Group(): html_info = gr.HTML() @@ -741,7 +741,7 @@ def create_ui(wrap_gradio_gpu_call): do_make_zip = gr.Checkbox(label="Make Zip when Save?", value=False) with gr.Row(): - download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False) + download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False) with gr.Group(): html_info = gr.HTML() From 0241d811d23427b99f6b1eda1540bdf8d87963d5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 9 Oct 2022 12:04:44 +0300 Subject: [PATCH 203/460] Revert "Fix for Prompts_from_file showing extra textbox." This reverts commit e2930f9821c197da94e208b5ae73711002844efc. --- modules/scripts.py | 32 ++++---------------------------- 1 file changed, 4 insertions(+), 28 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index 7d89979d7..45230f9a1 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -31,15 +31,6 @@ class Script: def show(self, is_img2img): return True - - # Called when the ui for this script has been shown. - # Useful for hiding some controls, since the scripts module sets visibility to - # everything to true. The parameters will be the parameters returned by the ui method - # The return value should be gradio updates, similar to what you would return - # from a Gradio event handler. - def on_show(self, *args): - return [ui.gr_show(True)] * len(args) - # This is where the additional processing is implemented. The parameters include # self, the model object "p" (a StableDiffusionProcessing class, see # processing.py), and the parameters returned by the ui method. 
@@ -134,35 +125,20 @@ class ScriptRunner: inputs += controls script.args_to = len(inputs) - def select_script(*args): - script_index = args[0] - on_show_updates = [] + def select_script(script_index): if 0 < script_index <= len(self.scripts): script = self.scripts[script_index-1] args_from = script.args_from args_to = script.args_to - script_args = args[args_from:args_to] - on_show_updates = wrap_call(script.on_show, script.filename, "on_show", *script_args) - if (len(on_show_updates) != (args_to - args_from)): - print("Error in custom script (" + script.filename + "): on_show() method should return the same number of arguments as ui().", file=sys.stderr) else: args_from = 0 args_to = 0 - ret = [ ui.gr_show(True)] # always show the dropdown - for i in range(1, len(inputs)): - if (args_from <= i < args_to): - if (i - args_from) < len(on_show_updates): - ret.append( on_show_updates[i - args_from] ) - else: - ret.append(ui.gr_show(True)) - else: - ret.append(ui.gr_show(False)) - return ret + return [ui.gr_show(True if i == 0 else args_from <= i < args_to) for i in range(len(inputs))] dropdown.change( fn=select_script, - inputs=inputs, + inputs=[dropdown], outputs=inputs ) @@ -222,4 +198,4 @@ def reload_scripts(basedir): load_scripts(basedir) scripts_txt2img = ScriptRunner() - scripts_img2img = ScriptRunner() \ No newline at end of file + scripts_img2img = ScriptRunner() From 6f6798ddabe10d320fe8ea05edf0fdcef0c51a8e Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 9 Oct 2022 12:33:37 +0300 Subject: [PATCH 204/460] prevent a possible code execution error (thanks, RyotaK) --- modules/ui.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/modules/ui.py b/modules/ui.py index e1ab26658..dad509f3a 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -1153,6 +1153,15 @@ def create_ui(wrap_gradio_gpu_call): component_dict = {} def open_folder(f): + if not os.path.isdir(f): + print(f""" +WARNING +An open_folder request was made with an argument that is not a folder. +This could be an error or a malicious attempt to run code on your computer. +Requested path was: {f} +""", file=sys.stderr) + return + if not shared.cmd_opts.hide_ui_dir_config: path = os.path.normpath(f) if platform.system() == "Windows": From d74c38108f95e44d83a1706ee5ab218124972868 Mon Sep 17 00:00:00 2001 From: Jesse Williams <33797815+xram64@users.noreply.github.com> Date: Sat, 8 Oct 2022 01:30:49 -0400 Subject: [PATCH 205/460] Confirm that options are valid before starting When using the 'Sampler' or 'Checkpoint' options, if one of the entered names has a typo, an error will only be thrown once the `draw_xy_grid` loop reaches that name. This can waste a lot of time for large grids with a typo near the end of a list, since the script needs to start over and re-generate any earlier images to finish making the grid. Also fixing typo in variable name in `draw_xy_grid`. 
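The core of the change is a fail-fast pass over the axis values before any image is generated, so a typo costs seconds instead of a mostly finished grid. An equivalent batch check, shown here as an editorial variant of the per-value loop in the diff (samplers_dict maps lowercased sampler names and aliases to indices):

    # reject every unknown sampler name up front, before the grid loop runs
    unknown = [v for v in valslist if v.lower() not in samplers_dict]
    if unknown:
        raise RuntimeError(f"Unknown sampler(s): {', '.join(unknown)}")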
--- scripts/xy_grid.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 26ae2199d..07040886a 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -145,7 +145,7 @@ def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend): ver_texts = [[images.GridAnnotation(y)] for y in y_labels] hor_texts = [[images.GridAnnotation(x)] for x in x_labels] - first_pocessed = None + first_processed = None state.job_count = len(xs) * len(ys) * p.n_iter @@ -154,8 +154,8 @@ def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend): state.job = f"{ix + iy * len(xs) + 1} out of {len(xs) * len(ys)}" processed = cell(x, y) - if first_pocessed is None: - first_pocessed = processed + if first_processed is None: + first_processed = processed try: res.append(processed.images[0]) @@ -166,9 +166,9 @@ def draw_xy_grid(p, xs, ys, x_labels, y_labels, cell, draw_legend): if draw_legend: grid = images.draw_grid_annotations(grid, res[0].width, res[0].height, hor_texts, ver_texts) - first_pocessed.images = [grid] + first_processed.images = [grid] - return first_pocessed + return first_processed re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*") @@ -216,7 +216,6 @@ class Script(scripts.Script): m = re_range.fullmatch(val) mc = re_range_count.fullmatch(val) if m is not None: - start = int(m.group(1)) end = int(m.group(2))+1 step = int(m.group(3)) if m.group(3) is not None else 1 @@ -258,6 +257,16 @@ class Script(scripts.Script): valslist = list(permutations(valslist)) valslist = [opt.type(x) for x in valslist] + + # Confirm options are valid before starting + if opt.label == "Sampler": + for sampler_val in valslist: + if sampler_val.lower() not in samplers_dict.keys(): + raise RuntimeError(f"Unknown sampler: {sampler_val}") + elif opt.label == "Checkpoint name": + for ckpt_val in valslist: + if modules.sd_models.get_closet_checkpoint_match(ckpt_val) is None: + raise RuntimeError(f"Checkpoint for {ckpt_val} not found") return valslist From a65a45272e8f26ee3bc52a5300b396266508a9a5 Mon Sep 17 00:00:00 2001 From: Brendan Byrd Date: Thu, 6 Oct 2022 19:31:36 -0400 Subject: [PATCH 206/460] Don't change the seed initially if "Keep -1 for seeds" is checked Fixes #1049 --- scripts/xy_grid.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py index 07040886a..a8f53befe 100644 --- a/scripts/xy_grid.py +++ b/scripts/xy_grid.py @@ -198,7 +198,9 @@ class Script(scripts.Script): return [x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds] def run(self, p, x_type, x_values, y_type, y_values, draw_legend, no_fixed_seeds): - modules.processing.fix_seed(p) + if not no_fixed_seeds: + modules.processing.fix_seed(p) + p.batch_size = 1 initial_hn = opts.sd_hypernetwork From 0609ce06c0778536cb368ac3867292f87c6d9fc7 Mon Sep 17 00:00:00 2001 From: Milly Date: Fri, 7 Oct 2022 03:36:08 +0900 Subject: [PATCH 207/460] Removed duplicate definition model_path --- modules/bsrgan_model.py | 2 -- modules/esrgan_model.py | 2 -- modules/ldsr_model.py | 2 -- modules/realesrgan_model.py | 2 -- modules/scunet_model.py | 2 -- modules/swinir_model.py | 2 -- modules/upscaler.py | 7 ++++--- 7 files changed, 4 insertions(+), 15 deletions(-) diff --git a/modules/bsrgan_model.py b/modules/bsrgan_model.py index 3bd80791a..737e1a761 100644 --- a/modules/bsrgan_model.py +++ b/modules/bsrgan_model.py @@ -10,13 +10,11 @@ from basicsr.utils.download_util import 
load_file_from_url import modules.upscaler from modules import devices, modelloader from modules.bsrgan_model_arch import RRDBNet -from modules.paths import models_path class UpscalerBSRGAN(modules.upscaler.Upscaler): def __init__(self, dirname): self.name = "BSRGAN" - self.model_path = os.path.join(models_path, self.name) self.model_name = "BSRGAN 4x" self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/BSRGAN.pth" self.user_path = dirname diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py index 285481242..3970e6e47 100644 --- a/modules/esrgan_model.py +++ b/modules/esrgan_model.py @@ -7,7 +7,6 @@ from basicsr.utils.download_util import load_file_from_url import modules.esrgam_model_arch as arch from modules import shared, modelloader, images, devices -from modules.paths import models_path from modules.upscaler import Upscaler, UpscalerData from modules.shared import opts @@ -76,7 +75,6 @@ class UpscalerESRGAN(Upscaler): self.model_name = "ESRGAN_4x" self.scalers = [] self.user_path = dirname - self.model_path = os.path.join(models_path, self.name) super().__init__() model_paths = self.find_models(ext_filter=[".pt", ".pth"]) scalers = [] diff --git a/modules/ldsr_model.py b/modules/ldsr_model.py index 1c1070fc6..8c4db44ad 100644 --- a/modules/ldsr_model.py +++ b/modules/ldsr_model.py @@ -7,13 +7,11 @@ from basicsr.utils.download_util import load_file_from_url from modules.upscaler import Upscaler, UpscalerData from modules.ldsr_model_arch import LDSR from modules import shared -from modules.paths import models_path class UpscalerLDSR(Upscaler): def __init__(self, user_path): self.name = "LDSR" - self.model_path = os.path.join(models_path, self.name) self.user_path = user_path self.model_url = "https://heibox.uni-heidelberg.de/f/578df07c8fc04ffbadf3/?dl=1" self.yaml_url = "https://heibox.uni-heidelberg.de/f/31a76b13ea27482981b4/?dl=1" diff --git a/modules/realesrgan_model.py b/modules/realesrgan_model.py index dc0123e02..3ac0b97ae 100644 --- a/modules/realesrgan_model.py +++ b/modules/realesrgan_model.py @@ -8,14 +8,12 @@ from basicsr.utils.download_util import load_file_from_url from realesrgan import RealESRGANer from modules.upscaler import Upscaler, UpscalerData -from modules.paths import models_path from modules.shared import cmd_opts, opts class UpscalerRealESRGAN(Upscaler): def __init__(self, path): self.name = "RealESRGAN" - self.model_path = os.path.join(models_path, self.name) self.user_path = path super().__init__() try: diff --git a/modules/scunet_model.py b/modules/scunet_model.py index fb64b7409..36a996bf0 100644 --- a/modules/scunet_model.py +++ b/modules/scunet_model.py @@ -9,14 +9,12 @@ from basicsr.utils.download_util import load_file_from_url import modules.upscaler from modules import devices, modelloader -from modules.paths import models_path from modules.scunet_model_arch import SCUNet as net class UpscalerScuNET(modules.upscaler.Upscaler): def __init__(self, dirname): self.name = "ScuNET" - self.model_path = os.path.join(models_path, self.name) self.model_name = "ScuNET GAN" self.model_name2 = "ScuNET PSNR" self.model_url = "https://github.com/cszn/KAIR/releases/download/v1.0/scunet_color_real_gan.pth" diff --git a/modules/swinir_model.py b/modules/swinir_model.py index 9bd454c69..fbd11f843 100644 --- a/modules/swinir_model.py +++ b/modules/swinir_model.py @@ -8,7 +8,6 @@ from basicsr.utils.download_util import load_file_from_url from tqdm import tqdm from modules import modelloader -from modules.paths import models_path from 
modules.shared import cmd_opts, opts, device from modules.swinir_model_arch import SwinIR as net from modules.upscaler import Upscaler, UpscalerData @@ -25,7 +24,6 @@ class UpscalerSwinIR(Upscaler): "/003_realSR_BSRGAN_DFOWMFC_s64w8_SwinIR" \ "-L_x4_GAN.pth " self.model_name = "SwinIR 4x" - self.model_path = os.path.join(models_path, self.name) self.user_path = dirname super().__init__() scalers = [] diff --git a/modules/upscaler.py b/modules/upscaler.py index d9d7c5e2a..34672be70 100644 --- a/modules/upscaler.py +++ b/modules/upscaler.py @@ -36,10 +36,11 @@ class Upscaler: self.half = not modules.shared.cmd_opts.no_half self.pre_pad = 0 self.mod_scale = None - if self.name is not None and create_dirs: + + if self.model_path is not None and self.name: self.model_path = os.path.join(models_path, self.name) - if not os.path.exists(self.model_path): - os.makedirs(self.model_path) + if self.model_path and create_dirs: + os.makedirs(self.model_path, exist_ok=True) try: import cv2 From bd833409ac7b8337040d521f6b65ced51e1b2ea8 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 9 Oct 2022 13:10:15 +0300 Subject: [PATCH 208/460] additional changes for saving pnginfo for #1803 --- modules/extras.py | 4 ++++ modules/processing.py | 6 ++++-- 2 files changed, 8 insertions(+), 2 deletions(-) diff --git a/modules/extras.py b/modules/extras.py index ef6e6de7a..39dd38060 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -98,6 +98,10 @@ def run_extras(extras_mode, image, image_folder, gfpgan_visibility, codeformer_v no_prompt=True, grid=False, pnginfo_section_name="extras", existing_info=existing_pnginfo, forced_filename=image_name if opts.use_original_name_batch else None) + if opts.enable_pnginfo: + image.info = existing_pnginfo + image.info["extras"] = info + outputs.append(image) devices.torch_gc() diff --git a/modules/processing.py b/modules/processing.py index 7fa1144e6..2c9913170 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -451,7 +451,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed: text = infotext(n, i) infotexts.append(text) - image.info["parameters"] = text + if opts.enable_pnginfo: + image.info["parameters"] = text output_images.append(image) del x_samples_ddim @@ -470,7 +471,8 @@ def process_images(p: StableDiffusionProcessing) -> Processed: if opts.return_grid: text = infotext() infotexts.insert(0, text) - grid.info["parameters"] = text + if opts.enable_pnginfo: + grid.info["parameters"] = text output_images.insert(0, grid) index_of_first_image = 1 From f4578b343ded3b8ccd1879ea0c0b3cdadfcc3a5f Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 9 Oct 2022 13:23:30 +0300 Subject: [PATCH 209/460] fix model switching not working properly if there is a different yaml config --- modules/sd_models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index 2101b18da..d0c74dd84 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -196,7 +196,8 @@ def reload_model_weights(sd_model, info=None): return if sd_model.sd_checkpoint_info.config != checkpoint_info.config: - return load_model() + shared.sd_model = load_model() + return shared.sd_model if shared.cmd_opts.lowvram or shared.cmd_opts.medvram: lowvram.send_everything_to_cpu() From 77a719648db515f10136e8b8483d5b16bda2eaeb Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 9 Oct 2022 13:48:04 +0300 Subject: [PATCH 210/460] fix logic error in #1832 --- 
 modules/upscaler.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/modules/upscaler.py b/modules/upscaler.py
index 34672be70..6ab2fb408 100644
--- a/modules/upscaler.py
+++ b/modules/upscaler.py
@@ -37,7 +37,7 @@ class Upscaler:
         self.pre_pad = 0
         self.mod_scale = None
 
-        if self.model_path is not None and self.name:
+        if self.model_path is None and self.name:
             self.model_path = os.path.join(models_path, self.name)
         if self.model_path and create_dirs:
             os.makedirs(self.model_path, exist_ok=True)

From 542a3d3a4a00c1383fbdaf938ceefef87cf834bb Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 14:33:22 +0300
Subject: [PATCH 211/460] fix broken hypernetworks in XY plot

---
 modules/hypernetwork.py | 7 +++++--
 scripts/xy_grid.py      | 9 +++------
 2 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/modules/hypernetwork.py b/modules/hypernetwork.py
index 19f1c2270..498bc9d8f 100644
--- a/modules/hypernetwork.py
+++ b/modules/hypernetwork.py
@@ -49,15 +49,18 @@ def list_hypernetworks(path):
 
 
 def load_hypernetwork(filename):
-    print(f"Loading hypernetwork {filename}")
     path = shared.hypernetworks.get(filename, None)
-    if (path is not None):
+    if path is not None:
+        print(f"Loading hypernetwork {filename}")
         try:
             shared.loaded_hypernetwork = Hypernetwork(path)
         except Exception:
             print(f"Error loading hypernetwork {path}", file=sys.stderr)
             print(traceback.format_exc(), file=sys.stderr)
     else:
+        if shared.loaded_hypernetwork is not None:
+            print(f"Unloading hypernetwork")
+
         shared.loaded_hypernetwork = None
 
 
diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index a8f53befe..fe9490673 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -10,7 +10,7 @@ import numpy as np
 
 import modules.scripts as scripts
 import gradio as gr
 
-from modules import images
+from modules import images, hypernetwork
 from modules.processing import process_images, Processed, get_correct_sampler
 from modules.shared import opts, cmd_opts, state
 import modules.shared as shared
@@ -80,8 +80,7 @@ def apply_checkpoint(p, x, xs):
 
 
 def apply_hypernetwork(p, x, xs):
-    hn = shared.hypernetworks.get(x, None)
-    opts.data["sd_hypernetwork"] = hn.name if hn is not None else 'None'
+    hypernetwork.load_hypernetwork(x)
 
 
 def format_value_add_label(p, opt, x):
@@ -203,8 +202,6 @@ class Script(scripts.Script):
 
         p.batch_size = 1
 
-        initial_hn = opts.sd_hypernetwork
-
         def process_axis(opt, vals):
             if opt.label == 'Nothing':
                 return [0]
@@ -321,6 +318,6 @@ class Script(scripts.Script):
 
         # restore checkpoint in case it was changed by axes
         modules.sd_models.reload_model_weights(shared.sd_model)
 
-        opts.data["sd_hypernetwork"] = initial_hn
+        hypernetwork.load_hypernetwork(opts.sd_hypernetwork)
 
         return processed

From d6d10a37bfd21568e74efb46137f906da96d5fdb Mon Sep 17 00:00:00 2001
From: William Moorehouse
Date: Sun, 9 Oct 2022 04:58:40 -0400
Subject: [PATCH 212/460] Added extended model details to infotext

---
 modules/processing.py | 3 +++
 modules/sd_models.py  | 3 ++-
 modules/shared.py     | 1 +
 3 files changed, 6 insertions(+), 1 deletion(-)

diff --git a/modules/processing.py b/modules/processing.py
index 2c9913170..d1bcee4aa 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -284,6 +284,9 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
         "Face restoration": (opts.face_restoration_model if p.restore_faces else None),
         "Size": f"{p.width}x{p.height}",
         "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not
shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), + "Model": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_model_name else shared.sd_model.sd_model_name), + "Model VAE": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_model_vae_name else shared.sd_model.sd_model_vae_name), + "Model hypernetwork": (None if not opts.add_extended_model_details_to_info or not opts.sd_hypernetwork else opts.sd_hypernetwork), "Batch size": (None if p.batch_size < 2 else p.batch_size), "Batch pos": (None if p.batch_size < 2 else position_in_batch), "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]), diff --git a/modules/sd_models.py b/modules/sd_models.py index d0c74dd84..3fa42329c 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -4,7 +4,7 @@ import sys from collections import namedtuple import torch from omegaconf import OmegaConf - +from pathlib import Path from ldm.util import instantiate_from_config @@ -158,6 +158,7 @@ def load_model_weights(model, checkpoint_info): vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"} model.first_stage_model.load_state_dict(vae_dict) + model.sd_model_vae_name = Path(vae_file).stem model.sd_model_hash = sd_model_hash model.sd_model_checkpoint = checkpoint_file diff --git a/modules/shared.py b/modules/shared.py index dffa0094b..ca63f7d8e 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -242,6 +242,7 @@ options_templates.update(options_section(('ui', "User interface"), { "return_grid": OptionInfo(True, "Show grid in results for web"), "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"), "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"), + "add_extended_model_details_to_info": OptionInfo(False, "Add extended model details to generation information (model name, VAE, hypernetwork)"), "font": OptionInfo("", "Font for image grids that have text"), "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"), "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"), From 006791c13d70e582eee766b7d0499e9821a86bf9 Mon Sep 17 00:00:00 2001 From: William Moorehouse Date: Sun, 9 Oct 2022 05:09:18 -0400 Subject: [PATCH 213/460] Fix grabbing the model name for infotext --- modules/processing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/processing.py b/modules/processing.py index d1bcee4aa..c035c9902 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -284,7 +284,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration "Face restoration": (opts.face_restoration_model if p.restore_faces else None), "Size": f"{p.width}x{p.height}", "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash), - "Model": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_model_name else shared.sd_model.sd_model_name), + "Model": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name), "Model VAE": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_model_vae_name else shared.sd_model.sd_model_vae_name), "Model hypernetwork": (None if not opts.add_extended_model_details_to_info or not 
opts.sd_hypernetwork else opts.sd_hypernetwork),
         "Batch size": (None if p.batch_size < 2 else p.batch_size),

From 594cbfd8fbe4078b43ceccf01509eeef3d6790c6 Mon Sep 17 00:00:00 2001
From: William Moorehouse
Date: Sun, 9 Oct 2022 07:27:11 -0400
Subject: [PATCH 214/460] Sanitize infotext output (for now)

---
 modules/processing.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index c035c9902..049f37698 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -284,9 +284,9 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
         "Face restoration": (opts.face_restoration_model if p.restore_faces else None),
         "Size": f"{p.width}x{p.height}",
         "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
-        "Model": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name),
-        "Model VAE": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_model_vae_name else shared.sd_model.sd_model_vae_name),
-        "Model hypernetwork": (None if not opts.add_extended_model_details_to_info or not opts.sd_hypernetwork else opts.sd_hypernetwork),
+        "Model": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
+        "Model VAE": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_model_vae_name else shared.sd_model.sd_model_vae_name.replace(',', '').replace(':', '')),
+        "Model hypernetwork": (None if not opts.add_extended_model_details_to_info or not opts.sd_hypernetwork else opts.sd_hypernetwork.replace(',', '').replace(':', '')),
         "Batch size": (None if p.batch_size < 2 else p.batch_size),
         "Batch pos": (None if p.batch_size < 2 else position_in_batch),
         "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
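Why the `.replace(',', '').replace(':', '')` calls in the patch above matter: the generation-parameters line is serialized as comma-separated `Key: value` pairs and is later parsed back by splitting on exactly those separators, so a model or hypernetwork name containing `,` or `:` would corrupt the round trip. A minimal illustrative sketch — the helper name `serialize_params` and the sample values are invented for this example, not part of the patch:

```python
# Hypothetical sketch of infotext-style serialization; stripping ',' and ':'
# from values keeps the "Key: value, Key: value" line parseable.
def serialize_params(params):
    def clean(value):
        return str(value).replace(',', '').replace(':', '')
    return ", ".join(f"{k}: {clean(v)}" for k, v in params.items() if v is not None)

print(serialize_params({"Model": "sd-v1-4, ema:only", "Hypernet": None, "Steps": 20}))
# -> "Model: sd-v1-4 emaonly, Steps: 20"
```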
From e6e8cabe0c9c335e0d72345602c069b198558b53 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 14:57:48 +0300
Subject: [PATCH 215/460] change up #2056 to make it work how I want it to,
 plus make XY plot write correct values to images

---
 modules/processing.py | 5 ++---
 modules/sd_models.py  | 2 --
 modules/shared.py     | 2 +-
 3 files changed, 3 insertions(+), 6 deletions(-)

diff --git a/modules/processing.py b/modules/processing.py
index 049f37698..04aed989d 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -284,9 +284,8 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments, iteration
         "Face restoration": (opts.face_restoration_model if p.restore_faces else None),
         "Size": f"{p.width}x{p.height}",
         "Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
-        "Model": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
-        "Model VAE": (None if not opts.add_extended_model_details_to_info or not shared.sd_model.sd_model_vae_name else shared.sd_model.sd_model_vae_name.replace(',', '').replace(':', '')),
-        "Model hypernetwork": (None if not opts.add_extended_model_details_to_info or not opts.sd_hypernetwork else opts.sd_hypernetwork.replace(',', '').replace(':', '')),
+        "Model": (None if not opts.add_model_name_to_info or not shared.sd_model.sd_checkpoint_info.model_name else shared.sd_model.sd_checkpoint_info.model_name.replace(',', '').replace(':', '')),
+        "Hypernet": (None if shared.loaded_hypernetwork is None else shared.loaded_hypernetwork.name.replace(',', '').replace(':', '')),
         "Batch size": (None if p.batch_size < 2 else p.batch_size),
         "Batch pos": (None if p.batch_size < 2 else position_in_batch),
         "Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
diff --git a/modules/sd_models.py b/modules/sd_models.py
index 3fa42329c..e63d3c292 100644
--- a/modules/sd_models.py
+++ b/modules/sd_models.py
@@ -4,7 +4,6 @@ import sys
 from collections import namedtuple
 import torch
 from omegaconf import OmegaConf
-from pathlib import Path
 
 from ldm.util import instantiate_from_config
 
@@ -158,7 +157,6 @@ def load_model_weights(model, checkpoint_info):
             vae_dict = {k: v for k, v in vae_ckpt["state_dict"].items() if k[0:4] != "loss"}
             model.first_stage_model.load_state_dict(vae_dict)
-            model.sd_model_vae_name = Path(vae_file).stem
 
     model.sd_model_hash = sd_model_hash
     model.sd_model_checkpoint = checkpoint_file
diff --git a/modules/shared.py b/modules/shared.py
index ca63f7d8e..6ecc2503a 100644
--- a/modules/shared.py
+++ b/modules/shared.py
@@ -242,7 +242,7 @@ options_templates.update(options_section(('ui', "User interface"), {
     "return_grid": OptionInfo(True, "Show grid in results for web"),
     "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
     "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
-    "add_extended_model_details_to_info": OptionInfo(False, "Add extended model details to generation information (model name, VAE, hypernetwork)"),
+    "add_model_name_to_info": OptionInfo(False, "Add model name to generation information"),
     "font": OptionInfo("", "Font for image grids that have text"),
     "js_modal_lightbox": OptionInfo(True, "Enable full page image viewer"),
     "js_modal_lightbox_initially_zoomed": OptionInfo(True, "Show images zoomed in by default in full page image viewer"),

From 2c52f4da7ff80a3ec277105f4db6146c6379898a Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 15:01:42 +0300
Subject: [PATCH 216/460] fix broken samplers in XY plot

---
 scripts/xy_grid.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/scripts/xy_grid.py b/scripts/xy_grid.py
index fe9490673..c89ca1a9b 100644
--- a/scripts/xy_grid.py
+++ b/scripts/xy_grid.py
@@ -259,6 +259,7 @@ class Script(scripts.Script):
 
             # Confirm options are valid before starting
             if opt.label == "Sampler":
+                samplers_dict = build_samplers_dict(p)
                 for sampler_val in valslist:
                     if sampler_val.lower() not in samplers_dict.keys():
                         raise RuntimeError(f"Unknown sampler: {sampler_val}")

From 9d1138e2940c4ddcd2685bcba12c7d407e9e0ec5 Mon Sep 17 00:00:00 2001
From: AUTOMATIC <16777216c@gmail.com>
Date: Sun, 9 Oct 2022 15:08:10 +0300
Subject: [PATCH 217/460] fix typo in filename for ESRGAN arch

---
 modules/esrgan_model.py                                | 2 +-
 modules/{esrgam_model_arch.py => esrgan_model_arch.py} | 0
 2 files changed, 1 insertion(+), 1 deletion(-)
 rename modules/{esrgam_model_arch.py => esrgan_model_arch.py} (100%)

diff --git a/modules/esrgan_model.py b/modules/esrgan_model.py
index 3970e6e47..46ad0da3c 100644
--- a/modules/esrgan_model.py
+++ b/modules/esrgan_model.py
@@ -5,7 +5,7 @@ import torch
 from PIL import Image
 from basicsr.utils.download_util import
load_file_from_url -import modules.esrgam_model_arch as arch +import modules.esrgan_model_arch as arch from modules import shared, modelloader, images, devices from modules.upscaler import Upscaler, UpscalerData from modules.shared import opts diff --git a/modules/esrgam_model_arch.py b/modules/esrgan_model_arch.py similarity index 100% rename from modules/esrgam_model_arch.py rename to modules/esrgan_model_arch.py From f8197976ef5f0523faffb2b237e9166fb2bedecd Mon Sep 17 00:00:00 2001 From: Greendayle Date: Sun, 9 Oct 2022 13:44:13 +0200 Subject: [PATCH 218/460] Shielded launch enviroment creation stuff from multiprocessing --- launch.py | 178 ++++++++++++++++++++++++++---------------------------- 1 file changed, 87 insertions(+), 91 deletions(-) diff --git a/launch.py b/launch.py index b0a59b6a1..d1a4fd6ae 100644 --- a/launch.py +++ b/launch.py @@ -6,40 +6,11 @@ import importlib.util import shlex import platform -dir_repos = "repositories" -dir_tmp = "tmp" - -python = sys.executable -git = os.environ.get('GIT', "git") -torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113") -requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") -commandline_args = os.environ.get('COMMANDLINE_ARGS', "") - -gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") -clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") - -stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc") -taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6") -k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "f4e99857772fc3a126ba886aadf795a332774878") -codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af") -blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") - -args = shlex.split(commandline_args) - def extract_arg(args, name): return [x for x in args if x != name], name in args -args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test') -xformers = '--xformers' in args -deepdanbooru = '--deepdanbooru' in args - - -def repo_dir(name): - return os.path.join(dir_repos, name) - - def run(command, desc=None, errdesc=None): if desc is not None: print(desc) @@ -59,23 +30,11 @@ stderr: {result.stderr.decode(encoding="utf8", errors="ignore") if len(result.st return result.stdout.decode(encoding="utf8", errors="ignore") -def run_python(code, desc=None, errdesc=None): - return run(f'"{python}" -c "{code}"', desc, errdesc) - - -def run_pip(args, desc=None): - return run(f'"{python}" -m pip {args} --prefer-binary', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}") - - def check_run(command): result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) return result.returncode == 0 -def check_run_python(code): - return check_run(f'"{python}" -c "{code}"') - - def is_installed(package): try: spec = importlib.util.find_spec(package) @@ -85,80 +44,117 @@ def is_installed(package): return spec is not None -def git_clone(url, dir, name, commithash=None): - # TODO clone into temporary dir and move if successful +def prepare_enviroment(): + dir_repos = 
"repositories" - if os.path.exists(dir): - if commithash is None: + python = sys.executable + git = os.environ.get('GIT', "git") + torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113") + requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") + commandline_args = os.environ.get('COMMANDLINE_ARGS', "") + + gfpgan_package = os.environ.get('GFPGAN_PACKAGE', "git+https://github.com/TencentARC/GFPGAN.git@8d2447a2d918f8eba5a4a01463fd48e45126a379") + clip_package = os.environ.get('CLIP_PACKAGE', "git+https://github.com/openai/CLIP.git@d50d76daa670286dd6cacf3bcd80b5e4823fc8e1") + + stable_diffusion_commit_hash = os.environ.get('STABLE_DIFFUSION_COMMIT_HASH', "69ae4b35e0a0f6ee1af8bb9a5d0016ccb27e36dc") + taming_transformers_commit_hash = os.environ.get('TAMING_TRANSFORMERS_COMMIT_HASH', "24268930bf1dce879235a7fddd0b2355b84d7ea6") + k_diffusion_commit_hash = os.environ.get('K_DIFFUSION_COMMIT_HASH', "f4e99857772fc3a126ba886aadf795a332774878") + codeformer_commit_hash = os.environ.get('CODEFORMER_COMMIT_HASH', "c5b4593074ba6214284d6acd5f1719b6c5d739af") + blip_commit_hash = os.environ.get('BLIP_COMMIT_HASH', "48211a1594f1321b00f14c9f7a5b4813144b2fb9") + + args = shlex.split(commandline_args) + + args, skip_torch_cuda_test = extract_arg(args, '--skip-torch-cuda-test') + xformers = '--xformers' in args + deepdanbooru = '--deepdanbooru' in args + + def repo_dir(name): + return os.path.join(dir_repos, name) + + def run_python(code, desc=None, errdesc=None): + return run(f'"{python}" -c "{code}"', desc, errdesc) + + def run_pip(args, desc=None): + return run(f'"{python}" -m pip {args} --prefer-binary', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}") + + def check_run_python(code): + return check_run(f'"{python}" -c "{code}"') + + def git_clone(url, dir, name, commithash=None): + # TODO clone into temporary dir and move if successful + + if os.path.exists(dir): + if commithash is None: + return + + current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip() + if current_hash == commithash: + return + + run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}") + run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commint for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}") return - current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip() - if current_hash == commithash: - return + run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}") - run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}") - run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commint for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}") - return + if commithash is not None: + run(f'"{git}" -C {dir} checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}") - run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}") + try: + commit = run(f"{git} rev-parse HEAD").strip() + except Exception: + commit = "" - if commithash is not None: - run(f'"{git}" -C {dir} checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}") + print(f"Python {sys.version}") + print(f"Commit hash: {commit}") + if not 
is_installed("torch") or not is_installed("torchvision"): + run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch") -try: - commit = run(f"{git} rev-parse HEAD").strip() -except Exception: - commit = "" + if not skip_torch_cuda_test: + run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'") -print(f"Python {sys.version}") -print(f"Commit hash: {commit}") + if not is_installed("gfpgan"): + run_pip(f"install {gfpgan_package}", "gfpgan") + if not is_installed("clip"): + run_pip(f"install {clip_package}", "clip") -if not is_installed("torch") or not is_installed("torchvision"): - run(f'"{python}" -m {torch_command}', "Installing torch and torchvision", "Couldn't install torch") + if not is_installed("xformers") and xformers and platform.python_version().startswith("3.10"): + if platform.system() == "Windows": + run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers") + elif platform.system() == "Linux": + run_pip("install xformers", "xformers") -if not skip_torch_cuda_test: - run_python("import torch; assert torch.cuda.is_available(), 'Torch is not able to use GPU; add --skip-torch-cuda-test to COMMANDLINE_ARGS variable to disable this check'") + if not is_installed("deepdanbooru") and deepdanbooru: + run_pip("install git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru") -if not is_installed("gfpgan"): - run_pip(f"install {gfpgan_package}", "gfpgan") + os.makedirs(dir_repos, exist_ok=True) -if not is_installed("clip"): - run_pip(f"install {clip_package}", "clip") + git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash) + git_clone("https://github.com/CompVis/taming-transformers.git", repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash) + git_clone("https://github.com/crowsonkb/k-diffusion.git", repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash) + git_clone("https://github.com/sczhou/CodeFormer.git", repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash) + git_clone("https://github.com/salesforce/BLIP.git", repo_dir('BLIP'), "BLIP", blip_commit_hash) -if not is_installed("xformers") and xformers and platform.python_version().startswith("3.10"): - if platform.system() == "Windows": - run_pip("install https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/a/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl", "xformers") - elif platform.system() == "Linux": - run_pip("install xformers", "xformers") + if not is_installed("lpips"): + run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer") -if not is_installed("deepdanbooru") and deepdanbooru: - run_pip("install git+https://github.com/KichangKim/DeepDanbooru.git@edf73df4cdaeea2cf00e9ac08bd8a9026b7a7b26#egg=deepdanbooru[tensorflow] tensorflow==2.10.0 tensorflow-io==0.27.0", "deepdanbooru") + run_pip(f"install -r {requirements_file}", "requirements for Web UI") -os.makedirs(dir_repos, exist_ok=True) + sys.argv += args -git_clone("https://github.com/CompVis/stable-diffusion.git", repo_dir('stable-diffusion'), "Stable Diffusion", stable_diffusion_commit_hash) 
-git_clone("https://github.com/CompVis/taming-transformers.git", repo_dir('taming-transformers'), "Taming Transformers", taming_transformers_commit_hash) -git_clone("https://github.com/crowsonkb/k-diffusion.git", repo_dir('k-diffusion'), "K-diffusion", k_diffusion_commit_hash) -git_clone("https://github.com/sczhou/CodeFormer.git", repo_dir('CodeFormer'), "CodeFormer", codeformer_commit_hash) -git_clone("https://github.com/salesforce/BLIP.git", repo_dir('BLIP'), "BLIP", blip_commit_hash) + if "--exit" in args: + print("Exiting because of --exit argument") + exit(0) -if not is_installed("lpips"): - run_pip(f"install -r {os.path.join(repo_dir('CodeFormer'), 'requirements.txt')}", "requirements for CodeFormer") - -run_pip(f"install -r {requirements_file}", "requirements for Web UI") - -sys.argv += args - -if "--exit" in args: - print("Exiting because of --exit argument") - exit(0) def start_webui(): print(f"Launching Web UI with arguments: {' '.join(sys.argv[1:])}") import webui webui.webui() + if __name__ == "__main__": + prepare_enviroment() start_webui() From bba2ac8324ccd1a67c78e5f59babae8323ec7dc6 Mon Sep 17 00:00:00 2001 From: AUTOMATIC <16777216c@gmail.com> Date: Sun, 9 Oct 2022 15:22:51 +0300 Subject: [PATCH 219/460] reshuffle the code a bit in launcher to keep functions in one place for #2069 --- launch.py | 77 +++++++++++++++++++++++++++++-------------------------- 1 file changed, 41 insertions(+), 36 deletions(-) diff --git a/launch.py b/launch.py index d1a4fd6ae..f42f557de 100644 --- a/launch.py +++ b/launch.py @@ -6,6 +6,10 @@ import importlib.util import shlex import platform +dir_repos = "repositories" +python = sys.executable +git = os.environ.get('GIT', "git") + def extract_arg(args, name): return [x for x in args if x != name], name in args @@ -44,11 +48,44 @@ def is_installed(package): return spec is not None -def prepare_enviroment(): - dir_repos = "repositories" +def repo_dir(name): + return os.path.join(dir_repos, name) - python = sys.executable - git = os.environ.get('GIT', "git") + +def run_python(code, desc=None, errdesc=None): + return run(f'"{python}" -c "{code}"', desc, errdesc) + + +def run_pip(args, desc=None): + return run(f'"{python}" -m pip {args} --prefer-binary', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}") + + +def check_run_python(code): + return check_run(f'"{python}" -c "{code}"') + + +def git_clone(url, dir, name, commithash=None): + # TODO clone into temporary dir and move if successful + + if os.path.exists(dir): + if commithash is None: + return + + current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip() + if current_hash == commithash: + return + + run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}") + run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commint for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}") + return + + run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}") + + if commithash is not None: + run(f'"{git}" -C {dir} checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}") + + +def prepare_enviroment(): torch_command = os.environ.get('TORCH_COMMAND', "pip install torch==1.12.1+cu113 torchvision==0.13.1+cu113 --extra-index-url https://download.pytorch.org/whl/cu113") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") commandline_args = os.environ.get('COMMANDLINE_ARGS', "") @@ 
-68,38 +105,6 @@ def prepare_enviroment():
     xformers = '--xformers' in args
     deepdanbooru = '--deepdanbooru' in args
 
-    def repo_dir(name):
-        return os.path.join(dir_repos, name)
-
-    def run_python(code, desc=None, errdesc=None):
-        return run(f'"{python}" -c "{code}"', desc, errdesc)
-
-    def run_pip(args, desc=None):
-        return run(f'"{python}" -m pip {args} --prefer-binary', desc=f"Installing {desc}", errdesc=f"Couldn't install {desc}")
-
-    def check_run_python(code):
-        return check_run(f'"{python}" -c "{code}"')
-
-    def git_clone(url, dir, name, commithash=None):
-        # TODO clone into temporary dir and move if successful
-
-        if os.path.exists(dir):
-            if commithash is None:
-                return
-
-            current_hash = run(f'"{git}" -C {dir} rev-parse HEAD', None, f"Couldn't determine {name}'s hash: {commithash}").strip()
-            if current_hash == commithash:
-                return
-
-            run(f'"{git}" -C {dir} fetch', f"Fetching updates for {name}...", f"Couldn't fetch {name}")
-            run(f'"{git}" -C {dir} checkout {commithash}', f"Checking out commint for {name} with hash: {commithash}...", f"Couldn't checkout commit {commithash} for {name}")
-            return
-
-        run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}")
-
-        if commithash is not None:
-            run(f'"{git}" -C {dir} checkout {commithash}', None, "Couldn't checkout {name}'s hash: {commithash}")
-
     try:
         commit = run(f"{git} rev-parse HEAD").strip()
     except Exception:
         commit = ""
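The point of the launch.py restructuring in the two patches above: with Python's `multiprocessing` under the default `spawn` start method (always used on Windows), every worker process re-imports the main module, so any environment-setup code left at module top level would run again in each worker. Moving the setup into a function that is only called under the `__main__` guard shields it. A minimal, self-contained sketch of the pattern — the function names here are illustrative, not taken from launch.py:

```python
import multiprocessing


def prepare_environment():
    # stands in for the pip installs and git clones done by launch.py;
    # it must run exactly once, in the parent process
    print("preparing environment")


def worker(n):
    return n * n


if __name__ == "__main__":
    # spawned workers re-import this module; without the guard above,
    # prepare_environment() would execute once per worker as well
    prepare_environment()
    with multiprocessing.Pool(processes=2) as pool:
        print(pool.map(worker, range(4)))
```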
"pytorch_lightning.callbacks" and name == 'model_checkpoint': + import pytorch_lightning.callbacks + return pytorch_lightning.callbacks.model_checkpoint + if module == "pytorch_lightning.callbacks.model_checkpoint" and name == 'ModelCheckpoint': + import pytorch_lightning.callbacks.model_checkpoint + return pytorch_lightning.callbacks.model_checkpoint.ModelCheckpoint + if module == "__builtin__" and name == 'set': + return set + + # Forbid everything else. + raise pickle.UnpicklingError(f"global '{module}/{name}' is forbidden") + + +def check_pt(filename): + try: + + # new pytorch format is a zip file + with zipfile.ZipFile(filename) as z: + with z.open('archive/data.pkl') as file: + unpickler = RestrictedUnpickler(file) + unpickler.load() + + except zipfile.BadZipfile: + + # if it's not a zip file, it's an olf pytorch format, with five objects written to pickle + with open(filename, "rb") as file: + unpickler = RestrictedUnpickler(file) + for i in range(5): + unpickler.load() + + +def load(filename, *args, **kwargs): + from modules import shared + + try: + if not shared.cmd_opts.disable_safe_unpickle: + check_pt(filename) + + except Exception: + print(f"Error verifying pickled file from {filename}:", file=sys.stderr) + print(traceback.format_exc(), file=sys.stderr) + print(f"\nThe file may be malicious, so the program is not going to read it.", file=sys.stderr) + print(f"You can skip this check with --disable-safe-unpickle commandline argument.", file=sys.stderr) + return None + + return unsafe_torch_load(filename, *args, **kwargs) + + +unsafe_torch_load = torch.load +torch.load = load diff --git a/modules/shared.py b/modules/shared.py index 6ecc2503a..3d7f08e14 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -65,6 +65,7 @@ parser.add_argument("--autolaunch", action='store_true', help="open the webui UR parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False) parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False) parser.add_argument("--enable-console-prompts", action='store_true', help="print prompts to console when generating with txt2img and img2img", default=False) +parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False) cmd_opts = parser.parse_args() From d3cd46b0388918128af203fda37fa63461c46611 Mon Sep 17 00:00:00 2001 From: DepFA <35278260+dfaker@users.noreply.github.com> Date: Sun, 9 Oct 2022 16:19:33 +0100 Subject: [PATCH 221/460] Update lightbox to change displayed image as soon as generation is complete (#1933) * add updateOnBackgroundChange * typo fixes. 
From d3cd46b0388918128af203fda37fa63461c46611 Mon Sep 17 00:00:00 2001
From: DepFA <35278260+dfaker@users.noreply.github.com>
Date: Sun, 9 Oct 2022 16:19:33 +0100
Subject: [PATCH 221/460] Update lightbox to change displayed image as soon as
 generation is complete (#1933)

* add updateOnBackgroundChange

* typo fixes.

* reindent to 4 spaces
---
 javascript/imageviewer.js | 168 ++++++++++++++++++++++----------
 1 file changed, 96 insertions(+), 72 deletions(-)

diff --git a/javascript/imageviewer.js b/javascript/imageviewer.js
index 6a00c0da4..65a33dd78 100644
--- a/javascript/imageviewer.js
+++ b/javascript/imageviewer.js
@@ -1,72 +1,97 @@
 // A full size 'lightbox' preview modal shown when left clicking on gallery previews
-
 function closeModal() {
-  gradioApp().getElementById("lightboxModal").style.display = "none";
+    gradioApp().getElementById("lightboxModal").style.display = "none";
 }
 
 function showModal(event) {
-  const source = event.target || event.srcElement;
-  const modalImage = gradioApp().getElementById("modalImage")
-  const lb = gradioApp().getElementById("lightboxModal")
-  modalImage.src = source.src
-  if (modalImage.style.display === 'none') {
-    lb.style.setProperty('background-image', 'url(' + source.src + ')');
-  }
-  lb.style.display = "block";
-  lb.focus()
-  event.stopPropagation()
+    const source = event.target || event.srcElement;
+    const modalImage = gradioApp().getElementById("modalImage")
+    const lb = gradioApp().getElementById("lightboxModal")
+    modalImage.src = source.src
+    if (modalImage.style.display === 'none') {
+        lb.style.setProperty('background-image', 'url(' + source.src + ')');
+    }
+    lb.style.display = "block";
+    lb.focus()
+    event.stopPropagation()
 }
 
 function negmod(n, m) {
-  return ((n % m) + m) % m;
+    return ((n % m) + m) % m;
 }
 
-function modalImageSwitch(offset){
-  var allgalleryButtons = gradioApp().querySelectorAll(".gallery-item.transition-all")
-  var galleryButtons = []
-  allgalleryButtons.forEach(function(elem){
-    if(elem.parentElement.offsetParent){
-      galleryButtons.push(elem);
+function updateOnBackgroundChange() {
+    const modalImage = gradioApp().getElementById("modalImage")
+    if (modalImage && modalImage.offsetParent) {
+        let allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
+        let currentButton = null
+        allcurrentButtons.forEach(function(elem) {
+            if (elem.parentElement.offsetParent) {
+                currentButton = elem;
+            }
+        })
+
+        if (modalImage.src != currentButton.children[0].src) {
+            modalImage.src = currentButton.children[0].src;
+            if (modalImage.style.display === 'none') {
+                modal.style.setProperty('background-image', `url(${modalImage.src})`)
+            }
+        }
     }
-  })
+}
 
-  if(galleryButtons.length>1){
-    var allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
-    var currentButton = null
-    allcurrentButtons.forEach(function(elem){
-      if(elem.parentElement.offsetParent){
-        currentButton = elem;
+function modalImageSwitch(offset) {
+    var allgalleryButtons = gradioApp().querySelectorAll(".gallery-item.transition-all")
+    var galleryButtons = []
+    allgalleryButtons.forEach(function(elem) {
+        if (elem.parentElement.offsetParent) {
+            galleryButtons.push(elem);
         }
-      })
+    })
 
-    var result = -1
-    galleryButtons.forEach(function(v, i){ if(v==currentButton) { result = i } })
+    if (galleryButtons.length > 1) {
+        var allcurrentButtons = gradioApp().querySelectorAll(".gallery-item.transition-all.\\!ring-2")
+        var currentButton = null
+        allcurrentButtons.forEach(function(elem) {
+            if (elem.parentElement.offsetParent) {
+                currentButton = elem;
+            }
+        })
 
-    if(result != -1){
-      nextButton = galleryButtons[negmod((result+offset),galleryButtons.length)]
-      nextButton.click()
-      const modalImage = gradioApp().getElementById("modalImage");
-      const modal = gradioApp().getElementById("lightboxModal");
-      modalImage.src =
nextButton.children[0].src; - if (modalImage.style.display === 'none') { - modal.style.setProperty('background-image', `url(${modalImage.src})`) + var result = -1 + galleryButtons.forEach(function(v, i) { + if (v == currentButton) { + result = i + } + }) + + if (result != -1) { + nextButton = galleryButtons[negmod((result + offset), galleryButtons.length)] + nextButton.click() + const modalImage = gradioApp().getElementById("modalImage"); + const modal = gradioApp().getElementById("lightboxModal"); + modalImage.src = nextButton.children[0].src; + if (modalImage.style.display === 'none') { + modal.style.setProperty('background-image', `url(${modalImage.src})`) + } + setTimeout(function() { + modal.focus() + }, 10) } - setTimeout( function(){modal.focus()},10) - } - } + } } -function modalNextImage(event){ - modalImageSwitch(1) - event.stopPropagation() +function modalNextImage(event) { + modalImageSwitch(1) + event.stopPropagation() } -function modalPrevImage(event){ - modalImageSwitch(-1) - event.stopPropagation() +function modalPrevImage(event) { + modalImageSwitch(-1) + event.stopPropagation() } -function modalKeyHandler(event){ +function modalKeyHandler(event) { switch (event.key) { case "ArrowLeft": modalPrevImage(event) @@ -80,24 +105,22 @@ function modalKeyHandler(event){ } } -function showGalleryImage(){ +function showGalleryImage() { setTimeout(function() { fullImg_preview = gradioApp().querySelectorAll('img.w-full.object-contain') - - if(fullImg_preview != null){ + + if (fullImg_preview != null) { fullImg_preview.forEach(function function_name(e) { if (e.dataset.modded) return; e.dataset.modded = true; if(e && e.parentElement.tagName == 'DIV'){ - e.style.cursor='pointer' - e.addEventListener('click', function (evt) { if(!opts.js_modal_lightbox) return; modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed) showModal(evt) - },true); + }, true); } }); } @@ -105,21 +128,21 @@ function showGalleryImage(){ }, 100); } -function modalZoomSet(modalImage, enable){ - if( enable ){ +function modalZoomSet(modalImage, enable) { + if (enable) { modalImage.classList.add('modalImageFullscreen'); - } else{ + } else { modalImage.classList.remove('modalImageFullscreen'); } } -function modalZoomToggle(event){ +function modalZoomToggle(event) { modalImage = gradioApp().getElementById("modalImage"); modalZoomSet(modalImage, !modalImage.classList.contains('modalImageFullscreen')) event.stopPropagation() } -function modalTileImageToggle(event){ +function modalTileImageToggle(event) { const modalImage = gradioApp().getElementById("modalImage"); const modal = gradioApp().getElementById("lightboxModal"); const isTiling = modalImage.style.display === 'none'; @@ -134,17 +157,18 @@ function modalTileImageToggle(event){ event.stopPropagation() } -function galleryImageHandler(e){ - if(e && e.parentElement.tagName == 'BUTTON'){ +function galleryImageHandler(e) { + if (e && e.parentElement.tagName == 'BUTTON') { e.onclick = showGalleryImage; } } -onUiUpdate(function(){ +onUiUpdate(function() { fullImg_preview = gradioApp().querySelectorAll('img.w-full') - if(fullImg_preview != null){ - fullImg_preview.forEach(galleryImageHandler); + if (fullImg_preview != null) { + fullImg_preview.forEach(galleryImageHandler); } + updateOnBackgroundChange(); }) document.addEventListener("DOMContentLoaded", function() { @@ -152,13 +176,13 @@ document.addEventListener("DOMContentLoaded", function() { const modal = document.createElement('div') modal.onclick = closeModal; modal.id = 
"lightboxModal"; - modal.tabIndex=0 + modal.tabIndex = 0 modal.addEventListener('keydown', modalKeyHandler, true) const modalControls = document.createElement('div') modalControls.className = 'modalControls gradio-container'; modal.append(modalControls); - + const modalZoom = document.createElement('span') modalZoom.className = 'modalZoom cursor'; modalZoom.innerHTML = '⤡' @@ -183,30 +207,30 @@ document.addEventListener("DOMContentLoaded", function() { const modalImage = document.createElement('img') modalImage.id = 'modalImage'; modalImage.onclick = closeModal; - modalImage.tabIndex=0 + modalImage.tabIndex = 0 modalImage.addEventListener('keydown', modalKeyHandler, true) modal.appendChild(modalImage) const modalPrev = document.createElement('a') modalPrev.className = 'modalPrev'; modalPrev.innerHTML = '❮' - modalPrev.tabIndex=0 - modalPrev.addEventListener('click',modalPrevImage,true); + modalPrev.tabIndex = 0 + modalPrev.addEventListener('click', modalPrevImage, true); modalPrev.addEventListener('keydown', modalKeyHandler, true) modal.appendChild(modalPrev) const modalNext = document.createElement('a') modalNext.className = 'modalNext'; modalNext.innerHTML = '❯' - modalNext.tabIndex=0 - modalNext.addEventListener('click',modalNextImage,true); + modalNext.tabIndex = 0 + modalNext.addEventListener('click', modalNextImage, true); modalNext.addEventListener('keydown', modalKeyHandler, true) modal.appendChild(modalNext) gradioApp().getRootNode().appendChild(modal) - + document.body.appendChild(modalFragment); - + }); From 9ecea0a8d6bdc434755e11128487fd62f1ff130f Mon Sep 17 00:00:00 2001 From: Artem Zagidulin Date: Sun, 9 Oct 2022 16:14:56 +0300 Subject: [PATCH 222/460] fix missing png info when Extras Batch Process --- modules/extras.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/extras.py b/modules/extras.py index 39dd38060..41e8612c7 100644 --- a/modules/extras.py +++ b/modules/extras.py @@ -29,7 +29,7 @@ def run_extras(extras_mode, image, image_folder, gfpgan_visibility, codeformer_v if extras_mode == 1: #convert file to pillow image for img in image_folder: - image = Image.fromarray(np.array(Image.open(img))) + image = Image.open(img) imageArr.append(image) imageNameArr.append(os.path.splitext(img.orig_name)[0]) else: From a2d70f25bf51264d8d68f4f36937b390f79334a7 Mon Sep 17 00:00:00 2001 From: supersteve3d <39339941+supersteve3d@users.noreply.github.com> Date: Sun, 9 Oct 2022 23:40:18 +0800 Subject: [PATCH 223/460] Add files via upload Updated txt2img screenshot (UI as of Oct 9th) for github webui / README.md --- txt2img_Screenshot.png | Bin 539132 -> 337094 bytes 1 file changed, 0 insertions(+), 0 deletions(-) diff --git a/txt2img_Screenshot.png b/txt2img_Screenshot.png index fedd538e3cc7ea14ff5bed224dfbcd9765ec35f4..6e2759a4c8aa2d05e1334e871b2a451f1104ba60 100644 GIT binary patch literal 337094 zcmbrlXIPV2_dV>0f{G#*K$NN=3IU``FM@#dA{tsoS_BlOOCJPOM2fT!TBLUnLg+;S zsVPz;gfdbRIzoUDLdbjLJo7xi5AV1Cb%l^}ICnW`uf5jVn`l#G-E(KIo;h~x*g1VY zZS!NtPK+Eoc6|F3Gw?~5tXmZD>kn^pU9Dr4Se_-|%?YTck>;^u)$yzcPfh~wPruXy zdmlT-)_L^vN6(95r(?$$1^U{Wmaptr=>Z9sp-sf-<%!8Q|7ZKtb>57$?BU@tRnm{= z*D@SiCq0l$m6b~p$V#PAJMpaL${Sgt4_(E=Bdtzo{q^|Ub@}RO9fwP0kIk=-Yl4nf z-xe--uOlEFqoEfq6!{|&EtG^7`eSo?BcOf5n=#pWL)uI8bRQWRxa>>$_3PbB$|>p4 zKzdBGwU+kbZg=c8q>-PXOE$aKDZ$9!PYDtc6#JVohKGN0(&LR|InJ*;txxvk->-l0 z?+A9SUfzz_YYORx_EJeFHKV26#PlALo`b6 z|Mv`rd>cLIx+PAb>^$0x{x;Y3fnH$UJt@f$`sZc>6D_4rHh9hnMOrt9PF)c|JLzY~WBUgrkmuiD)qXDkZ{9?}fB36@yGtTR0WweWzC<*JTsiybBieAW+ 
From a2d70f25bf51264d8d68f4f36937b390f79334a7 Mon Sep 17 00:00:00 2001
From: supersteve3d <39339941+supersteve3d@users.noreply.github.com>
Date: Sun, 9 Oct 2022 23:40:18 +0800
Subject: [PATCH 223/460] Add files via upload

Updated txt2img screenshot (UI as of Oct 9th) for github webui / README.md
---
 txt2img_Screenshot.png | Bin 539132 -> 337094 bytes
 1 file changed, 0 insertions(+), 0 deletions(-)

diff --git a/txt2img_Screenshot.png b/txt2img_Screenshot.png
index fedd538e3cc7ea14ff5bed224dfbcd9765ec35f4..6e2759a4c8aa2d05e1334e871b2a451f1104ba60 100644
GIT binary patch
literal 337094
[binary PNG delta omitted: txt2img_Screenshot.png, 539132 -> 337094 bytes]
zixf)d1RU3e=PxxX{8lW~7Td*WTsS>Fa2tBPWoNY9Q58%M;qH(mXpwHNR@ny^yBTDvRnjk&oAH*eu+3->wXlc|9F+Nn_Dt{4Bu}x3tf$INytK>rq)d-V7gj=kVIt zy}8aOpijll-{!?oDK&Ex18?Hm6?oTfql?1e2>!r(t z!mf0n5m4>+_FA}9&h)_nglo9lujaFjviv*lKYW8~cB6E0`F2Ef=*Wz+L^*hTja907 zi14l?&Vkem8D4Ik5Bcr9#VD29hOpMvcb{0FDFq6^wgWovqxq`R(#Y zC=r*-7+mT!(5qq&It`$(-Ca>3k=NE5{uAs0b^cD{9sppe54ny^^j3-~dZg7##fKuH zFo|5=7zdH`M!Ji0fQrdx$>Z(kbA(>91i-kQ;nxVXkcpl_COAs8&hv0NZGzm^3%G6Z zA*u%_!XN@jc{V@-9PS{T=0(wuYoSBdl-)8vx$?Y0Zf4GI=sUw(mRgyY%4VqpeJ|o zlIln0q%Zo&*dU z4g?2v`)twAb9Z_NC_w4&9`JAKq@Vk^>Z;6b3gDIdLzKqsl1o!TfP$Q=Om?d#pe-~gI?wZN&4sIUm37U>N->bwc|zl#Q& zV>Z_;n|$x<-lCJ-Lu^TasxJr`Ez?7hHsJ)CONATvFvH%+agZxyY5^z2@%Gc^IJaH>=k`wO+sO)3#et z_M5eCFA$|XUG#Vpuv$oERXxaEIMFNc(iI4NaE}{lJC3wb0CjusH}^vhkD%I^%kkZS zMg&TceEsLn{CE{<#FhTfZX3NpLM5)5*Fwmxp4~!|(QP@peD+xq=?2cwAi;gGrh#|M z*bZ1G912iQXJft$Y7+VY*&4wiO+V6n-}5By2uJ|{#E7)?L_BYo(>X(!~aZFGhZ%KZyz@!~eg}k6(>eiyO z1uMDW6Ck=M+=?EfOl)d_a7Uf7bBBcQY&i!;E<>1OZNuw60sPYq z7CPvSgU$fO13YTeOkl}?ap=JVJ`wLf5)JXoscbyGJD29VbvC#iIFy488BH_q|J>QT z2|z9Ene8Yp6hHjNnnwMMMHw(AURgb!m~t^LoQjL^(#;-ik0jx6-P^HNB2l0!H=`yv z>oam6+<|?Azv*m+5j5JrPeBE$H{#Mg$`A$$`(Ozd6PU2}gC`lfk;)QGL#sbEA>Lo@{OH50?`XrAaF!H}{`Ykl{Q4>tu=-cfd0xylBw0zS~^4@=0Pc*l+=Gi~i2wbHHzIiE#lZ;Aau& z_BAWu5&@Pc4SYP3UV9Q(D_R0V5}+EpI+z#AFSiy&Vg^CXtrq@gw@h=;O&`1k*=QV~LMM0E0EAr7Wg$ix(-6kWA{kQptIu4Tl8{zztc{c!Z!=o$3 zh9mAg>^I2?;x>yC?OPXQ(RW@ppar*_Fjv(wo#LBRWpkZfCdEzeO(_RqoXBz9=kgs& zy%%LSWevzjpW&dYm3!`)+y&3BM_x40b-ZGD8eQq%c@28sao@nqj$UIaf zP|S1+=}QhVtVx8R`k@t&Dc;Pp(fzkJgO1~4X|XVI0~ znVXfwt5T75y)D0Be+!t;0|?Ly&44w+!*aq2?z##5mwjfR9o^JkQg(@waD=r)=0nXaS9# zW8GYGZ1vNVu-}P&3VS*M$(!>x5jM}OfCSFY#}MSlBE>VR|id_ zDTqb7{TW+!hVw8g4zVQh7@;n<&L|SLKl(%GzzNQwqF+31&d4&Ay^l4`4myM1@!&TFxwdhaD`&nN=LAW;w(D8FhaU7vrd9w?lGFrh z4(M4Pw#s=LuhheX=*qla!C}Qpf1P0GpV}wjXZIweMOWSw>0|8jpaF4gvPj=;I`B_C zxh*(d|NTs{A%?UHs49(M1+H6z*Z78l(Bgc$H70pqca7`4RO<_Gv)!;0A9EQC1Zj;m zY&w-a?yP&TjfBQAa%qrI2rMB;fD!x)W4~sdM@?r7#6)=fl6wZyFDX<`f#=L`X2jDS zRNhxL-yL4wVwqmMIv;9q^P5;DIL;n{MB+-8tikIyYgaS5x5jKFz_G`P4qC12a|GGA z*hfHLga(SFHt;qAYTc~cGRDB$0|K^yhTDTae4PXfA1#2%K4S#xFNw#~HiqmIhR^Ne zOa8N2BKPnh8!GM)<-Cm&$Jo?Q3_EDSR(!}L4wMBb%KJ4tXJ=iG5H&92rWe*Ml4Fu% z>SJcOixzTE_sK{G%=$03b&XraPD9{)cybOPKx@n7j11}93 zaRLg%pZ+S42qb`@_kzghYqJ4Biw5(3{Yw6qwl=L6ILcm(T>~hM23dC?7rGQ*luazC z?U8Orl}cgr)31JMTVrq?cQRVrS4gMXT`|N#_IklU3Zx8ZP;hMW0`y(xPgp+*q}4kbQ-Ug;M7usgP_3=~xN{!KLvT3$PE+Vj!X$3|MaJf8hA%7Tjh? zdL0jhU6+Zc9XiD())MRvgU>(#Og>gFliQCT3H>HfIo#@*YS+^p@;`mK8DhwS*7TxE zhOV}MSn8Teq1y+`D*3!~4ZOzHc=w0?q>kQa1)T)_WS>1Nt!Z9_7>;=^JjIQ?`(%Z_m7 zCIHM$HVh=TpiP(iODc*$VbbtcWdbDkBe#joW$>a05#~-^hvB5ph_vCLD+q1|A5M!E zB-k|Ni1yw_=HXNifQmN&jyoCPNVVTYQTfcW6#Z+(g5#ZU&?0|?IOA@&XWspSVX3^~ zRm&>9ymMzqWjr~LVclDPPsBiHh_TH|piJLkXYJg%2#g63rG0J}6ruL7iK~93*n}J2 zzGmH&l;&9{*6c2T+m57 zq4I{{AjO3&Ah;xFABv#EUXAC@1*+u1a1f1&|QvFd7C5iSZ zqQdBX^-q^vV8yjpYL{J9AoqJZrb~Ez-NR2A@3Rcw8mbeEeK%U59!@)Z({&lJWTY{m zW;|kXCDkkCkU%rpXd}gjE&UVN98+(2AP!Si?rj2B=(ZW*MK_gP&@fsI3gpSUQ{YFR@I`^%vttJs zeS+{FBFP#&@^Km)`?yYUb8d3iknxaP0RgGy8w5=ZPGVs_BKz5T$pPqBDlQNqZAxV! 
zfuGuTF+@i*?KV;pb%`1d_7%;s!GWL0vVS#CQ5b#ioohbDecTw+j5Rtxi~qS3T0h`u z&o?ZO+8eOW&o36rAJk@sxUQG_7`j452aI_qdaDRu`0zftN`NXM>=jS*9&J8Mpi?HQ z=mrvx1GDMws~|cF1sUl-(IhT88VW2MxIBQ0ryMBgt-M5y%SeUa=`;K$rVJcaJ$N3P zn+xaWCXC-ThZ|fsZp!zB0!CXITvu=5ZLS=gahTxba1L_I)K&)Cplt}Sr{T!TWbjqM zj#r#3Zv#8NMK(teciA-=gb#WO{CemHF?7Q8{EvWulQp-1Evg+Q6Q#U!SduTSC-Ovm zXNz=FoNtLID4QaPyZaR}aLS ze?vQotHAQbuK-0mGp0}eyFuNo;-#JRSnfdn5ChI8?A1(wBv59h5DNy@<`i}UZ!;9n zy`|_8a1MJ*lZ(kXzpMT2vHfnY( z+4@hTgO5}6+a`8DI~~k#9b0?|SX;ZXFm)Ko*++`B%bC~|0$X1=XM@H1;Djns{M zr{`n|?^gHyY^?C{cJ~I=v3N}LT4Zgvz(wg^vzxv%EUUO^B-UZiO??tU>6s^-pEE30 z0(=QKBK{uj-z8KYQ&QPFV$<()XfL3i7X%`D85z)xpvLFB=_fy$EvMN;ovQ~z1&vex zDRkJ(*GDimea^qk^c?*F8no*@@=aOw{YoBv2mqPjNPe)QHXXljN&EeI*Hnd+KWbmAu^Rk^ zfIoAP{_QWU?05`i`;J|1SHZO|`u$T}()IskJEK@$^@pn2v?s?e1x?%urBm&*a%NP+ zXw$e)MuuJty_$5;a$OV3PPTDQ2VVjI(|ctg)GmIS?D<8F;9qhB%twTcA1jbr0W z)PHHgxMtIf2ezuZHz;U@XeDXetKZKD`sFsZGdvRA2>p{0QXZ&s&pY_pOn-j5hF?l=Kw-pnTB=chHDZc;XkL-dKm!d!7By=|+?Zl}^HpfiD^iBylW zjR(nx7>S}(d0{leC2L&O!IS<@-ywe&jpnUcUE1$oK}Tu1WUd|2O!tklndEAeIhzCo zBlbSHnI(MiJtUoh?7vTVxZ&G&l0?p?LQp%wMU`t^_S+C$%Nb^ zcA8y4w>!4|Y*KtDGJTf$WJYAQZkW^hw2`E1^&ymBCQpZVodcu(j#?dbX7t_ncUSK_ zM~&$_RigKIt+*PnM1RO-td9M^+6Z0;rbTe2mRLgSh5c_5m392)%`SyQJvp*to~^eEOw7?SLY+m|wjMFM{+%QoHc4PSfq!RzF1leJ6*u{6 zZM1YvYAKdj*Q0eiNH+9=jMifoImHGw-JAC<&f{nBw~i{jJOH=$fo@p|(ixAp2Lll@ zb<7IJ+C$c)q_bT5A}DT1{Jen4m6o8^2iY&ySO!*;4=1ZuJsOAZ-H(4+pPmmvd5XZ2 zKLJl}g1i0KwVd@zoErSeZ`N@`9O&VGndP5BqmYaF#HdsvaqI4OXSVZh#QeQIG^xa! zu{+-iRT2_oj`F7}Z+lNc1|aXLocz62y;vYuGFgN&Y&lr;kB@LmIh?YRH|qhnVVM|* zzQ45Q+%O-ONktC82ygctBvr8x2w4{7XDL%7p_O&THmlska(e{{MyQYMS$F4m7 zT{?a%L9`1~#;5u8m(4_|LWVPLkrwO!q!BbZtp^GBJJcDzafol7=8HAmlzGA$Vi@JP z3VEO9s~c%aX5(5v<&$?KhFuBS7sBtK z+nndpQpo#$FgxBZ85#Z9;7=^Xdk*&G&6j(23(>5UoVu|KWONF*lo)S%n3%oAwha?T zdbFsqDB!7JQ`Hl{?D^xnguTFXK}S$gx=MdFAAOeBVZtYbXpw=}Pfq)0eCdd4Y~H#R zG52e^A)}+LN6{M!(ghCPBD}b}A?dbE|39A{-pas}j5#^Q3TBQ?*$6pB=LXwtQn}G# zdpN6u7nRoaq6_&Mn5HGgTS{~97C&s5CXy?0E4f`#W-*)+X@Xvn{uD`C@4O~UID#22 zB6~p^Rm)SbDPdY=g`X@f9ojCb_T+eO)uj?uNcUe~%MubnMjw0YP&6jKGoDs|RBtwF zC|UVO#itemmp-Z&5rne=Okoly(rS7pUhr>=(V^Ch@);o(t=g5`q&J%^8Zo(xW4ZDR zV;o#UbMLU1P_fsx_mv6a^k2%luDwxKtdBKrt7~%SD1g8T2$D*CkI7uo>rT$dqFpzw zg>7%t-jw%}*ge{`EBbr}ALqTfX#eXE43TC1u0>?{^M4p0_fS+a+7XK*YgXH&(ZRJmpV*XN=dRXu1@N)6>B_QYSy=> z8lp{I1Sp7n;k46X0_#1EJ_bYffWVV1m5G1}p2~P;Bqse{Tt)dnx2SMX%4AD~XXCAB z-^$!t3F93#Nxc|-?gx2(jF{vdF|Js%%8J!k8JZ8=VMfgK&|=ahS*HDnlV)PP1qPvk zTgOevb_Ue_rkUGnFwCp+ibM3{>8SzAd<^aH#xratJPqb%@%GG8>zc*6wu;r4hCqoLm>ok`Uo!;^bCpuhKD=ici zUg%u69dvL7bXB7kyS%To51&n3M2{%gb&8Ddb*?D19BlaSnBN*&&8|Lbpq)f%Ci8K} zEfz{7TFzdHCvpvvD3zU_*+^pOSk9fkGc>G*Y}48M?X$>Mo&h%$_e4w)f0~HPlPbn zS>3)hP~3#~1~NGuP4h_}4krw(**PvVQQ!405zC(RI^43fGA41fLHffoOznAL?2~0k z8|8(B9sR5!?ev5l>VS23-=dh~J@|fCV1R3}KkTkdtZ~=qGpo=SCVf4k!tvDJpoPwO zE1hy4&XnoS2C-I0!TVj)d`|FEhk5MSJ7urwre^GibNm~Pe2eo4uV#rKPZ$o7hjdDBw{D9gu9dV|r?#;WKI9_jg$18uc z!JmIi&b>J-3p>cy73@9BMRFCvEgrPdy8KKy=c~`7-rVnEc|CsgQ-=TZAi5@9Cuo`% zGA+-kk#;PVsf_jatjQfk!?U;*2`G6g-s{^f2b|qrA z)i!*)^|#tRMEO>|di6FdCVen3(!4BfqQ><(mPwMk+`T1Pf_3TP82IX<_PFuN>6!%p zSwiHIewzm)t863{XMVw;K0DVtGYvv~oN<^r!Q-Im-su>GajUlEmpj1RKz;MY<0rNal8`Ot?k zgvcwK{w+RV_zpdm&%NGyqmz$$6qqCmtNFCum&t5Q`P;1xiRF!=;?#u+R*i*|N5pxw zM6-CDWVxufn?k)!iMwUk`x!D;A+0yVz(%6uy?t@#I=+4q#lo> z*FU44JKx-}J=)MfRIk3#el0XfC>x>llC0iJ#a`4!PrLK*L@le~AxF}dO6-|^HRe^j zX<}2rbER4K@4nqmuhbS_5GzpdDfr84D%a|7)W!M}iNrag*0jHR!KJ0M2cypvx$e~W ziQUTG6~iNa`K)HoHj1}!N>EeuS|H|~uzb*oh!=X)&29DpVtNZ-+(odNp6ivGabg6> z7sCQ&DaD+fvBP8oh49+IQmSlhY0ohktClOVG|{p&O4}J>7cE?k33-`PuFBb@ioBxV zhrH6qbB~c?>4{DH6*40+66etQbh&>4Xn0H}saYE3_@eQQlD9V;npmt9IlpdP=ay(F 
z-dxYExRyCDB%ZCel`&Ad|JMqvhJr4v_esWHGVN21U|SA4&cKtxa}^7NSsK9}wNKT` z&1U$5N9onOs#gaq{94F%DxC+d_MSSsd8d2&9sy=9u%rS1hffr#rA?FL}V z(xPR%?D{t=B4;~!E0JeoP6get@>j>B5)O;*FKWKX<8#)iCC#Q_B_Y0X+&+~<(3J-q zXUCaIZoHfdY5I!V?E0Azi##km`p2w+)s&+dl=NtZ#RIc@oXUQgH@az!BqYd7pqZ*+|i$yU5$^ zhg0STJ5|D^n>RQrsY=TB%^IYWU4(DuIy^o`8bgEw=iWG`-qC8Cm<|@|${?BYDH{wQ zUS05cEYurJxb5>-#-0x0@-zB+qc8t`9xLaCxLw#rlR|ht?Z}Eoch{Mt&>I`M`wbk^ zf#GV*k|58M01dEOIW66|ZojXZ*b`Q;cs%N7y+;PTk-A2X^t6Hz<*n1BW%aHqrzE#i z|Lle^^!>n4WzoKg$tsy1Q~Sf|_Q))3U%!t>^ADaA>0X(a#uaXbd)WC3r$yfUd)7

8<;(ZoP8%Avw-U$Igqo3y?Vh5`dwdDGr-UO|rCvIi{N0fE{ZZG%l=Vl$FUi-qhZ;3j^ShwBJzh@^( zu`8A*!HyMjPMntS%@>>$FL`y`EOsQ!-FJ+|fUJN+1`P{Sj!+mk}eADYCW zjRgnSAFQIb(|Y~G(@90Yt$2F5!TJrl}2h7FN-kpj$s3( z4_2+;RNugdADJX??Ulqfy*uWcP-8%xoO)}y+sAanx_Rm+r?$r0-VLtkF+%{bDcBhRnNt*y{ z`r@ZLvuLcq6?0fXKVGDHFW=`0;fZ$g5be$V=q9l&mY1Ab88(OE=<|8(VbX;8 zG9r9x0Cew}ze$j51KHcstmO1-V%*-+YHG$DmVTB%OhDRhrRXN2#YReafE^XGV~CcYZ1kjE^V{LJKsD z^I~gU5{emU6I}X66?Z{f2g4dnG;Lc${aH=e8H>@ZsMSb$=KNHrN^DgcgJA{W%+MUNwwq5hZY+_~)OJkJV za?LutPX~;>&oHz8Op8lT9Jxf-M<9nyS`y<;%3YHAiCbzqsaQmWrDHVukdGK$!n0Hz zJllC_U%5UfEWFoAKVJR!GP?HQ0PDM5{_8bosybmrsOA(xjlfuTjQ35Yx@&ppGVuJu_JZHcvTqZ}w|hg-sMc zIX*d2iHgaUJT0m7#4wzBpysnz?s!tv$mxutML-y%g1nN?@apkesE!L4d5xOQ|B6f% zxbjO%nw+F7L=y)H3|)xNCPPY7ST5uIU8Yhz`+n;fJJ_4(}XxJxLSn zh}3v-VCa{jRUHMBK0RupvYzYu=(c(@OMw%_oQ|z<=#jm%wXvrK; zaX(rC%aMM|xo9)c#AG<_iqYWmxe>z6K+*r@^eb@Hn#fXwrm~WAev{%H%1=~dQSzA3 z^ERqd(@GpWSaj7Ls!|lrP z-65HB30(RF5y=W=2Q(3s^+mBD`KA5jSOE9N7)2_OS^Kv&#_EE(s(xQXahbY|?+RUH ze0O(PA3piE^A0$H_bP&S9f`8#Zi#4=M=bihgik4RaGh`y9!j#c6@3J*tC<*iK`svP%@Nf*3 znAEeWs;UCn$NPWEGFJyzzhEym=}mDPlI!TLE)I>FDd<>oz5cjdX#Kr_KV7))ZA9%yQ+V%sO#KmG2{!b2uoRnJv z|HE9N(X1Yy62YgU8xiqUkIIKj=H(Qj(*30PlxFjjRnl2m^YD7Zf2XEu(lqMb7L|UG zFb4Vj9utvb5xn&`kgFUup2r9UOJ}aGekG+AgGCpOS1VGgrgJBFzvfrw>%h9}qRs1- z?ovc%FV9_Y&iP9+Eq;GlEm?0VxuYmN!FuNOc((J_5VvVw>p<3G)}=7BoZ-^9=!R?C z+vpa1)`%`5J=a%M%o4R0EPAmlAumdu#m!tL8;do%MdHRf{=Qj$y??d!!A-Rn2ljF= z$Y}LnYkG<#-sF@8u?iw}Q$qUjt5S)H{Dq&HHk;O>x9u0(`Ig^#7ymrZa4jVvLPGMS zM1y*zro(3it-qhZL&xvp=V#d)j6ZtFKUC1BGZr&d;+npCaDTym^aexsya`9)S8dxT<%BvQJq8cTn_Y`4O_6RUD*}<}8$cW3RyXLuH;U~a*q-!nh#JiiB=pXE9Zta9n0Dz>=A zQotKJ(|^`r0smZ7G$#YK6-OD+3I&zTBexYG5w^_U=gORnL|3px4ge0MWf zHxr85OfsnmquJZ9-uy0y-!+*o6F zqNS!%=Oj)cDmN%v5OI9}bjym|o-&PacbCX*^SDhaFKfO&QMw-#fH<8a3)DvCGg(X} z-=!QC9#j+e?ipt-mvTSE~S6x3FXv%gGiSo5cEg^fH#oDacv z=B^{B_~6*dQ09Zh>bN`A?!nwWn-BBpuS8dVJ;#ai{?sTssb~L;WwGNkE%@s9Kt3j0 zwjbyxn? 
z?ptMf0kE5VF@9>?M)e(lee=D{X2>yxPki2PtwqaimpPr4BmYL~o*SrO3}3iXP_oSB z>>bpKAKm>ZK(Ck_KU6{M<+rMY8^ZXlcm)wrs}r^&l_KSFFU&SaWOnf5ZtF&*9M~0m znJ{>8X1{G8SXi&+bXRfsaj@i$FLt^&23;b)2R7tdV#xYmzu6H5`KMbHc)S;m?uZWc zrXAc&Hjc99H{t6nij_N&$zXF^McP%YBef7slL5H|Arslt0t*z)V!@vxz%DfGSQGjj zAQRX9M_YZK^38tJE-0wY8aSVMv4w@48yL>mflqx?3>EjGTx6r!EhT=-k?kN5Ai3Ey zDrG)8dE@&bo}9-S$#{(gv0a0p@uENca@Q$weQS;b)C2a_rr1nrWlpxA#97LBV@7>uOkI+NI={wm^ZWN&aENPI>J?8jmHt37O*(88rA@NDZBOj@ zDk2bNwv#3>?v>wzWE%9IhfC9WTJtxaDXa)MHe7{`=Lpt`3F3zulJe>BniP!k&0X)H zowY+ua@}*j2fG(t9LFJcYQpR!e#$ha=2vSSLc_QJ&%|;M!2{T{Eo#aC)lK+&@cD<{ zEWC>i6Z+}La(!I81K(huD|s~X7K1S7^yI%r-h-88mX~YN(9onzvfMNMN0GaBfB>cdj}Wj&n6`|cIAJU zL9S{2b=G#MOJ=XfH`4BHhum2qlCZSJ1&+ zOUhRwB#1jn_DJ@~_Ney2AbJlyF*;7;*Aj_)^p4v{5${5TMXiouqtt`qph>@a>M%u&13wfOMC)|1BeQw31Gx zLGjlj|5}Ga?s5P3Mea&vK1CR(r16%0vrc&Kyc&eUX?C7rzNkGiAG?}31co#RTQhW( z#Hh(4J@rlX_EGDzn!NJ8sh6CQ#nxd99-m=0qm?yD8ANBvs1Zn(y+83h9?R5qi zDYo?EDDhFx!3=J>+O(pQB$0&FM$z4Y~++u1SKT&;6As*K@Fgt-d`rzF|3HFk^I zPY_ltLkkUwr4zgqw$go4!8 z%hU!Ce?1}V+rS2B)g3+k(j3~>_jekk&pmQo_?01mNx)7LE1L3(X{f}kx#Yt7!;2`A zb;))4b#-aH!#lMyp{eJzR**7opOx9Wn^AIiU9cL}bzqP@MZUnkLHWZye_w^_(rM1c z-0mwVgKE!V-SN6*E@8c>lCtEGRE3^)BkPass#-PP^^W((63@2J$HU(mW_V4hEK*p% zv9J*|)5yEjCroL7EpSglc#`#(XpwjkijVrI(+y5|dstrU9-3+lPnfLfDDV_TdK@WL zH#eGa4vY%H40sE>)D$ftu+(vwg3#tHFDt{N@BA{;Oa^P!1~uLe4O6kPIunR$^{&ya zDy01(J)cx)ii?D3t-R7mKn~oDO?Gmd#JW>7EKAcSwcBZ}xJ-bYXNYr3Qep|^_yj9+ zA7ga%XW;_3-T{X#tX$oElmWH9jd5uk$lunlO>AnEyX!c~;h?vX82rSrB*8Iwi-k*+ z4Khm?s{?E7nPe7bon?p}7$~mr!JHGeljuvBHbYeeo){vUxASf&^YqPMl8R0$n&xy6 za)!s()O68%w6E+)}z%NKwvV=7r7DY6W*5r4iM)BuoD(D4CZ;U(#rL6&KjXc!*8qvF5 z&l?X|dneM&`WwbsMEjkIJ9$NZ?Ntgi1Sq2$;LKI9k;)h$9euShwJoF7WEr!5_Jb>b zXVl>o&$zS{y+N~POS6`>E$^@T^MQv4>YcBmXn#jr@!5HG3V0%bn_*W-RTELL!w0rYVMBq0W*`2Rm7g6Lph4 ztOiGWB8N}5?hB3bg99P9_vxQX9l_(mUxN7WNj4inRT`oXnbwwjlHws0S`@X}G{WJ$ z)dE=SMF-Z3(?_5Cbpw?oI6-GN<*o&fMx#AuEzj}?&tnidJKRUSrV5{7UYWb#zK6q z)xhQf{1a!+3}5ALMCY%5&MR8n&3ZNc4So?n^xafe?fG5}b(Hk0Re7HOqvZXsKb?nl zD9eB|hdBQKUJV?taUs_Jo0>zU%7y>ByAJq}k8S+aqK;KpJ*11BCv8{C7ZTMX(~@?@ z{W`l`ZXN*x-u?E%$eRwqV+^T@kM1HeB|?sTOgo^`9|<3g?_JW$2Rkz_V*1rYoR9 z7o=`w^7=ok^dFCx>C!u=kO?{h;1py5-}Rq)gt6!Uuhu1)eNasF|KyGDdr0kZDBVVU zbuiHO{MyR?u!XVb!Ndx$jDiO=uS(vrX7M~aGnd4PpI|>%_)o*g`b$VA&)g3e5CCY- zNP6P@&tA6dOd-c9|GvEE8XxcC+SN?!0Px3|!;a{e;^dfE0qmXs{=%ynm&OpA#-GbQYeh?%Xte+BrIgX2t(@oWJ;mo{{W>d;B67jSqPm_`N~AR6*wh!7w|mlYpMEB8$a2S;OPz0IjLb;xF+NiOGw^=?yhd$;dHB}g zuWX0g!|qwVnotcHw8;peqbja<7{Sl)twjwaqa{V>wzz)%6(y<3r%iP{A%^nDql1*8 z6C|nw2h&tqJ{hJZb#iolmwWh_cDL%OR;{o-vO+Xq5)BCKY7B`db({&v?Q!t9`&u^2 z%IV{>5=Q5rJ9yI|knoT^QyCw1gpP9iYEkSTSglk;o%2ScP@nr`)c-5=s^mNScfMcq z)ktV%8I?4IE$@sNav`mJV5^*4`ix&*8CQi?X z7utN+zp|iBM`?IGKdIe;;M%h`e_gFuWG?ouv%U`T#K+!b|WDbQ!ZXE&cLSF#av1Io zPu=|zH#=bs^?ie&f4Zm-1v!kRt!@#0=biT1uJ@9cho>aI$21R4UJ*YsqP9vV;l~7b zyNRBOQcrEbsE_mV*<|Vv??kGwCv44dxXN?!y(W@tq6H>9L)S>9Kn0fddQph=FBA#8Jybe$>{WX3l=#rv(GVsfD@^PnY}7 z(Noi%qxUoXAFY-cAF4u$VWv>TBYMw~SPA|c)JM{Z!fY-LRt23m_;!h?r>vK`w)ta*A$z+hkN5C>mMKst`qVA?=YR?gvlWgZ^$Y3*Mxgi|!;JF7@^x;`=1N+?72i<I#;KLI&dZ=U9v9vkbA{N!ZLFaM%UQWUNYG?1T*}2j4XuGVJ40}BvoGZy*gcT z51B%Iad|DRReY^~R(a#iV5s38a&mi=sMXFFy9&oMj#`=yLE%an%Hj#8FcOAXB6oIl zMW1IzgD9-ws#%#~7l~t%!ACYUmpc_YHD*86^Ao!0MAaD?$Nyy0x3bsN$NR$*#PK;( zOU9vIr}+MPAq4AXyhgZ9N0E1z)ok_QK;SBIZOGzmG5l+(b9`6b!=0w_y&tNI8D4*e z69%_e86WwgR5ouAqLdkW;`&*wwGMaBaUeyQVe97-Hs-%VUZyZ@$u#MdB!6{BFARn9 zSz$#*7qnlS7(F&41$*{FPVBCbM#@M$iYN^E&4~_qorx=Lr9VTHg9Kbf6z|be_u) zIzJ*!TN{~T$7CnTWOW7S8U2^GBCz{QQlPZb&=Sp=%iKjLtApq#tS6*h;~p{kBAbpQ zR-q)j0@7KSZ21s0jb)6!8}gm8ZOq6&o7IJhk;%2(JOvtaFJz6MV_}_N_OhS@h9WP+ 
ziWlqOQyOu`nj{ao$ujD)4a*2)`9|+uJ{k9txlR&V$H{FyO?@lsc?f}uqfrN{q0=wER|72N~wd>xu4k0L%GQJ@cL;gJV=LWw*IQ?GY1vinhVx6-~ zJ`t0Dj-Zpv-$c8i8;H+2>U{A^f8E9kui{!sDfAoPuZXUiwke%O(A@U=V5vN!pByz2 zNdD&;WkxhVXIC)6g3eE6*6J@@UY=eAp3w@g#0LJ&Wf=4mUa^xoc$f>?dmg@1+&vR$ zXMD!M4?K&7#<#5O5~UD<{Hq^BIP+IvxxDSxPuv@ z<1O{;GY0FVJ~Lj~y8QcFvRBq@Vsb6LRJ=ceFM?%yLTSFa(ksKUezsepZ-6NoMW>XSf^|R(DW)y|9{)ZI$9M^|B zy54UVAQaG^NbKT)oqZ!w0B-O-C75Nvf=EC>T@f-SrB;_-Ot6!s$?_$!5pl%NjCfu^-qlt8~$g$EXv8> zw=GQ@PSk{{>}@(}-&bY0E{enw_y=z_-6ro)VS^IX;#c)rGhD+t`Vlq1D7^&{baMUT zufO?66hU}vB{mFi2u1szzt{6O4s~tPB8Et^R3I=X)lq{s1=Olty*RVm^4 zZx$Ztpk4X2`6gjJD=caK{rZM4YprjygcOq)c-)qoWS=}LWg_M@@R+73Dl@63sMi&; z=xK4jnMC{29DP7s$;V@(G`XfngI)p26)IbSU>Yw&q>a{jPqj!d+I0J2Y}VP?9lmjW z)1qTI^&R_!-B0kVLs4+zZ*@~N1Bw1IlF)SCe~MSdB<_@r@=phFR4Pm6UD3Ri$4JE` z+O`1OVhH|XO5%(qpU+(xmS^=)a+zg{2a(S2^lr7y)2NmsF5eh{gy&$w1QVGW-ji(; zp?U{&usZ5t1&ByZ3)TT~(_7aw7z1U1qkQA2a?KEZC+h4Bck{+TSv+kL8gU=b1QlJ> zZ)_*Fs_e=TzCm2L+7{MoLGfhY3RaeY#=@dL@Fu6i(!|UOg8~7@w-DNh#$CJ5>dE9~ zSTG9Y$uMtterws236>RSp)p8^A6%c8yia>S9wa=!6gqzlarzRsTmv(q*hc+hyWfwj zLuxN~R4_p?cg75g3E(Pr*v=c-)5A0%Pg#oliR@?ny-;rP!EM7M%Nt9mQSgk{Gt=Cl z8L|fwGt8j;SX+KHY-ST;i#;6-(-(Xp^rF$IuaxPtQ6p7lRf>xfYPkL76W5@j>I(mo zfK!T!B_$Q>a?kA92BVATby$G!Y3fHPN*;YFG>iD_dQ+;E9b zflEAMZ?4w8)}?>N+@=AuJXYdjZr-pzM=lPDpM^bM3;8vLMgHLYc~_zTN9n>ytwn+w z($W&tc)<&cxhdPbnW2Ge+hK}k;+o>IA0xWJMF(=Y(P`f)GMAEB`eboD2(H1Jwv*uDO1pk~8+O8$Hb$7}oY*zpKA5)W*?dvOwSdY_8m z0Xc8#!l-`)lc{sx-T;+n7wWZY4N6|KYTJuWTOc%6g8VF_x_)G}*0dwyeD-S9frp=P zv)VPGd&}VVc_ZQDSN%_#Ra?NJ*K=X!wQ6n9uvt6fN_etz^r%i4dp4`5Q=Ymj*O+yP zh=@S8{eC-PK(!5#a5zQO)d2}F@hxB#B_*`jvkzeSBv3$$S{L+1Hj8)i^Dp{)7YE_C z+AiE*vTOT1AM|5diGn2MK^d$@V__9=ii^i9^^M6lh*t=9%p)$lm1mTuD&Zpc9Vqn6 zFTHl@nqJq$Ggzc%Ut>^{d(&urZuynjB}TmH!Y6mfO=Jx@nFXcl$W8`^VqaOrEg_aB z)q(5pZ)m7p3)m_*eQZ~2H{xpfEim+RmPMx4R0U9-NHYG`U0_p-F)t^>8%u)0g8dGa%JsmsT&yQ0+hLLBHIb zU1r-kgTwS?F(1S~-)`;H4Dm-=eADObI)}*Y#PQkT2!&wq6 z1o`^N4W4zai^)S6Vc^6efFwgnyP&hHBx(Y-I4F)OWMWvFR+dE;s0&henU6M4L#QGP zs~DUz$#H%iZSi428mEtKOM$fMQ!hr#a87*}v1x$1ELwEhke+i~+}P;9Bl+4iGFDQS zd=5IpMXBaNcD(zRFaqlASGOOfM`a&&ixQZtn<8SDvrvG)=Jz-TC}7ZCwh;(6F2F^o zaz&`p9DlHkr<0=<7?Mrz-C0z>%Bj$j2QTj%rShXgY>W0=3VeJFjIYeFvx~44!Myy= z6zq@XOJs+<#~rQJM}zfwWGyliWP3ZTYw>M)*LK~iUAN&`7dxOO4BsqU;;W}$GLpOwH?h6_6O-g&t*%fvTG5Ga7 zj8k*xT1mDo&9^O0LV8QUG+pzg;UBf=>mZd_E~@OH_TYusEGW+$ZW#0^Gdlxn_epEL zO$n0Oo^(IM)8OO6^#`GBss(>%v4nkcJ%Cs?^bwoe^xKzmoY8v@9JA2{&3N;?U<%im zE>=Ig{r041vYw^Lx0d%OH_6ATU4rbYEJ0tBU%Lu&p_Yl4W{pqFmn;4a>*y0U4gPD> z7OlFdZpFv3-OBbt$sAI}&lc=grtK%W?`yM3I{Pugx z9k4~OBJgA6WaKtkt~)Gt2Qs8MXKUW}h{?_P|KE65F@r}YEP?E?D<75w8@#esA7_v` ziO20Bl`U!D+!I9eFj(H->9ei+tt0Llft z-_-avL}>5OI<J7bWOY6JAas;#LEQaLGL4jkZKl|mM20A zLyv(U1f6KIp|nzBgQ1xTTx%r@Ou4ZF8kt=()6@J7)8f|WPq;JOg~G!*Q1TXY=G{OY zdBrbj^(inft11?st>A_2xDK~1e!}_aZPF_kQJ7x)$ZQx@?k8q9*erDGHaDja24Yq7r4Gz0y>sh{B?L&(6ku_^yY>dW@$dlcB zf`?<>i^+w8OAn;^Xfa2}WC4P*58hHEXR%e3%DW#0kEM7%H(IjUDet;u2VoF|PUk+x zyT2swi!FZtJu2G5zwzDAn!nGsnXrypyR37>JpoCr#@>OPebo76d5Bu}f*>-EPT$*o zNvwQZ#(#XNQ2(`g3MhZWLv$`>f9ECI)*G{qOWfX}>_y>IyoIFJrk=8TV6qecQx0n8 z*^LsY+WyroJf-xhvE#J9yzef)Tx<9C^*znwRi0$pn;2*!OUiSWX&e0+s_>}z@*_-NzP48=`Dgwxg z#>=<+x?QXBEB)iYn>Ar%9-z8ACiGuapYp~?j1Rw-)VQTuMD4frgp2u0cf9j85%Aqw z1sTg^R_bx#i(rhX<%OU(k{P-mQO+H*Wb3!UQ-n1jywe-1=I)dEEsBCW|GV0a{c#6y zK8i5>?1a=>&w|R6emwiuFt9j6^t8^!c#&65C+E^%`KG?wQ0P2-rqTbD34a+$34kFUVhM9h+r1qJ<$G zap8E@@GN$IbIbSRSi_-40EheA;i+ddqsBuDIuGq}BpZcU8fMSon!5*cJg|v&#zR>>PaYJEee#tyOU6f(+4v;< zK1!eT6I-~)*n9G(L;349+2r->6%a9UX&~$|d1l2q%aY7LE>x047u&M^hu+DrG=VmQ zY30T6c0`v=PDx;sA8;AgWz|Z)Qwx*DcXl|S{lL^*yRwX0Q#d*h?R^4nONv)(01-^+ 
zg=irq@0*n(tIR(k2>EZ}ge0R5!^v-3g4W_+O&cB_qPlI|QJ!TY%8&HPeuZdw8gh4o zQ(14K38fsyMQ5=xtkA6cqfdu(ul?C7H1de#eMVU-c?P>v_5~q?4hrWt3zRnw3{BV1ZxZpI zv#8Rs{kcGP&G>ifabqrdxJa9Q*jLu_(CUR{K))o&U#4>#B2t^*b?<*uG@E;GX&w~8 z76290Tx2C(g<4I?xd?O`Jf3Q}YjF3&(mLBeQXN+5)x;b6=dCWvf1MlbLLdz%#i)8* zYuZ06x}cxFq|#r$HhxgqhO1q2gw2~fEA%qclZHx;+qvTOpDhI@Ow%r(l3UxG1gyE( zC@gWyHlSKI!*xdit-=;TK0-y(-~Q_+wbEaj-Fc{&NVcyJ%G2U;BQqTp<9Cuh_{LD= z(>f-T!Le#Y;%4ND@fb%fJ@`?6&OK6OkJ{Bmn8|k5`M}rhjYNL)5**i*_11MQY-0r} z?zmn}d-h}3iHy!r$x}=Sy{&DE>#K)?ZJiTm@t&bfc81$$$1amz5 zIl=@~SmOH%i}B$qw{)~4Fzd*ZUYUKYFxnILUTey3^AYGr;y(Q&<+jCm;B+!cFLO}3 zjZ=#>Z z=WklW*;-weq(Fp&M0_~Gh-oI=-Db%HsGpd=ih?J2FU@XXV>L{l3bxXLDb>k;s%AXJ zr~TUS#xh_nWIFU2WojmWa7VSSl$5ep`@VETzp0n8pO{3q)swY9mIUvIy8mPzw^_lB zLZO`Ksfk+EgwEd&y11>?P*d2@`WQ{Ti(wXld@l=)4TLLQlJBG)HWC&3*iTg!=@5{8 z*)h`u@2x?(XmNb7zY2D*6lsu^cmUltD^SZ(%Km)_NC-J`e}a6z@ZpP&alryqYzo!w zFsL2^WUet}4CbN4>5os>$_}v*n%)G9Ipj6r4Myfi74pp@A?=fS>1R?(Ql!`u<5U#$ zkaO=3~b!Z^Ok30(*^ z{GC+#X1aEZf^pe1eU8$=vnq(46ABkvQBj)p0xyP2_>f^@hryfJCuK_ID=wMB#!X?a z%$nKho-F(;*v3|{OLnjUNW-NysO4-QMEX4lG_F6%9Kl33VCgaSSDc4;*l`|Ob5^g# z7C+i!|J1X>W07I82|IV_V-Y)oMVQ^qLW7TLR%0CQNs$W&qO-9_({!yg!w@Y3I;a&f zaG1lRBQG`2H^a9!Q4hLm?8T><{qSn2mUcD^tlom;hx<1T(~wP=IeENmvJLh&3%hpf z-YeA465fGy8efWo2%V5eGatFPJ9G1YeAj663aGfNnSIe4eQn&bBG#U(X}wOV`aq;p za~iuo_0+lsrol(}p3HC2$~H#W2Tfl!Yf&pwYlH^kcZ7II3~$=bawaynNE)Tc&V*@u zb-FXuItJk~>9PuGB|7^z;J}12Irbq?eB=J9b`-FIOaxVCn^(jtLiYH6XGNxayd71o zXK2p;SA+2D`Q61Qg%|7IxBfdOPcbM6{%iB?;Y?yagiK8Hl!Wj{4Pg5aP1f7s*T?_lx(U^-v-4jlRWJkB-3!V07q^+IkQHdNBFjir z6eOP2jdLy%Mcy?!&ZO?E)-<_RB15FMlw!QRlkt z-C8S!d&v?I`w-#6GJ0=FdYaY5E|b+ndqIIZ@I$p6NvtgCy37-E#5)JhQ)_w81Vp$c z+Fij**6hF)u7;^WvW?f>OKzUh=3P-_RJ^M5fW++?XPwhLuzt*t+APeF!A#=R8(_># zd!{dMso)i{=b$0qyIEK~QBTtvyDtwf_XpaBVM2hcJg^-b{C?AObH`nmBGE-Ykmq-4 zf8QvaLGgPW_cNkfw8i^Pq&c=KAY>%xs*vdHM9t0{WV`qcyfPCkd@-n2i7l`Os+}EF z_Z%xvuI$v-q0sWbz0o2}uv&kdJzFI+(6Q!z_)ZVqeWDG?sUATn;#K+)g?mxkI;wfh zJn1U!T?~&`Z`ArE(Pesr84Uk6|E6LeGz3!do}CP~g{a*5w?(Zt5Fn5v&#`?Drk!mk z`7RiJ{S9YjH|z#HcF_xS^=MH4h&>{VZ#KsUo$J{Re_k#+|EIuFV%JKR!#&fw7t%fp zJ6$|DW;P0_IMY)duA%>P3o3PniR=#kKh;sVGavrd(TaFkT|0H`79&b$e6#Gk{^#4@ zq_?wjy@da#(v!_&elJ6PU9FZPH?^#ue3#>cmx|DCt+xbDosPz{8zGJKSrJvwQee`V zEY$67U!$7kMxO-RzK)#1ePGW#aUDs3YJ~Q zl`D&|b|~n!C;3+V{hF4rD0Y{47WZ-{rxW)3mvV0?B$pfE-tNLy2ZI5Kq{uDNHe8Y3 z!(7$(Z;*eoA~K-o8kBZL-z?lP^gwukdP#+*N#sca1SIo=BlwD~TgwN3A45VSsHE%( z=a8&UBXwVXvf!eRSO^nQQ-jPn&v}W*-`2VF`kH`3bG1Xmo2+gg_FUUu=u_L` zy4fOm#bM0f7+@>vHt*WAZHGuRH&)i%y09-YoDH=Gs8=gn-L( z9r_qK3)0D_!)ZaZ^1qA9Sz+eo&=?1Na0udV36l3UPP=_JC6AHdV$f*05UZ#8kJxgN z7l=%OKpsv1Ze;aBIVvsD6hcIqhvhF}A4fw6kvuGu;jtUEAX$}x>OCnIvxTjGB(QDi z)`wJ~Abn5*j*S*{Khd6{2_D;4D=#&3>u>a17O3cSxVKo_47Ne19Fbzp{yVPoB+Z-a zS9aWgWkDN*B^dpIKf88kn=49!$%-%&u3PrN<1@DCurAZ)z+)Ct0OF9I@LTu_f zNi(2=2b`B{;#CVQQ$em1=u~}^)+n#W$nh2fxqw2%gSViAQo9po&GeW)Pj=3>a=F?n zI!d?0oj(e9yEELbICHc$3|ybyrA89hqTDzkYtA<`r*9CbQ9RGHVmp2sk7`nLZp`gP ziid>dROdCkheff}w<(&eJaTQD+$=ZHEM_?{AD=B=UAfMG8 z>F`~?Q`}UnO@cQN{ssvRs409e(I$7};Hu5_-8WVO%eOLyTQqO*c%*oAnE;*Yl;Y~= zm|j@A8KTJ*_t%+4Lt#7cz%n3dEhyf12~x$lfwWVnTZj#oB@QZcyJl9HhH%=#g6!Z* zz(a_4v_+p_iFpSicy3`!FguZ$G^3}H|5Eiaf*>uKO`gFxnDoZhg5ax$ztv(_pK0+g zm{wle*~yTk$jS)~GQF-=36#cT{E3V*Erf&R?xYQK0lnCn>Dl2)C;PV6EN{cGI$e#z z%+#cp6#o%B@qqn1yJ;JpdR7$wRpvBiR(tY63o{pTGqq7JBl)AjX>1z3mkrM812238 z10;nQdhb}%r-K@9jS7Y<_mZ%$fEAh-BDvLJnNKpA&=sTk=bt!EvBjd3DTm`h z_%Rs*xe+IOK!isQz+s`Qm?G>Gx%BOcNXWX>WFsXI4hx#2dkc9q?g))N8;00PEz7ZX zcU^l+6CArQd*P8oCQ+|i2~7TW$?-MKDb=(RpM>4#)I_qoiGpbs=~8731vwp2lZhxE zC`Jt2J{fyF>pNMkgSM<@uE`gdK3p~voie3a*cYR{6EBb;Op{t-YlB>EpkxMbB8b1N z%a;-P& 
zepYopbU&H!5nJbq=?ssx+^-sGu5TDqOb!%XWD=gwv9XTU=M}6fOPZF%n2F9m&YGiv z#ur2b>y~vfZ@WTp-?`G8CwoumP8Rq?d@mR;o(?+5Ufgg58iT3H)m+V4y?Q~la>4!4 z9PAX?tvPMS=I->Fvu)QleS4mzl-zU#t$3vh~#eCpw!I{-^$zxD;-+;eVIr z*$HU*4Q=>g0PfNv6(2GU-@aKJQ4G)Ac$dcJRHHk4LSANz)1CDP3mhw;Pc$IAivmNR zS1GNO9=7BZ+taw^pNsV}BU-lEo1Sl4^#TXBmDnVa+|`IAB8 zw)2-1J1hQizNurtK`JBSMN-%E?G1n_DvzZQBxGk?0sUmk^{$@TC(MRKiNcf@gzyD@^R>UV77@_9)J9vKKg@9 zeAo2Fpt_awa=zC;ISU$)>BtT0X#Ph$PCU-J?_%XeJz^8WBSwH7JK{z_v=6d%Fo=tR zGzm6O^Gc=L>)1>3EY_I(1d-D{56lUth+0<^D3&(o@``f{S$qe|fEgBs;OH=^kXyz; zR`{FidxsGT_wXrDg;^_s88Xf0n7Guok^cNyt=#A=5ARU^Nw_F)XopJOzv^|4Z}IG^ z|B6Zds-nAt8~(8Ugi2TvR2>W1lJd7CPZQo)-3vlKDJ^&;As7`pc;k(c%Xi3w6M3$+ zIMSfmYN@|u-u8Xw%gqCn9pnfIM!Oj;SaW(Sq(Vs0$LP5dXb2hshPwH}vG60ABtSLFL876Ju}7 z2>WgBlMcJF)`@AYJ3f}Kyu_EUNG<`z$7Tpjz|s2#SOTjX6Eq8R+tXnhN-g3nh^!G4 zW}F-4_2t-b4=7qSHXv^K!95M)68v&q7Xu;D*JW_x`rSoQ3?$L+FIm;3Ayja>SsIiX zU$^^vpcRtaT8G@tM0^92P&*1Oo=!fzw(NxIi!3a{VT*r5pKr^w1F;>k?UXn7I0IJa z2Gb^3SpGe~xp2=ZA*i9oq!;w0mU{I2H!kSo0)a`4OfmRC%jwC2i_RJtzJ5+WMpHxRVhm(LlU>Nmsr2^ys25=tr`)4Dc zN~@M^K>ruG;;bw0vEu9;tvGWm%fI@%+5ZK&8?;T>LwvP?$drURM5hYOD$@-vlWv&RbR%^5 zc!~k;yPHh;@+lP|Nnm(CHj(v%Ut)wJMBWAa+**<$TA*UoKIvtvK zz>hyyE|dXNBYEj@wq4NI8F;&{Z|xJ1BY!hNPAVi(6HN)E>^En1rIjRdzcY}tnSzQ5 za7wOE`%dbAZ*kkv3E^j!JxLq;^NM558cFeUwk1;Zg&fPoQ;@!t4Mdu?kukK9Vh)LFx;f zViD=p#q>)h*~E@L(ag^F3DXy^ZM`tPNn<6P=&v1o8Nh6VHr^_K>^qOb3~7^8DOiC_4Wn*8=SawDqXB={uwq z6fWXGu^<~ffOK-32qnvb4rRY(O zkfL^6SWp(B#-dMdBO3u~K9j^;gK(`EpO;rGZ?Kw7*2kV5LLQbN-+@d9SB;;hKg)}@ zKyySJU{os${mD>h(5`Q@5OP*t#+y{KIBKX=9Z0o-9QH|n9%;ixUdr9K*Y^AOOlhGQ za2dK|Ou&#dh*qOf>JoQ&4k`Boy1E5^wwG?Hc4cvi4-)@Hy5DU6R}C?EtqG=P`e5}E zUHSpcSC=#WWTaM{gRNxdHc(e37QEK#){y6&bL4^j!su&tXYx~{dC8gncb7+_NACj+ zFJd@l7}6M=a-J)59_Yg@Q)$%L%;ti(qNR2hgJ;|0&OUj6UT9>`n^5RnNna$hrDJ*1 z*X2||oNW_)xZf6T-|V-J;6A%9IXmjnivDSR_5UNj-hrccZl01UfYn%1B6qjg%U9Nu*-*eB_3uG_<@rlocM(B~XPbAh>6> zV}0LL=l*G+69{l$NUD)mxP{ixRBEs>*O_ZuEf1WDOR=Iyl{1CBn3T&k z=g0UUVe%wvb}+n#9=5!-)!cc8wEcs6j|gWcy|+gP7TRt(HOFiQWiyeRe{1lv`z^-c z7hN^hI8yMOV%>{v!%PWUXReqqDqC<$~ zEErwnDWUMoj>k(>;XHPflO@AbbRl+x4n2YZ%fBXL#8%7f8e7IXMUF! 
zF6Poi#!AmEk>fd`m2EkRw4^If6-8EXu);9Wz?pYVbLWIH`QNYZI%j9VR$DyCC_)k> zHUE9HknFnJNVMyFm7X6R+84qjgBwBXG$MAjIK86f;_i`nJe5;iFdn$EV)aI~iRTaB zATyxm1)u$hPdVa7^>Cz5e)-Szj~a~PLHZgEh~p-I;An_)Wv+))Uaw$r^3YD%KVmbc z)li#3&VFRNM~isCJWGq*k=kk9omSRi;XXe6Ctkjum+m*80oSWa3ynr~v^ydmaSS#5 zbuzBip^qkrkaxDpdxzBj!#6Yp?JynZMaY^tRGP>q<73Z znC5NlKIafx7z+;c2 zU9l{H`z?+>*;;-AAvyfi}x{&B{&`wI`ZllbO^Djf(iFQQjjykd%yE$!2aVz^a9T}|9Is58^qqRTcI9xvFMz!a9~$k;lSmc3@q9H`HV)PH+HOv z;+44PMC;ZD5`lpqs+Y9!B7L^0DG{+>e@EKjc`GK8Gq*7_OP z_M(XleeY($tKjmQ)zX4r)K6DxAg{mnjL3xqmxF&t(rDCk=AuJ>E zVzZ&BqZ4?ndXDu14MpB}GA6|h*h%hgYrKV&a`tCu5K|y6VMUfJdX1W3E$W;q^M=c( z1E-i=cNYC7au*2;df1N~Y1(dso|5B{OeNcZ$gwY26m+)pS0&`{v9&g)<}B*9`u+2o zzY$&m-6tAz8pRQkUqN)`WhQrzTAPh7PAbAM!yn3;iv$!VLkuTF4s!3N-4=YuEK>L3 z1wy1W{ZtZnOQ}nGD_r=OuLHh1t(kZtL|(M z&OY7MJ`e7S7=3#427R3Co=B+u%6i;zPWs=o*kPOE*Sv4O#fYi+U{IkBmgRZpl@4?H zn}Z(%;ks5r+L6wbV#mOK8F;w6UH2t&0j4 z3am3sP8T67DQ73SUhtMd^-BLeCh(w|$x9(8hteHB5~b!z`JN8onl|9WR@l?&cX9@` zeKE6;0Zo?pOpYq1#2IRKtDxI~S0MJkh>~d?qr2_PQ0(BQrZ4*MAbn><=ADAN*@}V^Wjnf;uMQ$toyL4~KFe5+<@b&PCLzE4 zA=Xczv3qCoj z;*{+=Xvure#zXCi8`8IiUm`%tSff~w`eu^++uxxSBXl+_6&|>#CaXwywp!>1U3vx^ z{~~R(ytXL$z#20;Q@mR)j15Ckl3`sylLB!%$}iUhaJ^sAN))A^zh~n0^i2&{58E{( zuYS)+5AM~+{?EB0+9KnoL!0*dNVZwlw=bZ{?cW+}hm2ZB#+ zZw*Zk+lLh|OZr$(6qPDYJG(VvYh7wbr5}g?`|#xYW&YZ&(4%vee&xd$R}ZM;X6tsB1I+Gz+ejP%GDgP{wR6n1%DNet=ma8`-;2QNLh?7FXi+X1(}Z`+^R7; zs~Kdq{zl~U@E%z4&s&41{-2!3aDO|(1m^ZugsV0p6Sr!1cbpyCQ5=g`c77&@_bjrkwco& zT;bHZqN;-HhxjAijC9S;*5PUL0^22ZI}{+QU#O;LTI~w&vvD``UAzeZQ{?Mo(J67E-H0RAGUtkFfr);eMVwPAm|h*q+}?5UtbKRk^zOpt$&!bq+%OX0%+iyoyS@~+PB~`*NH1OL zu72J!`{2n+-!-?0EmgI5<-86Oj88H{=IEK|T>1AU09>jfUy&3ZX}9PK=;k=5!p_c& z1|1U=wNtJ@{#mth8vYd$;og4Mx>PfMM@XvN;0<6Jd()!>+cwM1FJZbt;gMvGQaJ{Jx9_V`?L~n3@HPkk`JzEvdj+<0AxLCk31|oaGr)pOFw<1cJn9ZVW@b7ysA8x zJkJfH3Z13~PelP!kEPI*dTA{&!SgTqo8?MkDBFDXW0cQUaQ1@*zuEKv)3YK6kFIsU zZ&<`q|9&kqKKWPrq)$rYgDKtF}Ne4xhW?h%Bf>={bJb~5%lcdc< zT6H4XtMHXiZWr8C&1!6~d}xke9pkJ;w{-O!7cVz8_Uskh=~$X&Z~LbT>&NE(`VC=1 z-IbcRIcm;qigNJIFKm*Z)mQ(>E$dN>(#D?c2P?*(>P^?4cV)JsZfnO;e`K8!w6=tVNr6KAH)JT#I?cJWlpOy#Xx%c+9-bf3lp-X(izUR0S`4P|iB0GP@7jU(ieS zU65QUV4~Ep>o)18PgOktQlXB_76RW#(W`89O zF`W5^#h|XLnu53nlScL>VM0wOatb>n(A$XK43QgYFAVj{0{p6M0mGjS(w_> zf<%$$8;J*uQU8S+cPHYY&d&Xzdcm)IU)<_GP&dz(2p^1?rFLIwS0!FB6|n6Zqa(z* zD}#GeuS^#B^lYTE8=O&A`vgbKOVt21&sVMM-&wkEO_le23ZT29xhOQ3+V;&rpg{80 zw0w+z`&f^NleuRZP03{AszHqH?OI+Ug}Br4 zw7c!%)soKns(&cOt>p*~ttQPEN2a9kHr(yOV%v;rQ7f{_#eakJi$pQEINEBbrVSu0 zjz0ML@aIF`UcU%XSLU4>!A5{@SYDlPUfton{<*N0)VetUC=zxYEY}w%X9CxYyS_0I zAaQEz3F2|2_z0eJp1iT-u{3C6hL97yjtPkLVA*WBT0uFh>2l$AqX7Mqaiy%2Znhts zrG6#U{_0tmsEr=m$E{ED<5?aTEZl>f&V$n)x-mtczuiT5E3DTbh9!PkHnMQMF>oj_ zssVBQGv4ZJLs=^rIx-%5*P;qkE3c_bop>Q;R{zm%_vULHg$KZM!oJ*iN}aW%5a@$) zs_p$uGf{KQ43Mv0cOg@N80wF`Z#g4x81=}Pyb!yAcEHY-;t1dTdZcSfT>z+0i>UrZ zaROTCD{d%Co|f<^1$g0@t4R$lzf!R2|HcV;tf+i7LN3O=I(N2-rT}Fm!4>$0jVK;Z z0JxW5DIs2UldULZD=+oFXaQD2T3s2er)o zdc{xS0jB1rn58RGewHtV+zdx>S0mmG@ljCs zskvqp{fpzqAg4<5O?zNP!M-LCOCp?(L;yZeSQES-!k{p5&%8>=QT=ES*1s=Wv1HiO!jmAz(1EcXpg)zox)O9y=m!Ei$ z!}E#{`FRwz&tj|HE~GRfq1-+dlF{z&tf!C>kO{o_>nY<)6tf%6%S?bUXHP`Z9l?Ec zzh}cOsJGAep58lG+PLD`WUh14OUmg?-9OzgVr!ZBs=6Nk0Sssy!@BJb85Mp3`e5w& ztzMXOi)Fs@TdR2XLAA5>+H^krE@uQCU?Pnt9%PXv{*@ZwS)$3$x%Sdh)Ppw>UiMCm z?#E+Q12ZA;GuO#pchy@-lv7_n8fSq#xS<*mTk>ocr zZc|t&Wl*-VSyFbMy9>DII{l@nHS^wN|?|^^K53Fu}mmE1>JsV`m%Ck-~K71Ht?zTa)+aSdO@PK)?Gi1{0xz+n`cjv)md;S{Fn^ATC&(#hB7&;owEvm!RxRpW(-MR5ubS(;j~f4AW?7h0R_S{euZ1?Bw1 zr@g0ir}}(5cAQsi#AqB$T0l|t2VYgGEJmYmRDP2Uvt*zZ|7gUT`%;skn<}i)gZEEp82cY6&sT{ z&LV0Lp9bBwyFHvzgO4ith=w+Erbar2oDf>Gk2D1kTX&u9SPh)+m6+Rr$**=ATeyZ56XxKG?|<`a 
zrbFhq5j8rL=D#?&y9+QAet{0Hy)qp-rTqF@Ifkx8ZPj5!N0oEqYRB)RpesKK-V!mY z@|`DS&M<5Xd3oOV^ri)g+THxQgJ<8jg2yh>3S9_*tZ&})CE20CJbua~5ly_D0Tu&w zq2Rn7!2#ebK=jD^e&Bb5$M|a)9>sf5NM4JPhWHz_iZ>o(q zDO{H=rKi(bwL2FM#giWT6bc%tc<`yDI^70t311`QS{+njsClm!r{to94>c38z{PaQ zErjY5U_k#v=@Y9}XDv>!SqWZ^;(;i&0z%FtTSDZ$&NI!$HGQQI+GyaJoUmlIdW;+Q zHbn+tQd~z?BwhOve_%*21r0@!M6bkbv_)}`UJtt}BDE~su^`ESp@FH?ZoEFPxnlho zH8#rc%JpT%ox>O41I#z6B8K-g%~(-7?@6!uo0(12OR@pD?D5iFC?_75M%tEHyW0wj z1MV(OJ}N%>s`R*+fHzHmZ7U~bF~iT7v@M`PUxOYi5N;MS`JH0l`>ka_`~gLY1CVmZ z`S21+Wh!1M6Y?Li-J$s6poCH3 zb&?i|^zMUGN%T*&xDObgCjFGgG(XBUyq#q)$VvbI5%%TLQ2+hgpP6AWmdKVt(PGPP zMs^djWE)Ger(~N!gp{q3h%AwPvW#sSjD5)x4a&an1|>p>QlX;0&*yvp?)&$A??0aN zoRf1p{%|<+dcR-S>$H}<}Srfj=q)=+THPfAI#)u>egXz`ZDgRVwnp& zp@Oal@%FDwH5FHtSKL7h+>)WQ&ID@c0S(8kUXa*RpYp&81?1hG@TZT7MDf&LR}WU* zG~7yaQ;E^E`H*i)7k^QcplwAE%D6|cZ=bXallFfu`Z)RcX15~-4e?&k7ap)hO#I2M zZ7dHM+OnBIVOb~tJpZ|MBJJJn$N{!@=JF@$`VW0(Zj0Qe)rV| zhN`MQInQ`b;v&ms?1STJ_Z|NW*)rf1laK^rPs#pTg9L+A+gF{2L;`wImPfIsQ#8#0 zF%c$mwdsg7r~e^X`cyO&eg3ZiXDiW}?HTQpsxI5VQ?+8XfXa=tkUM14Ge$EgMH-tK z%5;%IZ(hGaD5b^j8Rc(IVZPK%gnkESI47E}#$5)go~*h~No%JcBH1+S4DAXiw;d zdP4%UF_*<>AUDJYAY-=ahUX_IbyNxfU%=lThvMDk>}gg@fWgJnp`R4pPKFOc=}3nMXEN`|VkBuwDl|4dZh;F(AS>F_*e@Lt%l@_YqH}lcHa7t;l*t4c z#WI;pD1V6d9k8*5$HuH+js*NluV(|wu<_!=5V-}Mv>pu&U{wFQtd=zUCZ)=&57Xef zU80YeU9ySdo9Bc~^H78t9aruaD_mO+)6M(n7oMat4NNms3ilqTh`F#2qub>srkb<` z0;|<>Iq0D_j5EXs=!KEp@$lRCdyoA*=CR=g}m^WOnx^u)gdf5VUFC-#aKU^DiVrfc@C!ymAIFP}ZlFuN*reD{@ zuoz5MxNiX-NZvvS@tzZ2D%$f7*`-R*hu6gQI{fM&}eks#B*2a2`NmT#aINnfvQS z5ZW-RT7%&A@3JKwt+4)s`MnuQe-}m|Hch_7_^@Xpm#`An4%Z5>eG)<%sv!-MS^Hg5 zZO&HDqH#OuD)Co%TirGKV*4(w9$i3x)P>fj*`$+7v!CY0rJ26g6W*e#y;U1oDvbQm z(|+{h(TYh?ag+bz@JmS!RB^jM4---@wSK5x0>@|=#VUh-O1BgCO4-x^|9>Wi*Hn{_ zwEbi?M0(G?j?F}H(6l&mfg#fUck{xU3Il}1dlsRh8(!>rft?>9cO*7G(=S-&L2@TI z8d$+VWP8_3$~2>d#rm1FIj;fTT6HM_OqzsH+yt6ZeY$`5qcJj1I4-Q!1yZY<$xtJI zjd7bB)+Pq?#$Gk78!5bsJF^NaCc0|pNXT)?H!R)IFUNzC85MN9JKE6pn68%2B;c>0{0ij_!L-JVb%qg>|xfro9hvgUWjSM z5>2aAqerhbJr{84K%rNd_5q8~o3^}%Tby1m;bn<@7ejffiPqSr6DbG{2^Sfoo_W^} zNL=8JXuM;Obj@9xl;cvID*7~H%e00I`-oZ(S<}!phTAGP+r|`{1DOLR8&xebeTvi~ z@%hNn#^`zK2O0Nl9c9)9hDsxQihpH+tj}xnb25`^QzA5!i9JwFPUSzcbHq(I#Iz(- z@RcUBb5){)vb_S|XGMEZo$jsSQ{u z1p{hyi5@3kK!zD8{=#3_=2@{Y+xeg3(Kt9cnpMO!2RCnVj6mX;4c20TkX7{ z$7`&2NJbY5r;!-`=hlr71mW&cJFA^f;}2n&J^N$?8zXL51&1#Qm(u$RtxLrDkk&LC zwHI#RB+R;Fh*#f*zL{A)6aZiTZaQ}52O62({^hEKH7g_TeMozI*b>q@wP_JRt!S+L zf>aG=dDsAMoMK#@D#?X7I*f^KmaF1&tr#}Uc`it4h?iz}Ri9_?W;gZ%D<}IVOlAJq zT|VRAs{WHB{_=8>zXDdgNFrCai;<0b}L{OYi|zq#5U73fT`BD=@H)RqK6l)|qm#U2eYd`o&4?mSdFsTVhf(8Oxsf#I{MywLF@D zPN;Ji$%eQs8^XTR-R2R)XG-Z;dB~F1to=Q9|5Ka))BXQ0$yX~NMu>P9O;vU{N`m;Dq|D7}=@!r( z*x;X7qui23?K&OPX##8vf~mB7jJg>uQ7AfYpRzrZajmIJH}3@Anx)R<#L$B7yBke!ujpp z+4gQb0rnYWzkQE4qZl(Gc3NDwzl2;QoZV2c-<)*4Y4j@_=Df+;ozsadhQ=8xHZPy8 z_#z*%R;avzc%~)+mis5NVghN_5LqiXq;PBug9#w;h`IPXh;To65LFyeD@5w9O;Qlp8ib+~^5GxdNOQ^2 zu0i#R)^hU*LtWtk8EmTBf|pu1@l)4)XRiBoBporsrnj#9E0$uzOY6>-*@@hBGQ6^;5f}D%(n7Yh^a#o?wE_G*dgrbFkmP*n zy%CP-JB)b1S6UpTw{Df_^WAXz`W!mHr8k+aYoNsN_WOueb;qPMausqcq}vpBNn4#ZPB zDc5VP(3cSN{Y0E!;hqlQ=~qw#uU+vH5zm8*(v*_AgdW}I_B<<>5@T1y*oS(WuU8Ze zcu1nUEw=hl!$b))uEaVq$QUqW3*cj|N? 
zHaSK-K@GrdnsGlO0u5mhoZ-*OK=YvIqO05(P^&4;8~)*y=RXNd%D1l9oE)UsRitu% z->}6If3Z+XTuWJQ3If&>ZqWW;kiPn)Yz@Z3f}}VWHf$X?w_*L^^;}hDV4g)lH*r2z z+G8AwL3tdZwk%iF#;xDvy651aV^kh;NBWD&J{^=Sx;Rw!@#C|MJhEsmOH87%Pvjx* zN?AuKIhB{9yDfk4SD5goUs?9ygG1}wzHtdjQn~=*vg&ePe7~ig;qDch^7WppFzc0d zMp?AL)uCbn$8NwQ)XK$#2@jgRw%9A0-U&XqdLFv z)6R&g1K5c^_T=T?af2uAqwPW!QX>7!F9JRUyjnN?+MC?9UY!OBJ=2ML&I*aAnKj;v zE#raGpz;izEUr+x_xNsey4r-zlX7^qSQ2WY`zGUY$i&@KtaIK-2}^Am+X0A)k}rCY zVH48`P{Ql~#Ypk}Z_j@4RbBSS@XCQxFRt#*=M$rA1f(&+o?`5SHaYZ*))gN^%CmD0 z9!pjY;TL=5V~dA0V6*t7m|`l{ga_+lbz{}6*B)Y&*sH;(8)wrCcy(~KnsR_6NDgm^Kt`rrNw(+Xq1>Xto$es>l< z5Nk%pH2I3s4Nu;HCi0yXI5%;^plPHV9Eem5m59i&siBQUc&CV^x za*ag*t>o*>l7kDqqSaS%5Bd-Z{u73 z%v9LNfwA6;DB<|rfw9udo|3zS5m%-Cz$pfJLdDMhd=2*K1~SH!W4|)V=_ILjOoY_G^Nv8oz>V{(Lyz;MOwkx8I+g(1w z-t@3O!WLjmAz7z{xTB*`Rbh<>FkFPZY1LHGRhxqKoqk`z_JuQ_+o>0v+aCUXV}f6x z>Arkj6i4R0163O@RY}zbKCm2ZKnL5QbX-2CO|zdkj$r+B9~<)Y`-TmOHg+ zloK-bIwd1IaF`*|L$^ClIVV%Et9d%HJ0;Z-|MnzkS0G6SbkTK#RcPBpqkL5)?9r4@<1QX1Vdh4WP!$)N}d0cV;G9FS>v6)r6rL|XkZ1Z%bvEb%Ku!?Btrqv%g$xjhkVoD)U zO5a^C#3bK)9hl(?{(-JfN)UD#v#WtO^d`>Z>Ie?!iN;9>848|XyS{jr(kjr#(| ziJ#*VN@7C0o*uqFFeD&%L>+PI*cfK4PuXNb=G7Pv5vK(&Kk3fWSRO0{f62WNY6L-B zxIbPr?E>&e3HH@u!{~?zx55OWXgqYbc7>rb?#t1DI`D{`Brtm>kd^*EWV3@__f!hN6G*HuS_w$l0oRc@E`B z4>*a1a2W6Ze7BL{;V8G{#+Osz8%ZRfsQf?3@}>)3FWERy7!;$Y;{}(YmhtM7O5yZ7 zSgqmz;hp|PG5xD@e+u)5t?wSn-fWm#4rc2;MG>H^G2(7bmF#m|(}7Gn>++p|NL|#z z_Fi7XW@&7-VU51iLHmN=y~r6NX6RU9^m*_6gO2)9{6g`|!%*HkZExFc7q7}~G(52Z zW!7-aQ2F{%k;dcuX|8ay;#*MSCCgTb=QiMJxWdCkv}hp*6zv$9J(*7qU|%Vgv^E~C8M`}Yd;Mu~P|~Wn*Jbe~0ai|*@kmce)SDneUEdAZ zX!5ijGCYj!Az3UpW!mO4D|k5+6>TG?maq<=sKrIgwz>$<^hy3!$dvrI=cbV8 zfQpPQ0EcJ?`jcSE_qH_QjeWP%eNRopNvX=GMUXR)9TP#3W5X!B?sZ7kodv6F%ijVS zID6M+znEB?#fOFWB)>&mCGuYTYi#sZGbR|sL)CG+8w8ek@H8IeoDh@S_nyPCp4CSY$? zO4|-o{h)y%TLnfsG4i4lZ9<6*#D@P_)lBBGc*$)T+8qjQ;v4#m$jKpi6YwcV9G8f_ z1Bw70m`PxB2@rV(D67og`O;curi?)sC)tDNwq|1%pX4gTvQlxKZ%ihTa7GB9glDgN za9N6C$`sV&$W2@On}woEIx1mh5QL!zC?5w3oY&y~=_NLdX;DzI*JNXoKVn9I=cuMM?HApx5Ed;9-?RYDu`K^BRInm)h-Xnu}ndqG5-fx9-p;zmuI z%K&RzSS0b3#nZ0&#fM%gM`ddom8rrdL;4uc0!^ zLtP7u*it))d`!SO)Rk&4VpMI>0^}$K7xIEahNQ$IL-mIb%^rU zGgq$yigk{dEdT6136UsZiJ|bi-K(8CPm)kl1u&$U1fV{-ZY?68K#Dt3Vgi=GS(U1r ziXz9=7ZoO!%&a<6)}p;io^qVWvL~k;vUJV{7!Z?pSOl`BNgU|x9}o-2$c1+Rt~*-{ zQ(?d8Rm%oggD#Xwo;3HzFs{gJ{+(n7f7lNF2 zTK@k0YAYSI67 zlZKk#JNn^Ix}OZT|NlV({UdV!1f;((rUk@P*wFKsL|*74TRBmFwx6A%PdZ;*p{zm7 z;l6RwCQ)q6-#Tmj{Pv3sM7J~!sSmu8!Qo8x*!nM#X8lvqw_XVBHXQCfd4Q@qiX?jS zT8B2vnnbCMyOy5e-FW8r?2Tn`2wPA|hYWYcik3eE%8I~O4*0w>a&v`{?CdTeT_Oo6 z*zu)hi{zNe;fI{Da06tH=hL@d7$gY^AT2e; zHYY%gd&E#EK#5TnLAu8>#9rS34zf`W&x`_?0`-kjvfV5laqOxnxs~TN$oD4aIfIFq z(@m8kY%T9>q;^mV1&yONgF3<-U^i=;?00(r6)>+Trxz2T)mK=0b$+ei2u^&^a8eJD z1>{)=a6FG$nAMm3DcO5`;QR_54cS%}XOVnem`O(_LC?#rb?kR#>PPROtbq=;Ddeo?j+6 zOtiDl>Mjw5zsVWpeZD?ua-JmP{BZp(SBK+gzUaNX{RR`+|>{R3bFXaEBVwQ{|bMqk-TI< z?CyzxZEgrY-LWdx?M?XQRiS|!+FUcB)ywuc`%Ug(A{cj!qx{_**aAV;LxTei@#R8H z-u#4)&1~hO4LwlS1JJN>25?}66JmI<&-idcIQ2h^M?q};JvRTMmp)5EW`Fjik={l9 zyIg;|{nM!Z-I=sM{I^ldmczt~w5B6kesv2FJ(caF+d*Wl18Eq#1om5Jy(t9!O_h%v zeQkb=y?=xCv%=BY`POg|#ou51ugYDO6rzFx6l|&!Lsu^{GZ9ISnPYMN+0zU~8@=Np zbSo~IEM*s0%mpg1G%9dC1>;|eB{h;kO^I+(G-`OfRK(O$9sKmfIEQF;g*S7(fiEVO zsxlfAEf^*L649L!UEafw$q+BZm^9Y7DEo58q*Y0h8+5vC$;9d^of_@=bh%XCr%|~J zG$+tdN+Mb=_PUh-Iy?G!O)23Dg-bGD=8|+$cL{v1k7a=c(ZAuD2}QGxTNAwW{h|f3 z+&i-P$#X4??#ZT%j2fv5V>eGNUVW%AmaLC+!a;|@qe1QR_h|e0NCDqxy3T=H2Bj8!`%K~IhDn(1(?O!?9Tf0)p z;vGNkKaIM=1Nv9Sq;GO(yTuMG1NhU7J~q%v4J>A^bbT4M+;%wfT0UcE)N(F2}hcL(5``3|^JYVAo@$09q!z!rfNQHA{g&%2+Y0CLx zfjs;11wY{8S*FS(I( zP7*Qt*z#;HH|W)29Ya;5UPF{>NLKfk5(V!s78OFK;vN$_IdNe 
zD>|^a1iO@?QcH(CwFOLa4SzCV&##843WJ;UI9@1KJ`Y()D+eP>ZAv&4AYk+mgitDf zozN)6)*q~Sm`cWCTKTk7cCof;yH2#y;t|}jMy28!gG4x+1-CVOg-jj3JCrJfm{h$ zDdMq1;^8mmv%{bnf2|YcU&UfQP}oJl`BnHYk{>MRE&mywa(}GL?)y3$cPR8FoXM7V z6wXwnrG&T%nf@|ZI?2M)OCC8%+qvmqOBUr9_g^sOUg)m%k9aE%%pthXB?ah(n~)U6 z7Gs|)02h2=wTT~K)o;eCxq$g(kfMiy)GQsC1G&rw2`(D(wMj)+bBvmZgGWmcb^AlG zu+rBXr(wc)_Ef=-ck%O#wWW9;LA0uCWocA}7K>}8k0|K9QkVv2&LUiNaUc2065i?X z8nQ6jx?Vxtv;wWsVlFatLmK@WPZ^{-yJouDanYN4g3${rueZ{_RtI4Alu9yV&e!eGuR=Cut{kXC5+73p^0YM45q@02hLizE1AM;^E3`sz&qOV z`)$(-rsQ^Zu%UQD=h-uzpTUy0k;1E9fMZ9L1^0D1ePA$Jd1BQ9Qs5&D1+GKRe+Q~aj! z^p(&W%-JiF!cndR%ov2PY@FXch;t1<1tuJ?aEq~EAq2@U4}cCcL&3VRlcUP++j{jr zaekFkz4Xa)xJ353F~sODuoWx=x_)`qI#*hmM^I`PfH- zywwn@zcxT^xIs40m~74;4IWx}B(2BjZO>$_-v7Xr9kXo++lj;#$UggQ$}wPK@6v(H zdARTVwT<`vpJ`90ords}z_w#CHXYYkZ(kuNoeHVXC zQ-8S)yg;QpnOGe({W`{o144fXj@1tnbPp5#kGmwPy1H=nbLwIs?T& zVFZsuuY!|MOw|5FN{qL?XK&$|L{;g^01bM=_b%_Gdxfq((a&!SN{c@kMzW?#kqyVWkA-%Qk^Xum~)#alPIVw7y#mS>~1vG;tJY_KZvk-9T#xr-wEtuA5(crWGXwS?FJ)JiPPm+o04HVQHf~D7!QX z;_+U%xC8W+pr60k=n3Fi6aBa+>RJcg_M%3j$d$DDu6s3>r}XTIqJ<0axmAVp5+$pbji}derI2;a+A^E1V|gua4fnh@c2?k0w{ze7VIjj=j@7yX8sBJM@W8HF1MKARIN5;0hNHOPqMU$$$a%WvR>mBd> z_ZU-j^0VSmX>kkGU=Ww1hu%ASj@Kxww$I9rFEu_{D?iu`=!Sk^^e$iM8u3RA zUHNo1@0~j zAWMgyD;vc#@C=YO_kPz0u8CYBf3ULgQDPUm_THzq^y{Qx-H_I0s_>eRjpyUPB8Rmf zfBO!gRa$F%?y>L#ZIidlgfw-;pKnQ!Ju~vI|Ge5&|J)_VSNqk#Un(9}eRn0W>hn1l z<1THZg4URW_&*g~+jo%mB|L`Iy(qtIF^T)46t7*1zcy%4vf*cp<7X3GCzxvtvALEe- zIO|+KsK*SAfX)Y^jvPN(lGA~)Kd;^P+V%PQmDqtzw`#or zCF1#>)q2(VzanJ14oUmQH$}YUrHN%G=~hr1!)ziy>V=BR1oOq21@esE<~NwWeU+sG zvsh9;Zq>cqyO-MLM!kLI8e=Y!QfT!%UHY|@nkKdaSzn`%wl?;#wXZ6u>tg!&$uL)1 zU8!)dPH5!I?T?>4HRaa#+_Nh^c5lxeo4@|*`rB&~HH(q}@Yi=Ey^GfSV=EtPII>2{ zx!x;{eEFG^((99xsd`oQ77bm0D5V=%mRVJ0P_@VNdd~{*UR+@M$o)NW%xLGlXLDWF zVPdq_HlgYMx!bR9-K&Jw_biS`Hn9(z1IFm}rS--j$zg4{o|;Y)V;wXt*P@ z6XcrlI6ulY?5|8>tzM5^4^pvtctAyV+GA89!ngX9Eo@Vu-Zpc1z>eVC;1&2V-vGD} zmrQF6KOO0FH=_K{-YDU(CAvRYH;-Czi6t@jOrtD6D9!)mjG%mi{{#a z+^drLmDMyY_M_Fv8=j_Xts77IiQzA}?%Mbzp9H=AjjGs9Ht=Cba-HMhot7*q^K8M7 zLh~}Br9=;ntz6XL{LwMD3r_XP$xl+yQTPCD*triX!_8mXOM~ifLf%f;^*_ZkJDe;I zga6xs{k5FuAG(;QDM*YdMSv1SMx+-gFM)`a(QAatHhHhb9vYE%_hv@4x5xeq>=0Qd zre)H3SM_26!5fXO!S;j#Ik$U zH(wNAFXZrZ7xtJ~l_YZXY>md$nVj{y4X|@|m(tG=KKY>?-2pdB0F_aGr3F)OZO{1} zixjRHBAw}02xQ<;nuMS?#~E{EpjlI*^pF^W5+<#L>>VvTWEYswFbTbzq1)qd`=cIS zHrLZi=EZuc+E=fVI?Qt4@M*QVD~9KtF1@5Co893bE8V%FHf5#rJjD3p*`A4Tx4@QD zQqQd%wUw|PQkjj@1((&-ZA*|3z}hH#4^kj%>W;eW`nZjwkB}tC%-*eP?@xrHx+1#P z1)H7}w6g)f-rmb6-QQfX!ndNfIkMblo#)qooRIpR@J(e+;=b7M^(On%!|R_!@=qCO zd~4S6U^>4cM4JaHpFbV`5|p>Pr9Lq-!?L&AP-M;-YTEW9;V?crVoP?FIek=Mr_0J| zA%EeyxESA1#jn)xwf7k`mkf*g@O@+Rh zi&44OHxfE^l2~qHaS!643J3~$_Z>xCpLPHf7pCMlE}njsX#`GioqtQc_x!3wg(u0; zn+XMm$KKq8gKV3f0(9c$OEjpPAn7n#Hz*1xyaKIiTUn#Dqz*B^L|6Q#nGwqr$kk28 z#<7#qw$+S3`UZi%jJN=qd+MU+>cv=Gt0Zr7SG!w7-h^*rl)>YTLCZOwkDu?{wd%CR zpA*}h#UsXrE_q2wxHDhceE#<=`3Lt$4ZiAn{$JjPgEaQ2+;x76Oi>ouZ}yf)X)T4= zvOY4p{Zs8eNtt~;V|s=Gr(H1$iSz(d5duGpxbX%}%;76r2OT;u&f-YjJ&(_m`}gnQ zP{`(3o~cBjVbrHMcij9%S0>Z{wU}Ki`LE^lNcu5ZtkT~q=&g^rkzTiiv-T~&(+Tx6 z=2s9sc@yR0mCo%3wZi)y?J9iVh2#wjh-B#y0`Zh}fC%xG4gnMZcWeB-G3o*V3ECsk zULjhthDst^;hlH%p{;{|obCM9dOx)y@-!nF?air!5yR_#rx4bm!!xw6{-txf6Tl>O z!uu(j@liH2tPGYUiF%fiW&$Yb&A5rV663nqHZn_>rD4*~R@9bGbU%ekOw^l1yZdbn zvdV)&0B(ev&><>tGTAa1s8r;05ip44hFc0Tnv@&!dvGo25(Esm7<1f4pJJ3&)NTmh zvYr{+b)Sk`^M={n_7oVUvZqYt8K2h|SO4Z!a>l%nQcf@n<{#fUX%noFyCG9J(e{RV zx-y`(>f!Ky`fcl6VTmH8-)_f8*ll8mk6kH?Dc1?SUQ8U0$5|JFLgR@j4wYZtg-NnW zGh-erjxUzoZ1}|sexd0?CC;}pgh}z->@7(zxcly`WvM7@rH+W|smSHLV>VEIhx587 zKO;KxU<*0Er72FT$Qgd7MHm(LVCvrx+LHbyp|@HPKId@bSN2rbZ|UXs&yAhG36DQo 
zUlq+={?#`OZT5Js$9R18r&pp`OTmCCs!i;vh?*=<2aC03PZm>n72<)4W@I%GXr=hib?sn_M zXMFTrmUUvfnWuj%cggyZJ+frUg)(Hpp4O&i#t0!))KtzGC*Sr1hHP^GA8s<_>RbwHnRAZm`rz|?7$v(TYZKT2 zL&EhO$pkq!ARE^0Xb2b2cJh6nT|5rapqa*mzz70#z*8V8V~O*)C%N=qte-&Xk~~xo z_NBLQb=o^ar!M^t=BW@<8Bm+EIap-nMDVy5NL(r12~%6N!g%Oi3T%9lJiISjtCF`R zN3Np={5~gS{BZ%p;&g&RC93}%0*jP=&f3NMea`Tl(}{}W5&d!@C!z8++lrJR3&2Lk ziKw!K8}AM&y_Q#2WLy~15|*6lpY{tiAe-;|#XG7+wrk(>n(A!tJUu9AP32`9b!Ma) z)#n55+6HhS8=MB#9lF+siIn%>);#IAWt!Skq*~4fwfnaJiKmX}OBHL&lC)I{pA567 z2QWBJ{84}MXG?CCS)y-QjlSOGTQqSHE@T&Sji)|$<9+BG9gPVm(t+8|nH|aa&!4^8 zN19&TuJZrO&enaaVBq#g6Mp(!lG#_KVug6KzHi0Qn-0EiqV`K-Ba(n1MlNqL(Eh7V zBdnAf4y6Orq^>CfUUJj9e72&rK=BW9iSA33H6}+1y=TsqHsU#;>lHje8L#+zST8#cJSIEy<$#OJ5DKKS84Dh`+n)@tR!{fe?x36D-nLpz zVC?onlaC{+8xuE8?J3cC0FSl-`E*BP%cdCrX`V`dVLutOMLYp->u#lMEFTA{nlNG? zw8funvgw?Ml(9Ij_{ewLTTF`ookFG6_-}RH?%1c7?V4uiN(JhlQR-s^xU;W6Xc_zsNh!~VK3SH>PF(llljXutw=Fi4Au^elg^_i zm&#fO`xP1|w1|wT73EE}c+vPXaQhd-io$h+EraztfMT)3WhV%6t; zpu70EsE=7>lxYb}K2-vH+A0UL3{@N!G3w*CCCi?!hYZ*(Nhv~4aCuG_&_1w{vd6M3 z!=hyY**3)J@IEB3ba;<{{Ac@VHEn=5UX~4-SE?l_RWcGY@bN)hK6~bb#~CR?al9)h z&GMWRvZ{E1drBW2=vJ0dX)=A;DWgCZh|fGlUD=~X?G=_EZ`K^z<;1r{R-VLtXwm20 z)K_8;bc@LxLiM%a>yWyQZ$j#{lf-+JTX0)P*;iK`7k;fu653!3?gg^ziCZp---7z~ zs=J38Zl`VWU-5dy>}Wl@pA8uB9V}joeFdlYgZFIpo>-+fLMTt4zh>Xy~Ga3mCW`(r)f z!fWrbKII6w!>-V2%l%ko+oZEQ2j4w6<*L%@RX{sAzC|VTL0%0L*|n zEsW|bjuzWhOhU{wvN)ET$tSZfw-i4V3zPCH^rLpPiItMz1*;DUuT+ zvvj<+zrYG>z>&MdpfXTm1)Ky#7At{E!+U(Ofa&nwMR?11@DYGy@XUnsdVjq~enyn$ZS2V1eSnAXmByHzIE{x?mpOf*C_1n#-8vvR3nX2{1z zuUO>LsQ6J)%EdPWRYR!Jy*d3BEf~gq>h(ZFM0jP~j4pY&cE0axvwPjipw_m^fG0gZ ze}sNJGU9(MBsb>p4^NzF=KwcIbqJm1gy<$^h!0{KJe{$!Pp+y$U0P9P;f%dvzr|># zhu;)4{L{3QTh5N{zj}L&`M?km7ZKbMAk71Xo6sF`n#8;HZ$L@YOw(shZPw$v(!F&l zRY8$Hjy;NT@XEcrXGhW_@$lN*$a40PQ#szr8g_}qy}h2LGa5i z-w0?URz}-$6E2Wv(A$$!pYh+LWbXgOn@K%A5-JjnBt>>p;W=n29oKup&Kb&}X`zYw^6CA`-kW)ys*}1eUmdd!#gpidz|m=UB2XIw z687(#5R*FB>BfK*#93A%w0pf(^t!`H1ip{8TUxK{k9k|jQi@0Oq1}_cy9@W_^8BvZ zQ{bevNZhS!Qr2}CpLVgR`xHSJo7I*9kwHi13v(95UWG&2UuzbU?8Hj(ei{A_K=xo7 zvx~cnK(`D`BIX zFJ|FyWUmU{64GzYJ~LUE@}gPwNsE_UR&R^f%rl9@{y%(`b0hMjiQ4?@qU%tS2aS#n z0Cxw*;a^}k)$d{;1sENgay>8d@#}PtC8T2rbiV18i5{5cVP`PEs=(jw)bVu;(ej|Z zTQm93RoWe|ri7UqU@pe7m&|E>U4lfvwBXRhwi?0zRZ#(M7ehb4!0>8s34qcI+`SI^Lpc^5U}^$B>thWDzPZkBrH4gMcI=KX6Rl`PiASHZ=X z2J46yJk<~|odvYrEYY?6|85MYFXiB~gn3-4kV-I=gX-#2h>fH-3E+pw4?$}#R>p?$ zz@FLGb(LtRBMO^IK1uJ{^DQR0>tjFv@$9LYP8YZ&?AY^~aoGljHNHZc&OvM!>Z?re z?-c*yNvMO<8g)1KTG6Lv>Zb?!$~crw!6T>30Ol#xNE!9Gr1r6~xtxyzN%2J-H@lu4+L98d;Oe;f%B-mJ50?xj$_`QqR7Fh_dW zT%T_ShW`3d{`$AalPBNO0+g}a=@|5mE(F(jEMFb7-3!ST;HG)+Vq&CT`@{Xnqu?vs z*V!bZ-Ta<*Sj0ePeUQ(x*)4M1DuL0L1e3{boWW$ryZ5pILhjPh#{dtk?29WNXBUT| z@fy)5{MaFd5S*WrkUocsD?5-U03k6OLL4Qn)&&UhDM0<0Wu#JOMK`s7qo09hr&|7C zD?QMy_GxA3y>6%qw<#oNYP=z8Dli=BQkqc&@qC2a=pKYjdFOI1*$FS^=u+32DXW`n z*E2BkPu8Aobcdx9P^tE6yTv0BJ#8KYm+(Hjjn{^N|=(-A@*8C_nmuHOLYhetP zklGq|YYqmDhs;Z4KlV!Xzvo={kGbj21iD3L#FgJY{nqxCfE4?A?rQ-ss8^z?sFUt z;TYM+tUl4OIWodAin30T?2+sp#|X!fkg`f8$2vw<_MRby?0JstQ5_r{TlV$t`@MeO z>-)QY{{U`o@7L?~9FNESp#~oFy%_Gq6RL+db9ft9DqK=c=R$fK^z@(VT)<)R3Ri3% zJ67e-g;=v&o4x~s0tafO3YFFidICt*>!2?>o&~{n3pM>OLX?u#0#jqLuIAJg-!-9L zrQb0BJ^9jmODvroZr3tcPN4{NpSC=lKlDngn_<4%5`{VBKAIz0Y*Xgb%|l}LG0HS2 zMF3{H;yE16B-uD`;#1#xkW%?3n2On((XAOyO(t@-j| z>^anSYgH$G0ef!Xg$a#^`^xCow0`A}RT-KItgR-0J{$ZAF!=N5)AL_512P#GNY_wn zQr3|DOkm-A*DMLQ)_$w^jcJma4#+{f^D%$zJ$!O3F79eV44Gj6JNL1fNn|ps&(dSw z;c4FvO{H9(EdI{k?l^nlGTjc@aQ0NNdg>b06`#(EXV>YR{oXvIeF5ev1LJMmlCdnK zwDl@RIsN}wIb?(&1Pb*KV z6VZ`99h`serc%ykF4*y6-pt&J*QIGHWF?FH=V*_<8;xd8tmoKe8qm~bx?w+xX#lcy zLQ$uRQ*HNNLwZtAc?@;;yw9K01wIaL`YPm!XGm5K7`l@>;mj 
z)*JA(cAUu4jB#JJ(BQu}|IeYRrJIxMPnFJwb5@-Vk@Eff&nbyh*-~+Jj>3vwusC(i z&$Cn$k$P#AlaCiLoYZCBe4@V<=Wr`;kT(pqembsPOuu^5F5vT@A8hkK61xU6Z|2?B zzIvP2EB51J>2f8k%%N@5xz(=e(y=(W3|yG`xtBtu*ZNzgC(t)Mws7^ULFv%~_ilrn zFvF1VpvJGOvhLF^sN{Y|Kzgu@ze6>O5|p@$mnt#OkMt(q-ct7Jqtiz)5JhbuM&w#4 zrO2o9eiT|GJMYyg!CU;${OXSvV6yi|kWw9HE&x&!^7s`?Ee=_&yaIgvHAyse4jmR6 zFY`e)kI}_VZ(FYQ!ogUoc88F6JMrbatPlIx=5CR%<<3mq@QetyL303Oi@4+}8lO?oQm5y3kY4 zFUXZ24tz2(+mG?dbxx8V$?7s>IX3@fY^6mB<7+7Z?V=P8;P4IW(nS zvVnKrLBiz+jflBI5l)5h29Nvl;9*do8}-fbOKTmvX;_(P8TuiQb2nvBv1E`_38(}# zgj?EEXXuG=zE4Y+;vZ&8$DNozIi)h3YoiN-6InlcJo>=?#hd@$V=OKjux&O8je5te zn6+2SZlT-iBgymfcEcJMK-JFg(4Q3Ck%-p~1ojHRw}$EiyXA`Y6oCL}=RaG(HT3L{ zij@qD5i+Gq{s(mtKm^YP_Fc!ijGsWmngM=*CkQZAIb=u>ZdtA03}UkI>3m#yHEZwS zog?pn0v?{~daREGr1Ew!8^9t#C09rIGwu52$l1abhMmQE#3rkAy^eG5Kxisn=`aC) zkT84l>mO={^6RhfFn(a=;~5b7!4+63GAQz3YuCQej_L&=Gn4OLz~wIdczZJf_@1d0B42%rhSE_b zRltV!(e4IT{bhC}E0igI}Y&@@f{9Yql& z;5}i_xvI-HE{rNppG+X%g~UAj`dYYOVw5AdfH(H?&@+S;27vg1a91=pV zFbbUoZcBeT=y#L9Si%>#m}+6?F|SxTn^j;0B(wA($2mocrH}feG=l^K%Du)#qEG0u z5nvgion8Y0Eh%tF45W!~n^zCHc(v+?x9JzCOO_u)KU|iLgw`_wam)I$ zyWY>u`wCrEv1b3EDe*%_gH?2K_oThS7Ov9Qul(bj$vXPn7ZpTbT*oVQ zL=f6P)X-A|V2Akp4&$mO6y&yGviOu^&g_}uxMaVX{juDCA)lJ_gr=#_G)@6ZAl3FV;=Hw?aZuBx8&&0oNc;w6=$@}vcl^8vLv^iZt zz{L`Xhx|3xtBmEKSTZ!rQsDYK`xSMFyi5WO>zo@<%5kNiEWu{ty z-WnQrVQqWw9;7=U86^8PdyPqU1dVlV;1?EUapY~~(dn?TQ$WG~%Cu{lu)v*Ms+OG_ z4DdwjI@GIBePK=@rzr_x8g?pCA3UyZggxtyiwKC9DD@c!`2pDA3O7$S>PAITiC6Z= zs#w8 zk_I>c1kxn96Z%g^CFLdHx+_wkm?_;?V?79AZ;kb5Uo37@cIgx|s*99@*%xVW3bVGY zHf~c9hN%HIR2(9v*0?&}0Z@}@O}WZ+07fZdu1?}V%IpcSGQAN^sq!$gRr^;cXG!b} zS26v$36^2?dX*1cIuNho@;MU?xKk*Q+EoQAsdh?cH8NDswyf(QgNTn)5y>2*ibPpu z_Qev@Rsg4xhXXRiy(!{_lf{)07bkc$Xi9j>97X`sGl*Dt9)jO1=L{yD!%isDTovJN zr*ZZ=K6=qIN`4YtfA(Rb4zts4=B(|Zx*X46nG$ppb~ftX1hCNri~Rfco|gAXoHU&f zCfc?=vkICH>1Ju71;?#QTT(y2+1E}w86KceKE{%qn}J?|{J-qi%=yKt7s41usdE*O zYvo{gx_M4Rq92vA3@yG=K# z&{Gz#0L@9{^7?AxW|1_27)p=7Z6(*^P3WiZCJDY-h<-tS@Ewst=EDE}m+7kiI1q4f zAPo0&10TgGdgp+1=Rf01g!CpB>HAhcxDa(MflLWgJoE)HpzkPl$i&GLZyzKlGLp4EYq|uGS`XK+q{>m=HTfK`o91^=vKYoH z#)^fz39@SrY4%()tB*Km>~@o-H6&W$4!4?|4_sP-2I0PQ$$7gHFz``EerN3V z*WqP@FXce*P>uO^3~GkCvVE9qts`hgL@TZX!_Nih4>YwCERV!3T+3w>_&>>nS3IqJ zl+U}2PcE6W0y^IdVsEnnBS3Q4vueVqc9@$Uvhdb|ose%_?McFB1enLjmLnKi?!^+lVc{e81qTNyB2>X4Yn5U`= z+F5FpwB_-HoLiz|mzkh5+V)_sXNl!FHT}IU+MBoyF@gXpEV15?4q-TcEm%lEVI@ik zj;YF6?(&~<#FZ4F)>7pM7-cK_;qQ@Q9_TdFeZ3a}%?-YzY1tdS>d@lrSP*xmxFC%q z!5xYeE=U;`U#N+2C{gdYnPyn>K=wOQRUozluWC0kozq>ke=@Uj zzY)^~QWH&dg_uAzD~CXco$yxKbuKn{%hrjUFICqsAoc7!jS}tIK5Vk>GYs~Es?RlB z(|5;soiX0@Npll_oEQ{~E+Pwm`QaCvq(MK$UShdO`UwQUL?;M$4*(w8`?u}+o84FCvK=hn3Yl+hjEN>C%T@ zOnly2Oh~<@^Fq(mw+M{t`%=xA+_$eS=CE^%`f+=mtFv#NgP_i|s}FZ*F?~Vi)hr%D z=F4fS387ZNS!nwteE2a>kb9wgvhd3{F+O5p9WNy_!NT&|IhjoXT2$jt7QYqe-mm+u zckWxA>l-*OI+`(y(c)EtFN)tMmV8vj)J5p+0TUzx)h>ZjMmY*i@7x7Ej%;g2T)c1j zfG)TCUtKQY>Ys(UwH=1$gN&_s*(^cA;RUt{17p9lS%V|H)$r<)AI(;c0~93;Fz;#h z8x%}&EgiG=u>P?Y4z$6PdWP;pnh?_ejC=-CDo zZi8fLb4&wkb_LooGaA_&1KaA=dCZJPY`_pn2p}E`MuMwH(jt!An~|YSJMQ97q~)RU&b)KF_vcx_@S^SB-U>SK-B$2Fl6~D@q(|*V zWtok5Z_%gI59(&2xz!fEvLfi&SV6cuL8;lTJT%S^I{;D9FWkyGM|K+~GX9x>cfOvF z<$aDJs0oCRN>*JKbf*>mNZB&wUMD9u5h((cD*0e-020K5y^Ji5@Brn2J$OU_YJe!q z2ptMaO6JN^F!Kd8$)p;v~{0X9Q*Kzz2w`9SLSdb%+y88K~Drq0Co+=GXW0k z#L*DF0E;7xk{RZ5jjPC3nx!}E6^}1XAx~@MCJW#ZDM75~*>%)IX;aX^6u+G@liOd9 z&#i#TM9HEU_i5%*1y;|(jU$LU$My{*S)q!3vHT;8usq13(#9P~?d+gWbTXQ9jb1Pb zPvCT!pFoN`oQwT9;}%oPc^#!EBK;zB9pIZkLa#g8Yj!_|qO=sRi#?kdxftsMle#~w z@tXc6k4DPgwa4M`{na_{HgAzJ%8VN9Dtz9U0RUZ^|I6)7W!C_B^GYTz~0B8C|#gJ|;W+1Q)})7Qiw0Vsajn;^J_qc8_-UFLX>R(A0hCq9H= 
zt*a<(1gx5J!ppcmUFySD>V$u!@2AG|)PcHe41a8(t(k06@hR8~3&K6hBExsV1IZAicP6r68+&<63f zY-J&Wqd^0VD7JH`3V@t@(a_)KZNo^3pa&O;!PcYCO5P0&1dc5%#y=Z{;}HLW{o#7~9xO5pXgEVYgogp8n+F6Zc6+H$g6LtI!$A z49hN~&Y7^4!CEcx9*9p&X}2D>!nJbT87$gY8-WxUbC2(tiyh?v2N+ayi);q5_;h0( zfvtuVRG}K>)0$#0ed&dNlk<4!lII!OQ@X)48$ube|-#_xui$s27$TuKFP zA3f6|1AQ@demQ`&VVF1uf|`zHLDgIa0wX@BO^yIXOzJC%dxi6;{|dYBIcgc?Dl3JA zlva5!p>1cIR>}X5q`NXcH-k*$BCi#S-_yoUe3HaRl2wU@qDVcY8S_~gt^DE(S$&3) z{(ZO?)J6_3sn>Z^rmUArrhw}ld*wzeK?2xdIa>lmd)a(lfD0+(**sboGIuz%_kdxH zy2#WC`l9WKJ3$x(>r^wx;=-Iv#GTXMt!TGPdtc+hIv^)FGz%W~#3W8YH4U-Ci$UNb zfJTA0!^hVZi)upH6-V*jaLIx;`4OWx@N?&0NK3-d@@-!)1AhBfaYL>s0POu5S-}*m zrRyR>-u@QH;^jA4HCEh#_AW}^g%*DQoV%Wzu;9~M&;3EzTR$75ibRmD09PlXw~)F- z&8yh<2jhG5y2#VPD~qL>@wxTZz)}F_1?w4|2H=_46lAYi*au=glNU5!c((wD0%vU= z^43{HOD@dJ1x2B61g7u?hXOpjCty$dA09SAq9jL}9#8JB-|uo>W-)ci72@?S@T!^% zFC1`o0vH;MexH)ifw( z_-G06BDfZ@f!w$~fvG!RaZukqVg7H=p}Wg`VitydkTP2W3hbs>4|ZVr_7z&m5!43^ zeoD%j;B|msxGooWCDKPMy5r?}nvz&{n;Min@w{(pbyEPGg=ogNODY4z;_~AQ)J41Y z-cy1)%E|PAL4MAL{LD@aVpn=go=FbSk0-!nq8>eg=(>RRCuY*9#&In3Hj|Y-|K+_P zvy-AS(JaMm{x?*)UevrV!(8e-D8Bu#_e1^Yd-pn}hJpz(5E@<-8CZ%E+%b9KWT!wV ztvyq1IMLYwL+d)LF$ZJ*J8wcw=Ate>SA1{FUt{WeX5H9KKr|9v_f3^NRzJ6#$)9!o z(V035-6PFTG67T3HQ*SnU2i;@@U}bIJzYG$ws{2esQvzb-X9@`QOpj&AN}JwY0iua za{r5kdf!?lVzBhh?mgILKu|?m5-_$8tk32-?ilp%z7mTDMPfsv{3Vf3cV=5fz8hS(&nCv>;pTQh-?(SE%ACzt->wIdCe+* ztqH03m;fmZtOe}e=Yb9~az|e~%~{bHrv-A6E>oW4;WN0M_`&jiTk;nf1Nw`tajpF1 zoC-<3Hk^FvO_n}ymp)Lw?Zs!p52H*ShQUC);lh6}|5wBCL_HI?LnhBfGB7Iq9S~GL z0+OAqx&J}9!mCf)HYGRw?nM6U;9RX6B)w^n9Iq$6ja}OV((AfaMw>?GoU%S-jpR8C zrN@_SctO1Ty!+YoGT~H(Je>gV*|)e=I>Onk1+4Ax{#C$aifD1r*R>QrPl2Cngu|O=yCC-UHM?A7i&!$@WIBdjN-=yodRH```y#UB|#ruljAwTv{DnYsaBm z*<10AAYV0n^=(i!!wJaUEXkjJVSh)Itb4=4jebI(!D1wUKJitVehs9kKr+#j&cfB0 zpQPAf0WmV%Hxm|(~7@+3R60WIo!> z<=w=pC%t3!jh@8mQ;U04u#o1b2NL{4%&sR1=T7b|NgR(KEv(l%FrPi!U621F`D5F* zx!y-5C*HV3naNbJ;^J z8lyw%57zPgLQrk)80h8S?&Y3taeJS9l3@jCh@&vuZX_bDt7WB6vtI^cA(wKb*x4S! z94OcnQr3@Y(LAo-Kc#FRTW2;7sGo(jo)FQbWdbpsG{><&R&KsCT9fJMKV?6D*2nPl z@DEKhEBATf0g{GV=pWH-m++HcJAlnTwr=^C8GK2py&x<5tC_aoDgVK$1MBSBC~)-+poIMU z4jPQ52?I+x1miQ$un8Gf8#AE&_w#he*W8PmxQ5>YcI9dmSr4ZC%>zjOrXwU@dEN({ zZ^n=heiHG5r~%-LP(sNxE*KQRox9F3vaZz0aK=aC3rJHg^}<81yl`zp%mc6J#DU6U zOk4b(RSe~;dmPA#O9(RLJ0-&~e0O?BFU-knddV${g-;>YNpnXs=Cx%N@4L>BD)U`h@NW9fSx=E!&Xo6q@s zNtQISDJ9w0`AT&&iT?#G&cIqWR zya~D8>_vJP5$snpy^1*|q=>x%t~A`PFdH!|Jnk!>9>cBGftU961(_kHLyE3Wxm0pF z&;1a6s#cge z^k>(wC~NF zw8#6Gs)n8JH#z6Uzb{pOO4<7+LbPfb+8bDg^alJ{Yo=XL53+8P8|Un_AP1;ol=WFB zJC@TOc?cQTj~^b!H*zgSJD>eIu`fJJI=c(R#b_TnII_Eo4Sh}#jn9&|TA!XJxCa%x zGS$x@(leOrDP(@;eOBUpQEf!crR z&1-m7Iil64mgB=~#|J@MzKJK_CWH2wRnylJe!StQs*fI-+wxMCm(kCS(!&IjY)T?NTt zbuFFf>sF=Gdj< zs;&!Y;bNL;#-%cy!|2CG(ZA~kh4rrG(Kty5VA3?hoS^u6ceJ4b5;laDWh6-80Sl># zw2}zuJ(p8*`Ov6ZZbI)DS%yK6Hg~9(Ur`U+18Y(A2x;o{QGpKd$zT4e&jqwsx$69# zBQ@yK?~0nND&}N);=sX8)lLOC?c^HG=z$^#IE>-p?)qf&uQ3g} zYZEs89C%03X^nlw$!0@(yv#gHkzEaKXx&HPE@3t^?&Uuo$5RSey^L=?$Ej9On?=XH z^IBk6fmT27e2Y0;p)No;NQj-C_5sbEd=$4Ki=9tv8-D?Ew~U}7d~F8O%TnMjNZw(M z=5<78o3MC|D67!qBRi;Ac^m?nTxG&cYYyj+2rA}WEGc){JNG!HR3A`{IrbvF%hocJ zpVSd0UGqEJjDa*raLMv>=(XftYy_aCua!%6?63f;0ASi!YTpQy7ll_%3_s+u+LZ(f zBW!HdAv``}h~fWLAXDP`k09sHbk^NH#X>o$dQSx>1&7DC%p1Wcab2K~3o1WwDyE`}UJm(;*Ifp$B5qP1~vK z8B3YnLn#Zt%QN9lcNfs`u0d1B%Fqch^xZ+SCxP%9#6SLxAKd-N8KkM(%zY zf6e~7Am*lgKX9^O=4bEMy#%L23f#SAyaj)(&7(%vtOuug)?d95P&`x^_9{QQ)PP=- zYW{pv&;3|q`?G?_AJSQ>O6$|(@mnsWBl?=Qn!Tu;*-dI-9RJ%gv_$gdLY9xyR@jeI z3&^jIkG%Lju`@43T*t;@@sJf6x3=4GRD{8Jcy4{RJKBu{ma;ScR)WK{Q>W`UVPjj{ zQ=#eWJ=?1xo)~FLpZSloh6HasH^OuXfIjXBe9! 
[GIT binary patch payload: base85-encoded binary file data, not human-readable]
zxj^;rnXsJ<;}OHn$cb_qT&nO29e;Q?^zq3X%J0hsTYY;_AsRkI-%@c_n)9|+`p9yApBk;OyG(vJf+m{@rM1-Uy&drW1LZ% z8`;LUtH-?{5jE+FPc-qQZg0-7AxWj_b4$m}z8tM`=UbV~OTlTjVvgu;*1I)^4x<6D zfNLTG!j_`qYW$skd3CMTg!52orjhk9N9G;f`*su4QjTwJvuNY23V`)G!VUvIBg5R- zrAWZ+`!<1A*Y~XP0t|YhZf=6R?Y*a1YrvIn<=|$AwNS=E03t6t$gUi5p^=tiK|j-_ z(ql)ah(!lb-t5C|s#l?8Wwt!H={@()&~)j0VMle4M?6i^F;Y&cU>6V2V9j z_mis>5`p!=g3g6Rk?Q1IP}L3m8J+ZVy* z{a=mjjho-?+j8RH?d8^bT{R81gPlGd-DqK~9r+mQwPv62`{yVo0*IC%cDodjEsk|x zMo++#?j=j*I*LknW$L9x?79zx9fq)q{ZGlELgz%436DgUs6n)o2f+P~nqT}`^mlX$X zO9*$_qGRLdPDfZ7TC*z)v#w;};5L#Dcc?Vtgpe88lB2<;2%&%r$sj)9P#O!o(#ft# zj$gm60q;XWE}oBfW_wV^k>@e8!&a_R(+iC>lTcZ+ovR`)VQ}H9luhHV)osTmtRkw= zW_Nug1fV*||5qfSN8tY{azj}WxP=A;T1?{#5AjvY2r~0n`#8KdxENNp1bXHf={5LC zAuf%zO?S%=bFhJnH}$j1-}xzixR|_pV%EE|9WKDfD+^Eu;#CV4qMa{iBt0hYKFFy{ zGFFass2eXk73#a31QJ-m-W$BbOJ8+I)gA@81Ee}&fSD@~xhT_XC2K7o&b%mR%XpBo zyzk;b5>GZa@ttgk=9bVG$a~>_VjGw~j)Mpv?_(coW6IxQhcyNv{z8G7f#KC?m>@28TrrvsHGSeCI&MEgs>D71l0b5_@iImeG%z6B@M=LvSPyADJM%x^L-A#kFA;e@Zf%G4?udV+ib|qLB&-i8N>cXJr<@J*=YW_Q z0L0Z_m$=F2S8upt`{YK~>lN#`N60eC?9PQR-sZkF5abts8K5%|Ue?p$ zEs~9I`~?pI1i*`4fEZqDt_*a^%rh%%FkA4{LrRNtn2>Ez=Y=`h+_)~QOPar0v!rz+ zjoZde^}STvl1trzy1@6Z76|%+=rimqHkD zf=^3+aQh2=^B3T|>gW?vPx|d|Kf`S|a04{bdsuGP?Zv{*XT;ziFN@2%aMI}4m`22+ z?^Pg+pDUlm)pe4_bmM*+?IslynkL9!I{NFKiDCw0l5Cd9pVR1WXwygtc${%l=em<@ zcElXqbc~b1bhcnIvU4wIa~YA)d#rfvsF0g8#zb-4Nkb`H7{E_&&c`|wBiCuGTj-YU zqBq!3OtQp~(y+3lvCU7MJrOk^hqz4nQMFlGFWhE&u@i()lS4b*#q|}CcP5EP2hJC z?}gZX&kjTGH9I6ejJbW?CL;+;3T{&In+fy9!i}DeRMEO7r!P$Zs8cFT}A)}XW_0H8!06r1K~>^bMgV|uwUU1UOV-(;=zusqAWq00R) zwIl2nI#&V-us{Od7sb$@B#JqSMO_go*7Lpy6*-5$veY)1_wraL#Yu>VKuqa-#;l|4 z;0wXO7K+mrbajhe=>4!h-mIFCJB+nGU!N~Sly8kJaugm!vd5UgR^odvZvZ&v!EARH zS&YD2&+d@eJZKjo_`GEcbofdQdXO-(8)B2+cA+5vhkKP?}^MeDpbiDXIc0uBu z*&s;5F`htI06Ml)R-&{4Q6aGXM!pp&+|4@t?GiY%YMc4=tj+WdwCNH?Tyq?RJ9sGM zE!Xf8#(kH%Srq3-z00?axC9!7N5(un73L||DdVI&i|0Ic_Y04_Y|6JOxdiG($pGWw z`;8W+qvX{tW(urBY4r@e*n?(zsG?E$$TqSZ;V`CpGB3_f@yy2?=-ZsA-36-tT^!>m z`LNKlOp%I|tjm{o?(mPY3AAu9xJBJaExBh?Nip_@PW5Copwy7D4TCfA!n`JV*$ z78gTCfu*u9G|KC~we-<0>a8;$$Z1<PrteLlcJ|AYa?hZyR1@?6e(#HTH)E;R2}ZsIvE0!GEp16UPlO8n`zUyLiN`7k7z# zZWo0<*An>#oa*mxz?1+s*m*b+m~XBsmx3XZ=L#+ee8@(ey(DAn&5x(I(d!%vv5A|a zJMCvcoF*!v_DSw*vPj4xW){GoV_+j-<9=47ot{;NPh9K^f}v@}xR)|3R2fPpF`OQz zVS-!y#okyf0n26YC$^r0{xkdi)fe~lS<~l)K4D^jbydDVO1noMmScI3oSerG>@r$OK}QIr zxt+@#&J0))T}%cCA2*~8#j<_99CD}*qDQYYdLWg#3x!<7uL+~i0k7PP=TabK2gCAl zw0%{BH?g=X8C$P@L(&eBwzA?TCC{4grd!y6q-E1@M?7ptBPO4Q68eK5$&T_~;YE(} zZ5GqO*2__O<%FbTnkF%XD9fb-Tv^@Lu)RdyEkN6l#S@TFXaP~&waS_m7zP!v&ib_I z4EVLkm`?60G-Ic^Mi$uykQ^+qNg55jgn#0YlO6H@NFVcZuf%=M@MCpui9QImG{#Nc z;ma}wZ`UnT%c`?zJ{f)}<2I=%v#&z_R}NY6B(A&=f6D8Qb5WTL{N8LwZHd9`^pJxt zBjW$X*I7l?wQY$uSg;Trf(LhZcM==|1h?QG+%>odcemi~PH<=A8r<2qZRD+U>Qb~~AX$?LYYtAu;^geuLETn%4C)(5eZDjX;d0coPL-TMk(HDWTL9oOYPvj=csKi## zUrSk1mxRmKrUz3oa2sa98)#-*$s!bvzh}sC$W2P0%Psn2Wt>_hya*VKnP?nyQ(xJ1 zzq^#%a?!8Z-Md#@KdzdBmw@gGU?7EYL# z&fo5Ik~FRyADpM}B=r+be29yEt_68wIWR*skECNwM})8e`9fC2Tq@O!6s)yCH5xoO zL+{HCa!pJn$F`hyprBO5blSwsgk+zgRyfS;7o1!si`?FNis-*GXvkFz z^|e=-v^!s>OrX+`yMwFp%lXHBPk;sc5htLgb(V2BeagWNfe{*iZ42m z0>D2|(5aQXRGgYJAW%y#uPKhz2L!H0&&XxfFBh=CeZlqu-nZB(`vhkA@6bvpBfdxi z+)_9zq(z#_{Z+vF)E|a3DpWP(*ao!mz|uZPTeL&o7fdJmT^~HTnSQUMTvc;B3fIc02EwFEq)QUo{dCsp;*@Kk#DYg0l7Mfd<>42xQFAlBZ2Y`L z?C-5z^7&+mu<~4Ppdy`Ys-1O=X9{5wpIv-xO$1q}w2mCyw-bjNWWzW>D5 zsrx%lyA|%?@%(11-IXNHM6AI|l7Y5eY##m3RE=prQ@oR`F3o;(w<{MDLdyqm*AuAu zhP|tsyX)mmr!(aF{>cpdTP@k^-F_r5iP`A!z*+9mx&fG*hl1&v>pj=q2jqoo@C?;^I--yvAxsv@tZCpEA~ks* z`aEsns@Q{J*zFg*K^JRap&zK;f3$hbdO2%1mK?_1+!aW9)Xzqpi!54-0kdMes}t7x 
z$80>Vmp^84BB!2t%n|o3={JXEHOv-C?a8?5wpopcJ)#pe)MRq(CS1~~K-*_ET|JMTGUuguI2JL5+?}ccxhG}rxfEUpj zsWVudD?0TdU)vwoF!fcabxkm9#amoyyz*)zBz5rkxj9zRFzv{6x+0G`>>w|f|I}sJ zsrXs-vLlWJL+}i_r_YFL(?1ONPcGvApOzeZ4GPG>XrqL3Eqc#VilG8$w2BEqgI2vN z7US{WXd|w7hByQ4#?K2Q+>9t9#b>N>tGzt?Q$mhnm8+>ZR^0JaKz%h;Yn!#xVm_Qv zb!@Uqx1zN$iOVQ)JOFI^v0{001sP5T1153||BiH%F=tcnX654nmU>`}oEx)1GS5W( zLmGE*zBF5prnqT}efFGer7l7E@^ybHz>8JT4a8Sj2Ohk`$dU9F);9tPXhjt%KQQu_ zs4(-;OR<3*GF+W_@G-^7}^q5y7SXDCVFd7O-xgJ1w>TQ2Scs=Pe@9#qr^e-srIC8MGpxFP~3wMx2pmX?#OX_Ju&s zIL2^Eul96+FG)0Y#Mj-;r7D*tQfZuTPHJxx(qh!6R|N*^siYM7MG?uZ9ZwH)eRT+3 zg&)-$9Ca%%Cor4W20&$^j{>~DnG+`aKz(HOkkHtI$sZqzMpo#M>N!p4n4gRbVnDqX zq~8h!`Lh3Q@)7QB!s)1v!K&G;Pdu)n@WL=elQiulSvpqA6{sH|&EA^woYlW`->53Y zDrp#kB$#9aP;vlg*E~1Rz&eibPAR?y%FJlDJww< zRjOab08q%{`g;qn81dggNUy^{`|KYU3{=B35t9=sEv<_V4nE({II*};hdz%*OeFbu*eEw5yvWd)Fc3x)x<@~&2 zlh@>(`(L#_Nes*wk`WjZ^b%zfPC3(Irn(juwb37=a-vqgj_`cNV%ko1=rr5$>beg4 zlO9LH_sU@H@uZ+yNLJ`cqx-}{j2?^G@Y^TMxos8*fd`bPOYXA(Bxoywj1L}B z=3yCB=<>sO0-)T^Y3P+iAy;D2s;U%Y<)$%yQf)4J6Z*|esh0z{;4s7JWr9V9G`UM< zEXbI=5OTnUT)p~y%F3nvz`wvle8^XKx6?HtGid^z8}TK1b#}4VwKOH%dA(ciia%%g z4cHQmRkJ}r7@gd&F0QHkB|S)y3i+t}gW-{>bCi*V7I^RYFqjtPIxyi9A zE`@BR0mW|`Xfe>Ql2gr*ug#N5G{4*`!3NeUD$p9%ngN9~2k8FOF`~>B!9^&K#wm^P z%%%M0VE@-u?56$srb&1+#`oG+>C)u#&bM1XvrvTfzg%@hoK2ZJ3YwXtzXDLm_@2Zs$x|@|Dd}=@373b@~GRc&JOQ(dtqH{G7HRujza9kOzIH&?w5v33aqkPq28@2ADoZO}d zBJ%-%dai(p)B6;0cAa_@ocN?S4B&RE{Vka^K<@Jhp4KohHs4s<#Y?)_<^AKtD3c5U z;k0kgUI25Obs?nLB#SbqDOjJJ=@#5+SW(9C}xQV zosTS4{*+99uIuF}6FKA1{tM%OekGrCtLFY}iwl$txrRb^g^8In{Novt&#fuqv)U;p5yM&%WX9B1{Qw5XsDtAp242Vr4;BinZJ@P<;oEZbTFy#DN!`+IE%xzu2uothc7FJ> zVrovMb5u+&|I|oNIW8Jwj{r|x5eFY~q*S?=5S;wPmmD4%kfM8fX7IG}P05=7ZSP13OpPy5pV#p;w)Vk_h zJ1ZVt2s_Z*igDKy=a0VZJkYyx9R-SMA!#n81^nZuXL!b~o(=(C6WO=O(^b2KieY+a z9dvp80PIm!A_s@MRwRlt(^^0T*jOE4l@(oDioYtiKjYvcPodt=rt(m|X4hxwBz@>{ zq&7T-5f(9AEgMpFLwT?V#gZ@^$qc~N3NgFjI~eIp>5b73#du@GT~rOfROO2dJggGm z;n$jIM>Fhy=%yT<2EvVo-V(p|n_hqA!9orDquNR2m(x%$>=p9jFrKVn8`B3hYvg46 zdwNqyBSPhZjZT-K&TW`mzx4){yVbp_HLDRXeABRy=1&LtCEsKn`iMJGd2hl|yLKqW zNW=m1OTzk9c7(3DvA9Wa7=V<@@XG->aB#IfZ_|7ppvcPpVqbd{Hb;J8hox# zMOZib0@2=2W&FYUo`d>WqkrFcKQ+HTaDF1QsQQu(=!p5>&}o|a<8J~MADogJ5H%E} zqUSQT`Obwdf=Vq3e9M@1Bg!FfERZqDmHp4@5nnz z`ZY^Dr1`N$V;5*jb8-V@@j96Kw1wc@Fg7Lm9xd_G);S)qmLQR3*nIAu7$-Crvy|Dt zD6)Wt^LbL+d;?hh>^Qw&+i;o#^I1*5XcKMUoi47>tPt6EKv_ol_I8tYZNE^BKG>%u zsrt3T2o^ha|3koU{{TGK{{h|qH3=YUE(t$9NTI!-lIWb^E#lO1@LY@P>tr>YrRWm~ z2_}>T|GX$9bFppjo(>{=F;|D%p2-b;09UPFQ!3ZJpSj zILPdydF#8>-FUjGQu7@23GMD8=1(rG(U@b0eimXbrQvdn1jEA)p^FoekMgy*w-ZAOYZ_O%u0!fKmMrjX@)+|JIx$p|M=D(EWQ?Ct{??YLua1# z>ic15Ygdx$`wdg5l;{?{$^3oiNpSS1)|ucwHrpnn*RDP$?en6$oc$dQ@Mv@h$#%Gb zd||9S5S7cHa|Wc)p59$9VOn3ZPfQZ(7JH4~Kadp^X(T--+8f0_oTJ*C>z3GO5@|4x z=opXijSE;FpRQUg2X=pMJdJNxoIf?{dk-_MvDbo7k&r5DQxo}%(wvnt2XNO)th5Q~ z7~&`5QLXeE6YfEC;&Bf*vw9<@&GWHD6=oN`Q_Zw|TSCbQylG%9i^ypP$^MSoNx<6R zKOZw|;lF(J<>~Csj+ag{0B}gH#EemnR=3=+EJvst+Io~h8H&HJ7gi@TG(|d6c=u;& zk7h}a4|X;yGdW;ftzk&70|7Z=*Y3#?m2S5WqTr{M)7A4YLpg_sb1PWjZ@tc669MpT z4;P;}h|B`bT(an2nyFU5S!8AIk3h4$pixt7$2er}hj)w1j$*m-6uOHdVOBQZcQhZb zC!hDux(QraGju{4LcZId2RUTbyzRaq45UgP>I84Tuoaa>opCA{PkNHa8ivvO9)KDE zw3F|7T_5#d0%ioqPVxsxoG;k})88PU02X;RS)u*E%q3jpO4?N4?iBc@5oUEP`k*H= z2WFyw^l6+{9dF*tJj@Z6JjGUuHRgD%HH~&hNjSYNDdmZ;hYvA6pQJL zef>Q;1k7!x?e%itVfLD!4@h>U?@-vZh|9&UtA@rf+$#^=V}VKStfSETp_I|(NI!W5;|^mnPA&@bagETVYdCX5a%hz;TEJ(d1ndJy zscS{-2p;!I8ZO4;1>^875LVBVkttbe4$6gS(WDutQs!72a#it4)JZ-1FGe80>n+!L<0wD_|NS5p!~Nz%PX1VO9$&Ti`g#@$^t8jtbJ zN;ZhbEW|9oXbp6AVz$q73gFa^mKwzO{ZnTTwSQD=lI=9(rF|`k4m>lJwh1j`hBxlZ z%}dQ&%`44o%44u 
zc`3NBAa(gH+*=u{j`C&&=aH?EDYxibS!7GQ;E4%t`EZKT_#2L4r<&8q*qcDJXgTSk zlQy;6wSihWJUc`aDggiz7SU2(SvPK2^=%tX0(01{!&<-Vz{L2=dlv1k18nw*lvlDD z+;+bPWWC?Z?tO_u>hqJHPucHz`Gzxg}xKoQlxD z*J{GHwa=&xEy#`vVwcqXPR>aJb8EksOHSYe)vpx$hEZ>*H)bf4Vn8Kjw#kX6$>ib~ zE}uKYYIf#2Ga=+N(5QqCSka!~GcWKXo55$sI0i8Hw2&rmuG*__Wn8Flu7)jyR=QlO z@t6ff4(dnI8dd#Hh#h6+J-}$0z?zoe1@p87cj(_`oo?hFp9n5N90qaHIx?YqfRgn~ zZMD21x3HtvK;X958BhV{1qiVUYGTLQ6xulh3m0ws3cnkZ!J;acUK~Xel8z&U9J033 z=@Ikw?u)V&nK2!QSJl*+S7nUd>ybfZ6HSdGZSJNFUs94f(Z(%ip*h0RfO4MnJSX1N zz>GpBeTB&yM!hMW4!M94*LzNw8L<#!KcMw>n4xV`ld2!Z#Od-{UPvDGCd#o`x>Bn{ ziiQ&?%eKWU#bH3@%t@Qn`1)9HiowV^qNoi;4{d9EdT+sy}{>M6sY3|_TEOc z9_5Q9qC6d<6AoWfT~gjjat`;1P0|TL)qP}jLkrt!YSAgYooDSJlw z6E2GHj^mbRFTwE_Aff{WT*G}xTlJGjFa^myG+zc5BpoGAF7Zgi6K|N5Kp2xvFqV!K zT@<4y--lcJI{9H|C&XsFg{8g*Zc`wqhmS|-4J7q5TeK7~U>4bEUt+if((9~HE#p7t z?ck2H-H`^aFhJ0>S1(lz2*?oF(9p7QLoE0Wv&DKlzqe~>e-c}X^Q1dR0unmQ$- z=X69mBkPF`1mH?C8&=SGzeL0pe>;6jt}#8k`kej>JwN6NMO&9GUTQvWt(kR27-uGA zmB4m|w{*)eW1{_77xV3`SU3m_7sDPKbRIXklH}$i?Nr8K6{3h+%lbl{#b^0Tap}fUBHIo2PKfGOwIEVa6Nl%R@{B0)Qod7;B&!J$64$ z%>J$D!wG{b+(f9@krr)MWQS%mz+Pq752~{3jjw&WKTdU1Z4`Wzp(FOuyo|8A_>KI_ z?ef7`;6V)qIL-35PWzUVbs$;`IF4pP-5Ud?OtKi%(c{-WA>LFvCvVWk3 z^G5Tw^(OW8#X6-(O829h~_G@{DTWL&E?Xf26i;fWs)-3;2Ev<^4-S;@{>3Qe^Oa0==%+DB2Q6R$|ZUu z%p=TFa2E~N`dihKq@$IaxGFL+OT|ah+Q^|5T@|CZ0=pt78=q4eP0{gZ#FPi4v-5I! zLB=}q44MGF0uz_#6$wzQ1^k7*2bu?-{`l{WzYXxo4y=BFHtk~WA8~GQ%fDjrc;XAn zLMrL_3I3-owl(%2&%t}~GzAW4c-qf(m_1f4nJbi2z5$NTD2D>_gqR8??2I2&sC2?S z&-6ma+xTlba**3(_+xd~qji+^OyH5qT8&qO+H3U$d^VsT{Do}~@Ub@WtK?H#7AP>A z(aO<(AM6}zNe#_#^b#cb)UrZ}qPBU>2=-6J-^uL3J}QRmVc8QmEae5|u_)IAm$_pB zI*UJC3wAr zv!}b(`HGRnQ!^J&*G!7}qxU)x=8$$MvtMLlbqPysSyyCnW`Z%snp|l&3>antKYkxh zew40N+qy2M6ps3xsz^|s^mvk=a`iB|Nzk+X?l;Bf0`}O+ECZD`4@>E@sLR&rM5$P* z&-wBQC+!wYa&=(AUX@-BWOc@;T!bJ73Njvi#7u^gnJPM;O#ns45`^U1^mMYZ{hs>{Lh{c#b|2JC8X2dpIU+~lgt+)6Aq)s# z!nw?B$SrtSDy&Y~E|DV68*!qfh!c4_mTWMB0Vp2g%L?O;wg#EYXd5S`&ks2q<>fj@ z=@d0bITnTCBQ7tV#uEvd2sUwt9;v7~Q-OXJ8Nq-g0DglW=RYq@Uv8y7l4@5YEK`Z% zkF*weIJVY=tiTt+&yIvR-_w|nWA}7AD}plUQ2pJM@GBK|H!YSR?LP*HSD%aIjThmk z^&{p98S0nIke~EUU0qNAn}`4RLeNx8={-z(B{pM6!4JvYf#GOeS0-AS(V=@7{WZ=@ z^5=Hc?L)KqZ>RGv){F;|UzJf^l3mD(>j~fz1&69mBz)B1kxE-*sYge*kiLta7w&}! z`Ce;l6i4&WeZUQ~#-pLGq>dy_AT6m;0(f#Y*YUP#b`Y=opF7#%XE=fsdHE;2n!*mm zfS<9K?ezK=kzfO_zsp0tV@VBxc}jUUAv$ZOtEP@wAa4`h5eBh3v)Io(Qc7InP|KwY*#k$1in2Z< zc)~_W@h!RoroNhbjHw2UyAOjb5M=%Cg#hX__i2Pl8m{m^Z2QfSYJnI62@))eD<~VF zaxa^YAoajUH>RwPPa5M)+xliG$igc|v$+6m1ezUKUoDD)wQ$kL1+W32F^I>QgxAe& zNR80zM~AA3bmFkL7=PEi%EKhNqb4v@6cgKlnEP!!^ z{GCS^tc9lB`rPqIWV*KV*{jj-xAze{t+1BuirFpt5bg`R@2{1cu~D^XQDlLRgJ*RY z$q;w7oIU2($t0lYa-~=z-8|7U>$ENwWDW?JDyR$~Hx0>oJlC4_1ry?qM}*rF?6&p5 z(T>4M!B^SvTeC!)6S-mbXsJX#BdT@IQ7M^1CM%-#H*bkzR>)&POP;&AoNj0lXT(%W zBLd1VWC`TyArk;kQTpq649?VA$Y4GKtNj;yg$8+hGV?^|1)oq&23xXBx?stEYme+~L>Z7{rb*=+IL z|7J0Ul#e3+I$8eWt%8vS*;Kq(qITItsgvql*kKxQQ~~|$;3rZ6XZoKsaTj%M|3{yh z-#0Y~@3Bw2Cn=Lf*S7@1Z?<|fhH3nYrs+Djo85EF?qcBmm4xf;sHZVN?}YB?lb!{M zgX9Bt#}G4EO4%6~iH`rL_EOM|Rr>dv77~I08oK9A#RW8Qgp#~2=kmi(mSCj;KFj;< z?82a@O+{{$(b;m@Q+1(i3ptVGWe}T>G@GvFi%B*sFp9gFBBE<~q?)gjgA=GUEjN9Y z7xLHtbtAzOQpV;&xPhR?n#X2nDuEd1Gn1t4Eb%f$pnyjJ7)Ji!ZDMIH&$hq z$s~Kv?6uQqCL#7m>al&aj-qrPp*+_tYH9Ir_&qpO5FrNnbSIEB{HyyoC0f$ z^J6~5wCvfA2FrDC)6kPDd@?_tS}^$DAjU`Od5)B;=iG99?Yu4Ux1rbf#R!Xw&+;W? z@47beqBI6FEKT?{7w|i-svy4U6G98EQdt0jm*GV%uH&5Bu~i=ApDc8&x~Nln_Cewxbh8L}Pg-`gIuH>!s0ihj=b? 
zFF^R!vv$emOAg-y-Xbe_#W!8ov~f54iBMT*ezJjIpEO%gA11sCURb>m4?)%uvHbKH z0_FZS#_L=rA-KbDZ->`hAytr~x4vjcvuL$$`b{J6K<3vbk0krDx$>`bR1=~Nm^hzlin4v~i+7F#7M?9`@W1R zQQxT*5{M4?RXiKwcvcJ`U+(`nZw!pNxBcv_gFkk;`KF?P+K7E?sJ{8pAp+Ruo{tF_ z(Zfn|%^l*I<%?CBj;_$Y-TINd#s;fYZrwrxjv|Bo2sG#w8(|Z2f+wgI941MgaZ0Cu z4ZMXr5%>kz^ne=iQp>Dq=%Rr^4NwB$n_3)>SF%YAML|LU0ivLsfjv0czm%DdWUcEy zbPGU(%&}?BxraBjE*JS&kS)p zKC}Ks?;F&r1T93CWVRK^KYNbQUKqDfXIkj^j4)IsJ;xbmTwbcAJU7_=X|y-ngxPDc zLZr^_YMpWt5)#830j5p!4l~@2q^|H%#HkB9Ul~5Dr5vG#UqA16GV@Z2Y(wmgg*5we z6R|hC1V{i=wz81x$r6ILvR~XYwUP`lU_9bIh$2@e_?_@3ZvcjRs`-^C4K+(7qC~lI zuKs<_fM0`;YQuXv4XYC|3vu|7wAqHQi?cy%k|7tVlTSL8g7d3ZAJ5H%jUJcq-@ku0 z$&T>R(}mOeMEh}HLs;Ew9;bvg8{CfLrHjCaC}?AZaSXd_cH*%F4kM_NwAhg~*ARq> zl4d6|Tejw)8d>b+P!``}P{EK%k#f~SAzdCxEz#pdR*Nh9$~K8JE;?T7UTYI=o&JMP zW+0J1Wk1)lUk5M3Sf|CI(mL7u*WA_klUY%XAIty%)8HVD)0yS>9wIJ?~o*M`v zTo@$}{b1M0?noWyx{j6qIKq)x8S1AMLUJ&aEd z23s?P$|p|;=bwCxv2V%fyHH#Dy@3P#lc8jG$x3&9?~%)nn)A1nM(_J`PdmpqesI(t zinr?r)ylVbG#JPgr#B}qqp zmrd$E@A!Gs%LJ4yR;wZaIF~9E)z6&6mJ@HVhQEG zkAPM}!e)qGkPN*2gN8vkG9a952a7~hSsqvBh)+qC5qx;Wgtaj2;LR;V(5AdPWlLyD z$+(unD~!D@zo4jndFVSy2XTFrZ8{V@s{1~e&^lHAl%3hSh?Vz?ecY$lm1KAD3vf=^zZ* z^pFYh{*i|gorX7RWqbJ^4Xu-Mbx`nm_L^<(Ok#<6+9{3Qgo7vlRgLspw%8Iz{PIud z=Vg3r{HwA0?Vr0YqTji{)z9lp;4@J$ilHri%_Xn%>8Bo;_$PP{IRIK7?ar|xTrS! zV=9FKOYJ77q!m6W4%v!NS!peLHQdE|rIUoa^HqC~_*!q#dAb)HFc$?fIhD4&5dj9S zl^7W7DJ@Tw-0~UP6dG9XJB^Q3^XpD%d!eT3$tH@q6zetSLn0P$;?Ly>uwn612J9mv zTPEdrvy;5uZc?)~ikxOeOh}GByOQhMPxZ)k-1u3`9P{zo*W!SC~XouLQ zM#k(1#vI1?Q8d`oDhwibGgfU0t=Q(Bt=T^AXY?~=OwuTkhmz~C1AdlmkE{?D#Yx3# z(~ryVoD1wH>I?8D@vDoYIa;{kOH$oIV=wKB(`AZ=)2LlBcA4It?(56BDr*5q22Oe$ z&>S`_Ih2{WBUd%i9LF--PhKx9Q8P0VLBh8;O;fTPBy>_QunnUs2<17iph$M>RcRCF^?+XjJZTmk7y&!t+b--`mH{a$9A2FOd(<$#d z?4^fh0{qwKYHhO(211|VN#5!dR;n5NvTdMF#Q#|a^#;tqaIFqhI zM`gOfgbN;+C5f`VGV*czBDD(Bue5%S^)oe=DEpGcqG@$4Ib~|;oq_L0&7o_)7p@5?cW-CN(;Lrpcs6zAaP0f z%yiP1)b1kiVEv)8V{dziQ82U@A27c}^)54E&AN~^)e^1Lz2Ax_c_Kb2PtQLcLHU;1pcm^qv!Wmd9+R12;RS1ma_DfVZZ{@Jr6?l$QN&aYQ|v zMeF7${(ZB9Us`C+*c(Q89m)TUL7VTzbhl*(sUDi{;rF}Q?^dWBNCHd+8)BRZsF2SddYZkDU zjTtIH%LPQ!tH}Q;O?&H5?2GT>cGuX#h8Q`d({Uq$E=Z0}TjsErBQeOlZ({|i^h5~O z+RHbS`%FN`;TF63`-s{H`t2n{+C|N35`G8NZ=%1Yyq@wmBtD;Orm&a_48TS8eBf38 ze$MlBV0O7|%ryk*)LjQ%zEpyxq>Y#_=~m7&?baje0;qxC`6-mg%mk(cj<9+ ztosFq^VHb%gh&cU_tKnAvrVot{x8DBe`8Yq--5LtLp~R@5(}?JGJ-b$yA2f5>U<4U zI?Cx|r%29j2RoTC`OXJD5hjp?kWp6!;R$N_YC7&e)XD%(nIoQBy0@{qLVlS+KNi*2 z{Xb=+^asY)*$-2y8<_RzVa+?`=CtBISj&9e&NzB+yi43;ChSq1!7pJ9uzZ^H3jJ{g zN)Rkg3XbL_`)b7%N+!ZCbW6D2Hhq2xKhP#~d3`F>*nOZYSoBU8t{gZDg{% z=etpTk9QHik48`DPowXeDSiWcKDq(0Rtd;+SZ-gA6jWSEP5je@r)qLSs@l9ep8bEy z82&`o39(aOEC3s|b-jA71aL;5dH_#kXUB~E3n{?*Nv-$E-WxaEl3!V8B|76We`s|% zbj7xfWFI#BN0`xoVYwZ&(90p1l(gmpKN5d+Tb}m%iXe^Ufw+$iswuNvZ-Twbj?h53 zHE${MEI<`ovrS&RxwV^RQ+RcL@$GtL%SO-6CprdpEJd_-12~{9`IFOv@{gd^a$Ho- zKLoDbc<+4b%oAahk!aG|;n)dLl+^pJx)YIdMOJ*om9MUhbwww$Rg}-;!Y8wBmg96N zmq0Zpk-Ec{dTP&Vh$4tGQleFX!|C25!KJA|HR6*N7%2xHzUzT z53JTQmKtzR>M6sYJ>ED)-pKbB)Ld>q<;m7iwqZ*)cuNH}2-1`vCmHemOmxL+^4h|6 zumy&1{lhwngwd4H-)fWqUHeRqjN>pzH}q?O&&7J55@{Fr#Eaf$@1s(1Rr>I~>Hz(x zb6E%3O1X9v3i5#l;7|hC!dZ-IR32J@0oa50>q|2$4w%s08#0&Gw%+N+O?{Dh~ zuL@))iuc_yMtAUMFD(?0@3GrXjA3SWi1pnuE|`U*7jcOt$7Ewj_auke_8hLSP}A+l zu3|nuwuGQR_lk!mI!E}t7(MO8(aVKQTJ3`86|xv!Ra*+PaY|@Ya__Y?(f=qRIbGk> z&n#%@J3@%7zbFB-qQF=g6W~SnlV{&pa7p*>QjA?l2CP|GgdBLZ>7xJ^%m4CGAwh=e z7eKWYCD8Nbgu@IdGVq$>@j8IvCU;n}wH_CYTrIWTnY+#{uobxlf0FTltL)f{cgU=9 z(AS!lT0`B7%0Xja6r+)wf&Kitos?9mAbZDm8uSV&$2@Z!xXhU$z4yE{ zEJ{)5k#XK8OQj!uaFFB-d+~lV{iOB+@SX5xrAwR7>*U4r)=1k8c9rAVNExuX}^M>0uMzQl*P>59N{;{gyMfjsoC)`&Y5bNMFG&>_> 
zwbR8H0m#ne`2(|04D-vu-ThPY^`8e$J&^e6c2_rRo5sTww_jT4L=2Q4l*YS1Z0IEy z`PpO$8nj(M45DH~_!VuFJy7Nt{?N2U90VqGNQ&ms@G77cMmX#?D?MVCPh&>$`_EWK z9hb9KVPQ)Pa&v_y`@mWj5Bh1HlolR@BljAJvb_!e!m1M&4n=zx5gfkT%t0hhdH8`= zHrXMqH2>3L=qzS4UUdYYerm;=)J^GuDY;&@08Y3CONRlM{jr}{2p|p~f<*gIyKc=K zDwUENNnt%?dfei3QIUt%?u|O>-fUC)DdC*~s@HqyX@&XBiqTi@8R1#0#@Erm_^F}1 zdBe;j5Tt&O+$>UerSp5+VxX6^t|onqR~kS6x_!mco*9`ztaPmhGLRSNMp>v|ARQgJ z;_OaKYJ}fgrg}G3YGXo(f9N9|Y#AKMCnL_-H{Vc<&pG<-?MNx})|H)al--}YV z*T6y^>c$g11+{aSjkeKxq-04+TWE3i(GicdY(fn@}AX?K!xly zH{p}qE&Q6A1b55@r*mi!Sg5j`8vS}rFgXo4Xy%AP;&rib zf4|bzM=gx&4I%KlztP{?RexoF5=UlTvBWXewB!**sJ7n35O_ZK{j$!V{kr9P{}kgB zq3Cn5;P$KnIA-yvGvjyqvPH%h1!w)<;%9uwlGS>PFohN@5*!;$@`7JtVc+8gO)Syfy&kQPFvM85_T9u)f?|#;!aStnTk_)sBv}nmwRj&W+S)#dFa2=!D zMrc#xe=sU5QB}fEO}y5Eh%bR%5E|FDAYxbwZ8LIQMg!LeL^9nxzUhHU>gOQcOAVN& zx?MaT_0+JsYcXv5A&*CI5SUJweUWB@YmJm&^R zoIhBSh!G7A#Bi92h$RiguRJu9I^;x9B#rUe3ltov$Dj`@UEGM>yZ6;?*5#dI6SUZgo04rAE6TxlZ6g=8`KqdH zNy#HAyJQx}2`_8d_c_HFRi0(%MNL|lmb{g{$61ywQYb=4Wr-P6@_k%K+oA!DQgE|E zcrym4@vM`Lig;{%z2oojoe_Q==5m=}zV5U7r~P*fo3epOzHhw>#6L3|AJ~A5jg+XA z(w8k(#ckMfxXyBmOrjo=KsDd@_=iTp{pof-KVp1t`P*Oi7Tg{l^vNjlVA%%*Y~JyR z(p2wc-ko2Idz5&GF#BBad)-EC0MF(=;NVcte!*^EtK04KeS~E(a08jgaKIGcp^5lD zS%EoFW(S~9`cQ!NPdue4{x7zerx;72%`gXs7W`CvlqP4;BPhsZ683o%soXvIg44B< z?=A{zB`nB1>LrI-e8|LFRAz*I`sg?$A$jEFZ%Z~x3l-$;#t0>Et#mv`{jB(`W4{Ma1{vlCW(8UW zo8>~yMY1UOsaO(L%EMY~1xK0lv*x7T4$VvrS5u?>;=p>@pJ|l$oHMYAE}T_NN7m2w z(rJ4rcDp{=*dEBN`hMg_$I!39ZCgLs-C%eA6w%d<>-$9TbRFXxk^PzuoPBoFOV0?h zrd5f@=x8zG?A}rKc8W$F9Qoda1yQoEZ`W*}h@T<#R4+m(`Od;9APyz=voNTSH{PH- z*)CbFc5wUtBVMFu@kOdlG6cQO47W1LWXn+Y=M3g_T_c%*9rG8F6FA4|vgmK^8}c8& z2B5aeoVOOsxo3J$$%E_JP$pCrm^sB7k!Uf?r9IUPOFh^1n6|?TP7`N4PD&EE4p-DD zjWA2+LQ5~Cu3NQX8o9kWk=nf%^~v+73B<*ZFMfvZ#uU651;@hi!L>*l8%QZ{p~qw-ynm@Zx09Axk68z94rQ5Z>jMM4p4ng zb@N3dUe395B5PPWgSlX#uH&}y9tZ%v2BzTL`>@DyB!zGR$(KAO^gfZpnt@D#cu88a z0FI;M<$6T4BUM+zNdLY*t9GNWe}Cri-B z9WuhkN^``lkZjLEb3BHQp0-<<6Om;GYn`V6Hx)NUWjU!JC2I<;g~bE~N9!&LHN=r&6s+_v8lwwl$8u5rRyJzZQKJrw}|emq$4| za8ws=rs=uL!*KN~2`P@e;m2=0+_w>UwJ$O4&b`v3c- zGni{zu^*ctkj1I|QFxuoWy2Ic%DmYF@eQBse3nH4y)n zgxa%LnGx`v>b9C|Vt~yoL{P0XJHYPR^0=ahVEUMN7~8xo$pL%)dZFZ8(W7SCC@Ra! 
zG%XFtEK)?q`6g!IWbJNzL z60$6H;K#*x=#em3-*J4wBDV>&S`GADw-P9#CUCvK#+mLrt$&&itT2vwH>3ZynBFcYr^d6<3^SjU5wkLFk4z0z__f_~EoG!FD+Qf4 zvpt=HL+==sXyj3VUP<{`moV*%(R2n=+(Q$^r(8xV3{B7XTld$R4zCQ=`e#sJ>W){0 zd6#gv?3Paa&9wLMbCaTW1_s@nL`h|RVuMiWn=M7+;+zbeVrYn-@7@@@l|5?&N(Nd6 zn#jk>dCIOCP9xR;g=o~2{W?D8%t`AOSs9gp0(d&uRmQgpO2HgzU{gP zZEVK8T-#mja{D5p-ocL5WifACIDFYYKglMs(kPM?;nE|1 z&204{EU<|3EU>^c%z701f+>o>$kpnC1d;d)u^lB+{x6SlgFxhGa^KZe@ z+&6dl$ltqu?h35h4GrcOeG^FF>unsowp5v_KtN4HF4PJ9#7OBjt5uqtabWSOWhVZs zvNm2;ee$OJCpbMPwbCcrO)cu4)L^qfYp`n!6lZTkP3@&eypcGaglbSYn^V1(d-b&T zXFX2qQ=jj{Cl%kER!G^a=a<-9Bf*EJZ{n}_F-3BzY$hYK zzOK(8;b+jIFXijyf1cSFq>3p9-EjG~?NKIH51*ToP#r5IaX_>10+1;Jd2xOTWf3b-&V^;(XB^(SjF!(BAS8so4 zn(83HKOJpm1L^WTPc)zG0Pa^(v)f{raON zZpo3xW;7j(^CS3LW3Ea_@a;Gd*<+!SR$s}|5PK4E@iw){mL20M6{YAqMjYa%N!{W5 zq_5&6hDTtcov-=GbB>3kMP*IN(h$bkw&oVcuO;$%UOsSGeU6e8tjfkHbeQO1;>G7= z+fNJp4hQusDd9u?fJ9FB1Y)t-cpv)1>FqaUmpDwNvqY~LLXcIIL6jb#FVZfQ@8dis zaj!buH-5v_=NUD#nTxaGSrzqzlMzMT7a@o$fYV02e$IYK!ZQ=0L}PHZ+HuXaV#E>e z*21LFFRdXC+XZajR|paAshfNi6ndSjx&R43p!mELclA}hT!WQVdBR&jIHj%z{)IY< zzb_)E6rS}CU5A^4p@uc)9jDm-q-4b;1)DrJ;e&(ThHQgJ+)|$ha>-**5Tq|U@s11e z*tnwZH$4l1*`fV0G2LoZ+d14#Dnl(5)-x`gO&49Lv!RQQBC^DdxNYqQ_h#+n82-GK zFP$oi(y1RSYtWhq!VC0C&p2>mh-5ovFYax{Ya-R?=z7HA=I9c#oK?5*Iu=_?FX}sV zMiSj}1!muwf34^f#nu54Tk#2~E=$;XSgE~exmT*urxbBA_vv2n`ElmO|F3jv(1-sJ*<%PekBJ4hhz+!1(<9P*LAPi1GbQLV%>t#r zMFHj7ShiCGJq9mm)a)nCC#U-}L9q$FJLM>S1Fy^;#;hlSWgvIltdQjt~hm+@jH1X zQt;|LCTrBgf$?9yK?HpB8TT4T;&bM}t-NL9SKIfOS|IhbKvtA5v-Z!q&ejer2iDUb z82EPTaSCcCgfWx}gSOL1Esdc~aJiwpOG8)K-10Tu4^*~LP5-fE7jPmz+V-M@CzOCQ!^we58WR=-9AqBdAhx z1b|y2wP#}KSK0OuCRDx~$M|;d8)on8UoYeZ&szF}N@soKqW5-@3yEc0_ z*AlT;!uE^}oc%b!E;IIO*6%F`awc`t22{?Am7R1Zb?lI&E9omKobN4BA% zR!j*8?h)9vszf^9MFk}`t~HAtnHhd1FZSn2Ke&a}voQ=)^{3fcH1Tr8?gsXkFPc15 zw%Pn`D9kE1f|n_i)&oiDceOr9#FlgcVSQ$HcIQLvOGwadWDpO$vRU7kKX)Bw^gS{@ zZ*f0c{_RVB@%)eCjz*XWw&A#-Hye6onqTQk!`rwXjI8wZ-3m+cIqn0S`Z3JhAD*JV zqm)W|gqc4RI|p9FjlRE&qpE+?0-Ff2d4d7b_ZY~xvklU>LAQ<0VdwxFjUfT}4`>09 zEN++IyuAlrzRN+MM-OH~5=1IanF~~MbH-oPrI#pa0}KF$^CUlL7D9SQ%Kkc9hi?an zYvU7y&F$f(4o4J;u%YbmT2v;w>B)tiUt5}yQFHk)ux*uc_~==dB7yWU&_0&58DRr- zU_VloWYSsgNYM!dOCcpvjIVJp(}&rk7F74(qbPoHIfCaRpX`(ODM=y^!t8XJl}gMY zwJB$Z?{@&ni%i3#-KCsTa*#OfY7IK1R> z%ie~S;dT`DWQji!9F{{JC74qgP3Fu@rBhpXUCBIPRMy0X63(b%epIkuyFt;Kf!7AN z=LydM=?AaAk(n2`^7y$Jgu}doH%a`kvuu5DGNhI=a=(^+J53twU_vY{vT)M>3GfQI z3{(G8J`O`Lq9#A;G~2*BN)l>x*hcy3-xhaum`R?WpR)s4uQ9M>5yuy{zi=(#!fo3N z&@V{n!U@juKu)YB$L|Fp`B_ulS+>MyK(Xg9k%zvECtnXi5 z7B*gi98fO*m(ZkJ+kQ#YT(xwLm2ff8r~1;CjxhQ zfYP26ian$wwqj;5kGb!=@J$ zaZ!iAd%pdFc}N-=LTG1E^bydr*j);rhf159ktUdYbE4>)DSbYOlt})DrGw==^OdTBm9)Z)1%7s5u<<)sEXT1>rY-paTH1q~em`xHwe>>7>e6d0-Q z+q?sM9}hZtRg8@-A`vU`B8tPBvYwcZTw^lo@Np5jZWrCL_AoC!j5#h3>#)GHO_qcd z38i}O3-$!8pYB9yBXO)^q&wQOmFQSnIu0T@pqW7P z9RQH12HrIO9StVpXQqraI1OaG3Vs;s_*Ib z#&Z{Z3Bx-2A0zJMUhFSa)V5oG{d`B+Fm0s|EB6Zfhlh8r6j*8Q(-9-TL?oJ?pPzo1 z*}_h>s1L>CoEC_F+XPTK{diDU z_z1z9$r1Fi*AVnAUn{%GMzSwPi}-B6R$CI2s=S<5Eir~gq}!!q8itkTBVwMEaFK>8 zV!+jrI-_IlBVwE#i=`JXBD@gTSdE@h9qZcCin0ibv7UUhYR{4evU6^In6ma>GOCyS zWi)Q8_E=n^>eJk_S4&|Ws&N9aFR#kFl}^Bze?}dHM~?&Hc=CU1j*3r6g8WYj zna#{tiwgWw+aqO&L-6qA{bnQDE7%px8V5xLvIXRKfvszvZO#PNbk9pY-+A5ct!!+` z&&JLKhHm9fcrUs=-yj52KZ{Xl1U+8R7tjgJ9+r}s0RhEiSnCAkWcfg``S-}=IMC)u z!XD^#G~~;2dnDispr@{V7NG~e_Oe?61-`YEZ~{Q{p$bnsAI2Xbmh9k|NU#_FIY{#p z+C3e8|Eny--cg*_GVc`)>dRX>7R0)Op3FEzKcG_Mo0=B`TL%@Vz~92Az6ZlL}a4+v~7ok5E7#{txm-z z*#llBesS7xSfSw`9kwGyKJ#jxdCe-8C0tRi5&X7~7?#6MaUMILNjXl)P72V#%8bml zrF$5o4^t2C+99?dz3ca{79hg>H0XKgOW5r;Vd3_{)sDfxBiBJov=0&au! 
zN6w*TLipaTSg&$_jGe*i}vyS=KgZD)!U2qSS}rGxEa;wu8qYAwi9ly^&FZ$HF=yfBX&S8r5*1| zhFA2-3F#!Gu82yi7p2aK(|Uxe(nmi%8>>k(YSdiH%1MO*<1RJ5>h;DU4;t3qKh)FM z`XPe9_QtMrKh~AF{l8=c1OGO3KEXP|Kl{Hlam6S2*ZrXHrPAGXKVNm9CDl(X&^?{a z;QAtC+Vz*yeu=OFypE$z2t~nR`&7H%w4=g?i+*>_~|d&>7Dhcz%v z6joD%H*?^f6&@XY^ceqlJT3W4h$VG|=$r3%leyu9BcY?OFJ-bf$Vwml=*5?)MQOi` zVxFh?nr}zk(+f{h-)&x+T`1Ec8Y%Fc4ClPJ2jXL9x|%j)0%5MAk}$eZZ2)C0{BDGU%`n{e zO*xS=3R_eCo_ap*Q=+1eG8O}%&)VR6!(!C;KJ9p`oAlV&vC;TLC zo?jvH6CH{^J*|gVxnt4JpuB?uqAzopF(^E80Mbqy5W(qQcn263OLQ5+K6oV-5kov7 zq3V)g=3te0^wxNI$y}CSqvfBT@-?^0pK_G<^6-y;S`-ICpR;n_WbOyemr8eH%s{03 zalk=)2}43LG{J@Helw)~A6IiAmC$~f=>j2ggX2dO>gPC~BWK+u<{qe7kXqq2WvBRt zO2P1f7#a(Y)H%j1lW!R~(#yV5ZZ;*FZ$XW}dk%X>z*m-cJ`ro@umxJ{qT)74$qb(u zS%lkDjUG(eS=lqsk(!Ymxs7HBwdC^-=M1m2rv^<_lFol=?Vo>-4p6 z{*In|o*H%YOECiKXiQ0{HeK;ZCgS+sTYY@$qD=#&o@oJ;8Y*M2Ix%gAooa@OMEQ^0paEHt%9uXP29SFcfs*NKG0ca-76yOTokYVtmE`<4_>>u!V{s`_A7;@4^$z#ZiepNr^7GWP5AvtrfQ zumx~;;RCTyY{Ed7me54UW{wv>T){u%An=YVD|^2Zu5bi+E)*QhNu9CnACb9U$!N$v z(gQhf>#9_pxzhny3S zB-B4?=<6t6(soDkZX{|HI(&Mmq3vuYI&@D@zxt!m?M=Qlnl3#>4K0USvV^ug|H4_O zOdT&H09?fjNNr5WHP0XokJx5uS+NUnD~%G5JMi?Flwg&+51;Vftc-_q4%_gKFYa}c zSeE=}9yU68Eh_xDH(5cvl5mGFq&w-*2TieNq4<}jST;v_kg4q2Oy7_}-=kpf{qV`` z$_2{FlwruQKD4jR%lGp;6uW!LS(nU{;J_e(TfXz?;wf@7OSDs1qqg*GNV9TQ(u)?F z0{?ht;8rGq1t6%wRr)h}ORpwqyRDYwtk<{hL-gy$ri-1dcHN}#iJcGaMtODJGlW_t zyICP1?6Xu8MB$KT3L;F^U`U?S{xC#v2_0I$81Ga4llZs21|pZiE^DD@nNfU}!4B)F zXJZVH19tJOu?8zOeObr2u}17ijdNaHA=U8MbYNzAp;6WLuurvrZ#P~BTWsp?@=w5> zi~uzCU(^IN>~d=+xQ_CLqFB4uw(DQ_&o9|;{-gB*a2VF$-lQcrJ;D<1se^ul5=iN{ z9adMAVq?odnKJmqwYj|1yH1aU&hWLt-WN@M!;$bieW}}PuiM`weIv0AeM6@|NDQTp z)*2gIM7Vt?)KP$`W||WSa0QE3jaRI_oHAyQqz?TUL8TQ2T;|t^gui=%EamdT`(^O0 zg1_bMl)RPjk$|pi0&5AovqLevs6AbzSx#J|80T^keI(yP0uu3mhYP^9EUFKCdphNI z4g3YP=r;5?&xE2Yt3VS&7pli%TV=GX?pIs2CWEn{uu9u3Bnq(4&_Z3IQ&=Xb6Vvz0 z)MMW3ZCRZR-u)iQ8l?n(2~_P%S>-#p)g82J&7V^YN_6C(>k{aV=2m+L9P0q^OM?M= z3OuCSCFdLo??;WAFer)nXFFF%HqW=|tx0q>AJScAD4hxG6(89AkkJ0^1dzNvg& zU!*dcVR0UrS)zng`!Ifk)EySQT%FH;%>+ei_B~1VKJIvp^B)aQ1Vv6wTw62_J9OKM zY7For0V#zXmwFL@)TTW(N#N%KPk3>91W2u%o@V~YpY@cG{JQlUlUkmbT)NX;)F`L+ zEzwpY<>;AXS%3BH1bdqMYP8X}maZSQ-bPLzUL|6lUi8JtnwTricFV}4^<09Ug`TGi zY8YjVS-ZoID^y?NY5Jwjf33H!(~U5u)$z z*esV@BOoz-o>~F8Xi|bN{zhy|N~TD2M@IIo$>ZKdLmL(`r`;v^n)Vg0c+-fgFMj@c zwG(a?ggPbg6JDYJBqjvj(ushcQ7Rv_c9~T8{|f_p0>Q0e+ix%7*gcNf4%p);_V2~d ztbEh%4cqiZt-b3WPvuR5PBkCD3QJfrh43I#;4zVUu-DnbR$CfQ4-Wf=atfYoyPvO5 zPM_QQMrzC-;r_uhrIG#dmgl_Z46>qW1#ZDXDtQST$)35Tj9oX1hYxzL5~TAi|h$*6Y9cUO2SR-+Fy-*ogC-_|X257n%M% z&UeS`bBvLKN3m*$CSx)|ZJ8Q80j@u8V#{ghhFm*)m~gmoMnz8Bj6LA-?W@+zSo=(v zJzU*}ww$fW1+ny?kvk5Znm|v}Jmnu|X^T2*hvYCEMrc>q?j+B?RCzyLnQvYcz(qpf z8Ku-q$p)+m?${Lg-y57tdTJmTllsvSK1_0A`VOvJ$m8Vz|64=Cw*l@Ke9VTw>lbIly&qF#TO zZ3gZU{mFpuimSIa&A--qFH%KTpH2t7g$xK3Br`V*Fdu!Z8UrtM<=}$|o>zC?T`lo@Z=Rc9xOIi7y{B9)?mI8Saeiy(@K@xhmNY!UfwV1BY6?d&1} z5HZZjByx(x2OrQ*q+mCyPeB&5Rf$I!_NrQj@BY@138znzCA z_P8az7AaZnf-}re(M`&NQgRFZZ+i-huo_EpZdbw|yj)&gK>-A^FY7899c2@%$vyAr z&Hm(WGhSqD5ctkxd?6Pq^1Y`pX~~QxCx*JbJsuJDVG2#Y84sW}0i|~v<=bX$qZX@c z7gLNx(-G_p z`z}Agdj-v$<0qFV;ql-=g2Oo1z}1A4Bs2MU z1y(Xc(d+qWS~=)*&ASy5$z1NcMO>4bjsTgNW{{p}yGefZ1kt6|#DHE%%eVvUZuO&^ zV#L8nq89!9d%rp&Hci3n3WF7*KbZMrK0kfh1=7uH*yqvq8TA(UdtB?z#yLLmk zFQ!*KPM6CGB`3lrvaRuNXD}FO-uz|~_of?vFuGlBZ`6=dUYUFRj%30b+sUy149@WU zUx91U)bl3X(bP)-KEt2_a_c}la_Y_H66;4pWDHYfpdk<19Av!KT=GQn(sO;VLi$PE z{}K8UCv5)0+zsbS7)ItlW(}`4CIrYKSosH-BH;!8hFU2(3l$;)2FM1Y5p|g&kOBF? 
zFnhRY;11Lzg@DnUBeT)$#r~#zOZ-W}G{g4+|ByYu7b7(A${kN4D zLg2gCr_x(tG$giRRMHVzi&p5Op4b*63GCrr9ki>aZra-zUJC79wCAbf#nZV8OVO~X zD-p0aH!kWD`l`;OWD?aL{kft|V&ACdKr5kdC+Y(&`*(A~VC<@i*BH7q9Kt4{jUl62 zpC(*w3aP(^VvIf}0%)wF_h>T+4iY5xb2wovC!*33mE(c|l1bZ~?x#ZCN7FQBe@J$Y zV=_}l@R=`gq_)sEri>GXZK=47C%?CQ1a>)U-ey*~#GKzOycfl*KJz*602CF9QOp}UPa%L{n|%?l zN~W*wK{?j-kWNsu#d1sQ1<+8+@%1|@T*#X8##R%&gA*?Bk38R+Fcb=4O*fwIXR84k z@}faMSC*$ErU-5WUfBX%fIN-}_0$tz{Tv^pC74LbogX&i%R3vSgghk~Dabbv^(I)z zk09c`Z@>F})pJwbpa)rRJm)MFF-t1>q`KZruXx<~ayA=LPWx0;seIw5T*!(9>2