From 6b9795849d497b41514aa9462690cf7c2802e4f6 Mon Sep 17 00:00:00 2001 From: hako-mikan <122196982+hako-mikan@users.noreply.github.com> Date: Thu, 9 Nov 2023 20:23:37 +0900 Subject: [PATCH 001/135] Fix model switch bug --- extensions-builtin/Lora/networks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py index 96f935b23..a21ea0fa3 100644 --- a/extensions-builtin/Lora/networks.py +++ b/extensions-builtin/Lora/networks.py @@ -418,7 +418,7 @@ def network_forward(module, input, original_forward): def network_reset_cached_weight(self: Union[torch.nn.Conv2d, torch.nn.Linear]): self.network_current_names = () self.network_weights_backup = None - + self.network_bias_backup = None def network_Linear_forward(self, input): if shared.opts.lora_functional: From 25e8273d2f6481deca221b29d35093f6d0c9da6a Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 21 Jan 2024 02:21:36 +0900 Subject: [PATCH 002/135] re-work multi --styles-file --styles-file change to append str --styles-file is [] then defaults to [styles.csv] --styles-file accepts paths or paths with wildcard "*" the first `--styles-file` entry is used as the default styles file path if the filename is a wildcard then the first matching file is used if no match is found, create a new "styles.csv" in the same dir as the first path when saving a new style it will be saved in the default styles file when saving an existing style, it will be saved to the file it belongs to order of the styles files in the styles dropdown can be controlled to a certain degree by the order of --styles-file --- modules/cmd_args.py | 2 +- modules/shared.py | 3 +- modules/styles.py | 79 +++++++++++++++++++------------------ modules/ui_prompt_styles.py | 9 +++-- 4 files changed, 49 insertions(+), 44 deletions(-) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index e58059a1f..f1251b6c8 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -88,7 +88,7 @@ parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anythin parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything") parser.add_argument("--gradio-allowed-path", action='append', help="add path to gradio's allowed_paths, make it possible to serve files from it", default=[data_path]) parser.add_argument("--opt-channelslast", action='store_true', help="change memory type for stable diffusion to channels last") -parser.add_argument("--styles-file", type=str, help="filename to use for styles", default=os.path.join(data_path, 'styles.csv')) +parser.add_argument("--styles-file", type=str, action='append', help="path or wildcard path of styles files, allow multiple entries.", default=[]) parser.add_argument("--autolaunch", action='store_true', help="open the webui URL in the system's default browser upon launch", default=False) parser.add_argument("--theme", type=str, help="launches the UI with light or dark theme", default=None) parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False) diff --git a/modules/shared.py b/modules/shared.py index 636619391..ccdca4e70 100644 --- a/modules/shared.py +++ b/modules/shared.py @@ -1,3 +1,4 @@ +import os import sys import gradio as gr @@ -11,7 +12,7 @@ parser = shared_cmd_options.parser batch_cond_uncond = True # old field, unused now in favor of shared.opts.batch_cond_uncond parallel_processing_allowed = True
-styles_filename = cmd_opts.styles_file +styles_filename = cmd_opts.styles_file = cmd_opts.styles_file if len(cmd_opts.styles_file) > 0 else [os.path.join(data_path, 'styles.csv')] config_filename = cmd_opts.ui_settings_file hide_dirs = {"visible": not cmd_opts.hide_ui_dir_config} diff --git a/modules/styles.py b/modules/styles.py index 026c43001..9edcc7e44 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -1,16 +1,15 @@ +from pathlib import Path import csv -import fnmatch import os -import os.path import typing import shutil class PromptStyle(typing.NamedTuple): name: str - prompt: str - negative_prompt: str - path: str = None + prompt: str | None + negative_prompt: str | None + path: str | None = None def merge_prompts(style_prompt: str, prompt: str) -> str: @@ -79,14 +78,19 @@ def extract_original_prompts(style: PromptStyle, prompt, negative_prompt): class StyleDatabase: - def __init__(self, path: str): + def __init__(self, paths: list[str | Path]): self.no_style = PromptStyle("None", "", "", None) self.styles = {} - self.path = path + self.paths = paths + self.all_styles_files: list[Path] = [] - folder, file = os.path.split(self.path) - filename, _, ext = file.partition('*') - self.default_path = os.path.join(folder, filename + ext) + folder, file = os.path.split(self.paths[0]) + if '*' in file or '?' in file: + # if the first path is a wildcard pattern, find the first match else use "folder/styles.csv" as the default path + self.default_path = next(Path(folder).glob(file), Path(os.path.join(folder, 'styles.csv'))) + self.paths.insert(0, self.default_path) + else: + self.default_path = Path(self.paths[0]) self.prompt_fields = [field for field in PromptStyle._fields if field != "path"] @@ -99,33 +103,31 @@ class StyleDatabase: """ self.styles.clear() - path, filename = os.path.split(self.path) + # scans for all styles files + all_styles_files = [] + for pattern in self.paths: + folder, file = os.path.split(pattern) + if '*' in file or '?' 
in file: + found_files = Path(folder).glob(file) + [all_styles_files.append(file) for file in found_files] + else: + # if os.path.exists(pattern): + all_styles_files.append(Path(pattern)) - if "*" in filename: - fileglob = filename.split("*")[0] + "*.csv" - filelist = [] - for file in os.listdir(path): - if fnmatch.fnmatch(file, fileglob): - filelist.append(file) - # Add a visible divider to the style list - half_len = round(len(file) / 2) - divider = f"{'-' * (20 - half_len)} {file.upper()}" - divider = f"{divider} {'-' * (40 - len(divider))}" - self.styles[divider] = PromptStyle( - f"{divider}", None, None, "do_not_save" - ) - # Add styles from this CSV file - self.load_from_csv(os.path.join(path, file)) - if len(filelist) == 0: - print(f"No styles found in {path} matching {fileglob}") - return - elif not os.path.exists(self.path): - print(f"Style database not found: {self.path}") - return - else: - self.load_from_csv(self.path) + # Remove any duplicate entries + seen = set() + self.all_styles_files = [s for s in all_styles_files if not (s in seen or seen.add(s))] - def load_from_csv(self, path: str): + for styles_file in self.all_styles_files: + if len(all_styles_files) > 1: + # add divider when more than styles file + # '---------------- STYLES ----------------' + divider = f' {styles_file.stem.upper()} '.center(40, '-') + self.styles[divider] = PromptStyle(f"{divider}", None, None, "do_not_save") + if styles_file.is_file(): + self.load_from_csv(styles_file) + + def load_from_csv(self, path: str | Path): with open(path, "r", encoding="utf-8-sig", newline="") as file: reader = csv.DictReader(file, skipinitialspace=True) for row in reader: @@ -137,7 +139,7 @@ class StyleDatabase: negative_prompt = row.get("negative_prompt", "") # Add style to database self.styles[row["name"]] = PromptStyle( - row["name"], prompt, negative_prompt, path + row["name"], prompt, negative_prompt, str(path) ) def get_style_paths(self) -> set: @@ -145,11 +147,11 @@ class StyleDatabase: # Update any styles without a path to the default path for style in list(self.styles.values()): if not style.path: - self.styles[style.name] = style._replace(path=self.default_path) + self.styles[style.name] = style._replace(path=str(self.default_path)) # Create a list of all distinct paths, including the default path style_paths = set() - style_paths.add(self.default_path) + style_paths.add(str(self.default_path)) for _, style in self.styles.items(): if style.path: style_paths.add(style.path) @@ -177,7 +179,6 @@ class StyleDatabase: def save_styles(self, path: str = None) -> None: # The path argument is deprecated, but kept for backwards compatibility - _ = path style_paths = self.get_style_paths() diff --git a/modules/ui_prompt_styles.py b/modules/ui_prompt_styles.py index 0d74c23fa..d67e3f17e 100644 --- a/modules/ui_prompt_styles.py +++ b/modules/ui_prompt_styles.py @@ -22,9 +22,12 @@ def save_style(name, prompt, negative_prompt): if not name: return gr.update(visible=False) - style = styles.PromptStyle(name, prompt, negative_prompt) + existing_style = shared.prompt_styles.styles.get(name) + path = existing_style.path if existing_style is not None else None + + style = styles.PromptStyle(name, prompt, negative_prompt, path) shared.prompt_styles.styles[style.name] = style - shared.prompt_styles.save_styles(shared.styles_filename) + shared.prompt_styles.save_styles() return gr.update(visible=True) @@ -34,7 +37,7 @@ def delete_style(name): return shared.prompt_styles.styles.pop(name, None) - 
shared.prompt_styles.save_styles(shared.styles_filename) + shared.prompt_styles.save_styles() return '', '', '' From 2974b9cee94dc474ffbc9e9617d14c9aaf9e1e63 Mon Sep 17 00:00:00 2001 From: Stefan Benten Date: Sun, 21 Jan 2024 14:05:47 +0100 Subject: [PATCH 003/135] modules/api/api.py: add api endpoint to refresh embeddings list --- modules/api/api.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/modules/api/api.py b/modules/api/api.py index b3d74e513..b6bb9d06a 100644 --- a/modules/api/api.py +++ b/modules/api/api.py @@ -230,6 +230,7 @@ class Api: self.add_api_route("/sdapi/v1/realesrgan-models", self.get_realesrgan_models, methods=["GET"], response_model=list[models.RealesrganItem]) self.add_api_route("/sdapi/v1/prompt-styles", self.get_prompt_styles, methods=["GET"], response_model=list[models.PromptStyleItem]) self.add_api_route("/sdapi/v1/embeddings", self.get_embeddings, methods=["GET"], response_model=models.EmbeddingsResponse) + self.add_api_route("/sdapi/v1/refresh-embeddings", self.refresh_embeddings, methods=["POST"]) self.add_api_route("/sdapi/v1/refresh-checkpoints", self.refresh_checkpoints, methods=["POST"]) self.add_api_route("/sdapi/v1/refresh-vae", self.refresh_vae, methods=["POST"]) self.add_api_route("/sdapi/v1/create/embedding", self.create_embedding, methods=["POST"], response_model=models.CreateResponse) @@ -643,6 +644,10 @@ class Api: "skipped": convert_embeddings(db.skipped_embeddings), } + def refresh_embeddings(self): + with self.queue_lock: + sd_hijack.model_hijack.embedding_db.load_textual_inversion_embeddings(force_reload=True) + def refresh_checkpoints(self): with self.queue_lock: shared.refresh_checkpoints() From fd383140cf405100f3c619f106472273a7545beb Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Mon, 22 Jan 2024 02:52:34 -0800 Subject: [PATCH 004/135] fix: wrong devices for eye and constraint --- extensions-builtin/Lora/network_oft.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 342fcd0dc..d1c46a4b2 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -57,12 +57,12 @@ class NetworkModuleOFT(network.NetworkModule): def calc_updown(self, orig_weight): oft_blocks = self.oft_blocks.to(orig_weight.device) - eye = torch.eye(self.block_size, device=self.oft_blocks.device) + eye = torch.eye(self.block_size, device=oft_blocks.device) if self.is_kohya: block_Q = oft_blocks - oft_blocks.transpose(1, 2) # ensure skew-symmetric orthogonal matrix norm_Q = torch.norm(block_Q.flatten()) - new_norm_Q = torch.clamp(norm_Q, max=self.constraint) + new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device)) block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) oft_blocks = torch.matmul(eye + block_Q, (eye - block_Q).float().inverse()) From 0466ee2a83680090e28de74881ccf306cb89667c Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 25 Jan 2024 05:45:45 +0900 Subject: [PATCH 005/135] xyz filter blank for number axes --- scripts/xyz_grid.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/scripts/xyz_grid.py b/scripts/xyz_grid.py index 2f385ebf2..6d3e42c06 100644 --- a/scripts/xyz_grid.py +++ b/scripts/xyz_grid.py @@ -554,6 +554,8 @@ class Script(scripts.Script): valslist_ext = [] for val in valslist: + if val.strip() == '': + continue m = re_range.fullmatch(val) mc = re_range_count.fullmatch(val) if m is not 
None: @@ -576,6 +578,8 @@ class Script(scripts.Script): valslist_ext = [] for val in valslist: + if val.strip() == '': + continue m = re_range_float.fullmatch(val) mc = re_range_count_float.fullmatch(val) if m is not None: From 47cf92039b524bb6dad05e96607208b8b9df7ede Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 26 Jan 2024 17:16:53 +0900 Subject: [PATCH 006/135] minor fix to #14525 --- modules/launch_utils.py | 1 - modules/options.py | 2 ++ 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 8e58d7145..3ff4576a3 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -251,7 +251,6 @@ def list_extensions(settings_file): except Exception: errors.report(f'\nCould not load settings\nThe config file "{settings_file}" is likely corrupted\nIt has been moved to the "tmp/config.json"\nReverting config to default\n\n''', exc_info=True) os.replace(settings_file, os.path.join(script_path, "tmp", "config.json")) - settings = {} disabled_extensions = set(settings.get('disabled_extensions', [])) disable_all_extensions = settings.get('disable_all_extensions', 'none') diff --git a/modules/options.py b/modules/options.py index 503b40e98..35ccade25 100644 --- a/modules/options.py +++ b/modules/options.py @@ -198,6 +198,8 @@ class Options: try: with open(filename, "r", encoding="utf8") as file: self.data = json.load(file) + except FileNotFoundError: + self.data = {} except Exception: errors.report(f'\nCould not load settings\nThe config file "{filename}" is likely corrupted\nIt has been moved to the "tmp/config.json"\nReverting config to default\n\n''', exc_info=True) os.replace(filename, os.path.join(script_path, "tmp", "config.json")) From 36d1fefc19ecb7f1f8b11a8f3bc269be0a36de5e Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 27 Jan 2024 14:42:52 +0900 Subject: [PATCH 007/135] rework set_named_arg change identifying a script from using Scripts class name to Scripts internal name an as not all Script have unique names raise RuntimeError when there's issue --- modules/scripts.py | 27 +++++++++++++++++++-------- 1 file changed, 19 insertions(+), 8 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index 060069cf3..4b38ca320 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -939,22 +939,33 @@ class ScriptRunner: except Exception: errors.report(f"Error running setup: {script.filename}", exc_info=True) - def set_named_arg(self, args, script_type, arg_elem_id, value): - script = next((x for x in self.scripts if type(x).__name__ == script_type), None) + def set_named_arg(self, args, script_name, arg_elem_id, value): + """Locate an arg of a specific script in script_args and set its value + Args: + args: all script args of process p, p.script_args + script_name: the name target script name to + arg_elem_id: the elem_id of the target arg + value: the value to set + Returns: + Updated script args + when script_name in not found or arg_elem_id is not found in script controls, raise RuntimeError + """ + script = next((x for x in self.scripts if x.name == script_name), None) if script is None: - return + raise RuntimeError(f"script {script_name} not found") for i, control in enumerate(script.controls): - if arg_elem_id in control.elem_id: + if arg_elem_id == control.elem_id: index = script.args_from + i - if isinstance(args, list): + if isinstance(args, tuple): + return args[:index] + (value,) + args[index + 1:] + elif 
isinstance(args, list): args[index] = value return args - elif isinstance(args, tuple): - return args[:index] + (value,) + args[index+1:] else: - return None + raise RuntimeError(f"args is not a list or tuple, but {type(args)}") + raise RuntimeError(f"arg_elem_id {arg_elem_id} not found in script {script_name}") scripts_txt2img: ScriptRunner = None From 2996e43ff71ca50f6e5b64676f998946d8041d19 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 27 Jan 2024 14:49:50 +0900 Subject: [PATCH 008/135] fix txt2img_upscale use force_enable_hr to set p.enable_hr = True allow Script.setup() have access to the correct value add a comment for p.txt2img_upscale --- modules/txt2img.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/txt2img.py b/modules/txt2img.py index 4efcb4c3d..fc56b8a86 100644 --- a/modules/txt2img.py +++ b/modules/txt2img.py @@ -60,10 +60,10 @@ def txt2img_upscale(id_task: str, request: gr.Request, gallery, gallery_index, g assert len(gallery) > 0, 'No image to upscale' assert 0 <= gallery_index < len(gallery), f'Bad image index: {gallery_index}' - p = txt2img_create_processing(id_task, request, *args) - p.enable_hr = True + p = txt2img_create_processing(id_task, request, *args, force_enable_hr=True) p.batch_size = 1 p.n_iter = 1 + # txt2img_upscale attribute that signifies this is called by txt2img_upscale p.txt2img_upscale = True geninfo = json.loads(generation_info) From 486aeda3a75ed20e75d85d6ae6845d66d1cbe1de Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 27 Jan 2024 19:34:07 +0900 Subject: [PATCH 009/135] fix CLIP Interrogator topN regex Co-Authored-By: Martin Rizzo <60830236+martin-rizzo@users.noreply.github.com> --- modules/interrogate.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/interrogate.py b/modules/interrogate.py index 35a627ca1..c93e7aa86 100644 --- a/modules/interrogate.py +++ b/modules/interrogate.py @@ -17,7 +17,7 @@ clip_model_name = 'ViT-L/14' Category = namedtuple("Category", ["name", "topn", "items"]) -re_topn = re.compile(r"\.top(\d+)\.") +re_topn = re.compile(r"\.top(\d+)$") def category_types(): return [f.stem for f in Path(shared.interrogator.content_dir).glob('*.txt')] From eae0bb89fd0e0836098235540765d934c377d15c Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 27 Jan 2024 18:31:53 +0900 Subject: [PATCH 010/135] set_named_arg fuzzy option --- modules/scripts.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/scripts.py b/modules/scripts.py index 4b38ca320..94690a22f 100644 --- a/modules/scripts.py +++ b/modules/scripts.py @@ -939,13 +939,14 @@ class ScriptRunner: except Exception: errors.report(f"Error running setup: {script.filename}", exc_info=True) - def set_named_arg(self, args, script_name, arg_elem_id, value): + def set_named_arg(self, args, script_name, arg_elem_id, value, fuzzy=False): """Locate an arg of a specific script in script_args and set its value Args: args: all script args of process p, p.script_args script_name: the name target script name to arg_elem_id: the elem_id of the target arg value: the value to set + fuzzy: if True, arg_elem_id can be a substring of the control.elem_id else exact match Returns: Updated script args when script_name in not found or arg_elem_id is not found in script controls, raise RuntimeError @@ -955,7 +956,7 @@ class ScriptRunner: raise RuntimeError(f"script {script_name} not found") for i, control 
in enumerate(script.controls): - if arg_elem_id == control.elem_id: + if arg_elem_id in control.elem_id if fuzzy else arg_elem_id == control.elem_id: index = script.args_from + i if isinstance(args, tuple): From 757dda9ade9d47cb2a755dad0475c8c4fbcaa114 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 27 Jan 2024 22:30:12 +0300 Subject: [PATCH 011/135] Add Pad conds v0 option --- modules/infotext_versions.py | 5 ++- modules/sd_samplers_cfg_denoiser.py | 70 +++++++++++++++++++++++++---- modules/sd_samplers_common.py | 7 +++ modules/sd_samplers_kdiffusion.py | 6 +-- modules/sd_samplers_timesteps.py | 6 +-- modules/shared_options.py | 3 +- 6 files changed, 78 insertions(+), 19 deletions(-) diff --git a/modules/infotext_versions.py b/modules/infotext_versions.py index a5afeebf1..23b45c3f9 100644 --- a/modules/infotext_versions.py +++ b/modules/infotext_versions.py @@ -31,9 +31,12 @@ def backcompat(d): if ver is None: return - if ver < v160: + if ver < v160 and '[' in d.get('Prompt', ''): d["Old prompt editing timelines"] = True + if ver < v160 and d.get('Sampler', '') in ('DDIM', 'PLMS'): + d["Pad conds v0"] = True + if ver < v170_tsnr: d["Downcast alphas_cumprod"] = True diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index 6d76aa965..ef2373969 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -53,6 +53,7 @@ class CFGDenoiser(torch.nn.Module): self.step = 0 self.image_cfg_scale = None self.padded_cond_uncond = False + self.padded_cond_uncond_v0 = False self.sampler = sampler self.model_wrap = None self.p = None @@ -91,6 +92,62 @@ class CFGDenoiser(torch.nn.Module): self.sampler.sampler_extra_args['cond'] = c self.sampler.sampler_extra_args['uncond'] = uc + def pad_cond_uncond(self, cond, uncond): + empty = shared.sd_model.cond_stage_model_empty_prompt + num_repeats = (cond.shape[1] - cond.shape[1]) // empty.shape[1] + + if num_repeats < 0: + cond = pad_cond(cond, -num_repeats, empty) + self.padded_cond_uncond = True + elif num_repeats > 0: + uncond = pad_cond(uncond, num_repeats, empty) + self.padded_cond_uncond = True + + return cond, uncond + + def pad_cond_uncond_v0(self, cond, uncond): + """ + Pads the 'uncond' tensor to match the shape of the 'cond' tensor. + + If 'uncond' is a dictionary, it is assumed that the 'crossattn' key holds the tensor to be padded. + If 'uncond' is a tensor, it is padded directly. + + If the number of columns in 'uncond' is less than the number of columns in 'cond', the last column of 'uncond' + is repeated to match the number of columns in 'cond'. + + If the number of columns in 'uncond' is greater than the number of columns in 'cond', 'uncond' is truncated + to match the number of columns in 'cond'. + + Args: + cond (torch.Tensor or DictWithShape): The condition tensor to match the shape of 'uncond'. + uncond (torch.Tensor or DictWithShape): The tensor to be padded, or a dictionary containing the tensor to be padded. + + Returns: + tuple: A tuple containing the 'cond' tensor and the padded 'uncond' tensor. 
+ + Note: + This is the padding that was always used in DDIM before version 1.6.0 + """ + + is_dict_cond = isinstance(uncond, dict) + uncond_vec = uncond['crossattn'] if is_dict_cond else uncond + + if uncond_vec.shape[1] < cond.shape[1]: + last_vector = uncond_vec[:, -1:] + last_vector_repeated = last_vector.repeat([1, cond.shape[1] - uncond_vec.shape[1], 1]) + uncond_vec = torch.hstack([uncond_vec, last_vector_repeated]) + self.padded_cond_uncond_v0 = True + elif uncond_vec.shape[1] > cond.shape[1]: + uncond_vec = uncond_vec[:, :cond.shape[1]] + self.padded_cond_uncond_v0 = True + + if is_dict_cond: + uncond['crossattn'] = uncond_vec + else: + uncond = uncond_vec + + return cond, uncond + def forward(self, x, sigma, uncond, cond, cond_scale, s_min_uncond, image_cond): if state.interrupted or state.skipped: raise sd_samplers_common.InterruptedException @@ -162,16 +219,11 @@ class CFGDenoiser(torch.nn.Module): sigma_in = sigma_in[:-batch_size] self.padded_cond_uncond = False + self.padded_cond_uncond_v0 = False if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: - empty = shared.sd_model.cond_stage_model_empty_prompt - num_repeats = (tensor.shape[1] - uncond.shape[1]) // empty.shape[1] - - if num_repeats < 0: - tensor = pad_cond(tensor, -num_repeats, empty) - self.padded_cond_uncond = True - elif num_repeats > 0: - uncond = pad_cond(uncond, num_repeats, empty) - self.padded_cond_uncond = True + tensor, uncond = self.pad_cond_uncond(tensor, uncond) + elif shared.opts.pad_cond_uncond_v0 and tensor.shape[1] != uncond.shape[1]: + tensor, uncond = self.pad_cond_uncond_v0(tensor, uncond) if tensor.shape[1] == uncond.shape[1] or skip_uncond: if is_edit_model: diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 58efcad23..6bd38e12a 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -335,3 +335,10 @@ class Sampler: def sample_img2img(self, p, x, noise, conditioning, unconditional_conditioning, steps=None, image_conditioning=None): raise NotImplementedError() + + def add_infotext(self, p): + if self.model_wrap_cfg.padded_cond_uncond: + p.extra_generation_params["Pad conds"] = True + + if self.model_wrap_cfg.padded_cond_uncond_v0: + p.extra_generation_params["Pad conds v0"] = True diff --git a/modules/sd_samplers_kdiffusion.py b/modules/sd_samplers_kdiffusion.py index 8a8c87e0d..337106c02 100644 --- a/modules/sd_samplers_kdiffusion.py +++ b/modules/sd_samplers_kdiffusion.py @@ -187,8 +187,7 @@ class KDiffusionSampler(sd_samplers_common.Sampler): samples = self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True + self.add_infotext(p) return samples @@ -234,8 +233,7 @@ class KDiffusionSampler(sd_samplers_common.Sampler): samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True + self.add_infotext(p) return samples diff --git a/modules/sd_samplers_timesteps.py b/modules/sd_samplers_timesteps.py index 777dd8d0e..8cc7d3848 100644 --- a/modules/sd_samplers_timesteps.py +++ b/modules/sd_samplers_timesteps.py @@ -133,8 +133,7 @@ class CompVisSampler(sd_samplers_common.Sampler): samples = 
self.launch_sampling(t_enc + 1, lambda: self.func(self.model_wrap_cfg, xi, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True + self.add_infotext(p) return samples @@ -158,8 +157,7 @@ class CompVisSampler(sd_samplers_common.Sampler): } samples = self.launch_sampling(steps, lambda: self.func(self.model_wrap_cfg, x, extra_args=self.sampler_extra_args, disable=False, callback=self.callback_state, **extra_params_kwargs)) - if self.model_wrap_cfg.padded_cond_uncond: - p.extra_generation_params["Pad conds"] = True + self.add_infotext(p) return samples diff --git a/modules/shared_options.py b/modules/shared_options.py index fef1fb836..bdd066c4a 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -210,7 +210,8 @@ options_templates.update(options_section(('optimizations', "Optimizations", "sd" "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"), "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio hr').info("only applies if non-zero and overrides above"), - "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt to be same length", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"), + "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"), + "pad_cond_uncond_v0": OptionInfo(False, "Pad prompt/negative prompt (v0)", infotext='Pad conds v0').info("alternative implementation for the above; used prior to 1.6.0 for DDIM sampler; ignored if the above is set; changes seeds"), "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"), "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"), "fp8_storage": OptionInfo("Disable", "FP8 weight", gr.Radio, {"choices": ["Disable", "Enable for SDXL", "Enable"]}).info("Use FP8 to store Linear/Conv layers' weight. 
Require pytorch>=2.1.0."), From 2cb1b65309256763814a006a07b683c0f1013a30 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sun, 28 Jan 2024 22:18:46 +0800 Subject: [PATCH 012/135] Bump safetensors' version to 0.4.2 --- requirements_versions.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements_versions.txt b/requirements_versions.txt index 2a922f288..5e30b5ea1 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -19,7 +19,7 @@ piexif==1.1.3 psutil==5.9.5 pytorch_lightning==1.9.4 resize-right==0.0.2 -safetensors==0.3.1 +safetensors==0.4.2 scikit-image==0.21.0 spandrel==0.1.6 tomesd==0.1.3 From baaf39b6f92f24275a1b264a634514bac571dfae Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 29 Jan 2024 10:20:27 +0300 Subject: [PATCH 013/135] fix the typo -- thanks Cyberbeing --- modules/sd_samplers_cfg_denoiser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index ef2373969..941dff4b3 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -94,7 +94,7 @@ class CFGDenoiser(torch.nn.Module): def pad_cond_uncond(self, cond, uncond): empty = shared.sd_model.cond_stage_model_empty_prompt - num_repeats = (cond.shape[1] - cond.shape[1]) // empty.shape[1] + num_repeats = (cond.shape[1] - uncond.shape[1]) // empty.shape[1] if num_repeats < 0: cond = pad_cond(cond, -num_repeats, empty) From ec124607f47371a6cfd61a795f86a7f1cbd44651 Mon Sep 17 00:00:00 2001 From: wangshuai09 <391746016@qq.com> Date: Sat, 27 Jan 2024 17:21:32 +0800 Subject: [PATCH 014/135] Add NPU Support --- modules/devices.py | 9 +++-- modules/initialize.py | 6 +++- modules/npu_specific.py | 34 +++++++++++++++++++ .../textual_inversion/textual_inversion.py | 4 +++ requirements.txt | 4 +++ requirements_versions.txt | 4 +++ webui.sh | 4 +++ 7 files changed, 62 insertions(+), 3 deletions(-) create mode 100644 modules/npu_specific.py diff --git a/modules/devices.py b/modules/devices.py index ea1f712f9..f1e565015 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -3,7 +3,7 @@ import contextlib from functools import lru_cache import torch -from modules import errors, shared +from modules import errors, shared, npu_specific if sys.platform == "darwin": from modules import mac_specific @@ -40,6 +40,9 @@ def get_optimal_device_name(): if has_xpu(): return xpu_specific.get_xpu_device_string() + if npu_specific.has_npu: + return npu_specific.get_npu_device_string() + return "cpu" @@ -67,6 +70,9 @@ def torch_gc(): if has_xpu(): xpu_specific.torch_xpu_gc() + if npu_specific.has_npu: + npu_specific.torch_npu_gc() + def enable_tf32(): if torch.cuda.is_available(): @@ -164,4 +170,3 @@ def first_time_calculation(): x = torch.zeros((1, 1, 3, 3)).to(device, dtype) conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype) conv2d(x) - diff --git a/modules/initialize.py b/modules/initialize.py index ac95fc6f0..3285cc3c1 100644 --- a/modules/initialize.py +++ b/modules/initialize.py @@ -143,13 +143,17 @@ def initialize_rest(*, reload_script_modules=False): its optimization may be None because the list of optimizaers has neet been filled by that time, so we apply optimization again. 
""" + from modules import devices + # Work around due to bug in torch_npu, revert me after fixed, @see https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue + if devices.npu_specific.has_npu: + import torch + torch.npu.set_device(0) shared.sd_model # noqa: B018 if sd_hijack.current_optimizer is None: sd_hijack.apply_optimizations() - from modules import devices devices.first_time_calculation() if not shared.cmd_opts.skip_load_model_at_start: Thread(target=load_model).start() diff --git a/modules/npu_specific.py b/modules/npu_specific.py new file mode 100644 index 000000000..d8aebf9c2 --- /dev/null +++ b/modules/npu_specific.py @@ -0,0 +1,34 @@ +import importlib +import torch + +from modules import shared + + +def check_for_npu(): + if importlib.util.find_spec("torch_npu") is None: + return False + import torch_npu + torch_npu.npu.set_device(0) + + try: + # Will raise a RuntimeError if no NPU is found + _ = torch.npu.device_count() + return torch.npu.is_available() + except RuntimeError: + return False + + +def get_npu_device_string(): + if shared.cmd_opts.device_id is not None: + return f"npu:{shared.cmd_opts.device_id}" + return "npu:0" + + +def torch_npu_gc(): + # Work around due to bug in torch_npu, revert me after fixed, @see https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue + torch.npu.set_device(0) + with torch.npu.device(get_npu_device_string()): + torch.npu.empty_cache() + + +has_npu = check_for_npu() diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index 04dda585c..9c062503e 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -151,6 +151,10 @@ class EmbeddingDatabase: return embedding def get_expected_shape(self): + # workaround + if devices.npu_specific.has_npu: + import torch + torch.npu.set_device(0) vec = shared.sd_model.cond_stage_model.encode_embedding_init_text(",", 1) return vec.shape[1] diff --git a/requirements.txt b/requirements.txt index 80b438455..4537402bb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -5,6 +5,8 @@ accelerate basicsr blendmodes clean-fid +cloudpickle +decorator einops fastapi>=0.90.1 gfpgan @@ -26,9 +28,11 @@ resize-right safetensors scikit-image>=0.19 +synr==0.5.0 timm tomesd torch torchdiffeq torchsde +tornado transformers==4.30.2 diff --git a/requirements_versions.txt b/requirements_versions.txt index cb7403a9d..95515b55a 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -4,6 +4,8 @@ accelerate==0.21.0 basicsr==1.4.2 blendmodes==2022 clean-fid==0.1.35 +cloudpickle==3.0.0 +decorator==5.1.1 einops==0.4.1 fastapi==0.94.0 gfpgan==1.3.8 @@ -23,10 +25,12 @@ realesrgan==0.3.0 resize-right==0.0.2 safetensors==0.3.1 scikit-image==0.21.0 +synr==0.5.0 timm==0.9.2 tomesd==0.1.3 torch torchdiffeq==0.2.3 torchsde==0.2.6 +tornado==6.4 transformers==4.30.2 httpx==0.24.1 diff --git a/webui.sh b/webui.sh index cff433272..3f6e87fd8 100755 --- a/webui.sh +++ b/webui.sh @@ -159,6 +159,10 @@ then if echo "$gpu_info" | grep -q "AMD" && [[ -z "${TORCH_COMMAND}" ]] then export TORCH_COMMAND="pip install torch==2.0.1+rocm5.4.2 torchvision==0.15.2+rocm5.4.2 --index-url https://download.pytorch.org/whl/rocm5.4.2" + elif echo "$gpu_info" | grep -q "Huawei" && [[ -z "${TORCH_COMMAND}" ]] + then + export TORCH_COMMAND="pip install torch==2.1.0 torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu; pip install torch_npu" + fi fi From 750dd6014a45397979cad42a74634451d0861581 Mon 
Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Mon, 29 Jan 2024 22:27:53 +0800 Subject: [PATCH 015/135] Fix potential bugs --- modules/devices.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/modules/devices.py b/modules/devices.py index dfffaf24f..60f7d6d7c 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -141,7 +141,12 @@ def manual_cast_forward(target_dtype): args = [arg.to(target_dtype) if isinstance(arg, torch.Tensor) else arg for arg in args] kwargs = {k: v.to(target_dtype) if isinstance(v, torch.Tensor) else v for k, v in kwargs.items()} - org_dtype = torch_utils.get_param(self).dtype + org_dtype = target_dtype + for param in self.parameters(): + if param.dtype != target_dtype: + org_dtype = param.dtype + break + if org_dtype != target_dtype: self.to(target_dtype) result = self.org_forward(*args, **kwargs) @@ -170,7 +175,7 @@ def manual_cast(target_dtype): continue applied = True org_forward = module_type.forward - if module_type == torch.nn.MultiheadAttention and has_xpu(): + if module_type == torch.nn.MultiheadAttention: module_type.forward = manual_cast_forward(torch.float32) else: module_type.forward = manual_cast_forward(target_dtype) From 6e7f0860f7ae4a0ce59f9416fb9b2f3bcab44f1d Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Mon, 29 Jan 2024 22:46:43 +0800 Subject: [PATCH 016/135] linting --- modules/devices.py | 1 - 1 file changed, 1 deletion(-) diff --git a/modules/devices.py b/modules/devices.py index 60f7d6d7c..8f49f7a48 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -4,7 +4,6 @@ from functools import lru_cache import torch from modules import errors, shared -from modules import torch_utils if sys.platform == "darwin": from modules import mac_specific From d243e24f539d717b221992e894a5db5a321bf3cd Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Mon, 29 Jan 2024 22:49:45 +0800 Subject: [PATCH 017/135] Try to reverse the dtype checking mechanism --- modules/devices.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/modules/devices.py b/modules/devices.py index 8f49f7a48..f9648e9a0 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -4,6 +4,7 @@ from functools import lru_cache import torch from modules import errors, shared +from modules import torch_utils if sys.platform == "darwin": from modules import mac_specific @@ -140,11 +141,7 @@ def manual_cast_forward(target_dtype): args = [arg.to(target_dtype) if isinstance(arg, torch.Tensor) else arg for arg in args] kwargs = {k: v.to(target_dtype) if isinstance(v, torch.Tensor) else v for k, v in kwargs.items()} - org_dtype = target_dtype - for param in self.parameters(): - if param.dtype != target_dtype: - org_dtype = param.dtype - break + org_dtype = torch_utils.get_param(self).dtype if org_dtype != target_dtype: self.to(target_dtype) From f9ba7e648ad5bf7dbdf2b95fa207936179bf784e Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Mon, 29 Jan 2024 22:54:12 +0800 Subject: [PATCH 018/135] Revert "Try to reverse the dtype checking mechanism" This reverts commit d243e24f539d717b221992e894a5db5a321bf3cd. 
--- modules/devices.py | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/modules/devices.py b/modules/devices.py index f9648e9a0..8f49f7a48 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -4,7 +4,6 @@ from functools import lru_cache import torch from modules import errors, shared -from modules import torch_utils if sys.platform == "darwin": from modules import mac_specific @@ -141,7 +140,11 @@ def manual_cast_forward(target_dtype): args = [arg.to(target_dtype) if isinstance(arg, torch.Tensor) else arg for arg in args] kwargs = {k: v.to(target_dtype) if isinstance(v, torch.Tensor) else v for k, v in kwargs.items()} - org_dtype = torch_utils.get_param(self).dtype + org_dtype = target_dtype + for param in self.parameters(): + if param.dtype != target_dtype: + org_dtype = param.dtype + break if org_dtype != target_dtype: self.to(target_dtype) From c4255d12f7531725e591160e1cfe47d7a2fc0f02 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Wed, 31 Jan 2024 04:36:11 +0900 Subject: [PATCH 019/135] add tooltip create_submit_box --- modules/ui_toprow.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/ui_toprow.py b/modules/ui_toprow.py index fbe705be1..457fbf520 100644 --- a/modules/ui_toprow.py +++ b/modules/ui_toprow.py @@ -96,9 +96,9 @@ class Toprow: with gr.Row(elem_id=f"{self.id_part}_generate_box", elem_classes=["generate-box"] + (["generate-box-compact"] if self.is_compact else []), render=not self.is_compact) as submit_box: self.submit_box = submit_box - self.interrupt = gr.Button('Interrupt', elem_id=f"{self.id_part}_interrupt", elem_classes="generate-box-interrupt") - self.skip = gr.Button('Skip', elem_id=f"{self.id_part}_skip", elem_classes="generate-box-skip") - self.submit = gr.Button('Generate', elem_id=f"{self.id_part}_generate", variant='primary') + self.interrupt = gr.Button('Interrupt', elem_id=f"{self.id_part}_interrupt", elem_classes="generate-box-interrupt", tooltip="End generation immediately or after completing current batch") + self.skip = gr.Button('Skip', elem_id=f"{self.id_part}_skip", elem_classes="generate-box-skip", tooltip="Stop generation of current batch and continues onto next batch") + self.submit = gr.Button('Generate', elem_id=f"{self.id_part}_generate", variant='primary', tooltip="Right click generate forever menu") self.skip.click( fn=lambda: shared.state.skip(), From cc3f604310458eed7d26456c1b3934d582283ffe Mon Sep 17 00:00:00 2001 From: wangshuai09 <391746016@qq.com> Date: Wed, 31 Jan 2024 10:46:53 +0800 Subject: [PATCH 020/135] Update --- modules/devices.py | 7 +++++++ modules/initialize.py | 5 +---- modules/launch_utils.py | 8 ++++++++ modules/npu_specific.py | 5 +---- modules/textual_inversion/textual_inversion.py | 5 +---- requirements.txt | 4 ---- requirements_npu.txt | 4 ++++ requirements_versions.txt | 4 ---- 8 files changed, 22 insertions(+), 20 deletions(-) create mode 100644 requirements_npu.txt diff --git a/modules/devices.py b/modules/devices.py index c737162ac..28c0c54d8 100644 --- a/modules/devices.py +++ b/modules/devices.py @@ -88,9 +88,16 @@ def torch_gc(): xpu_specific.torch_xpu_gc() if npu_specific.has_npu: + torch_npu_set_device() npu_specific.torch_npu_gc() +def torch_npu_set_device(): + # Work around due to bug in torch_npu, revert me after fixed, @see https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue + if npu_specific.has_npu: + torch.npu.set_device(0) + + def enable_tf32(): if torch.cuda.is_available(): diff --git 
a/modules/initialize.py b/modules/initialize.py index cc34fd6f8..f7313ff4d 100644 --- a/modules/initialize.py +++ b/modules/initialize.py @@ -143,10 +143,7 @@ def initialize_rest(*, reload_script_modules=False): by that time, so we apply optimization again. """ from modules import devices - # Work around due to bug in torch_npu, revert me after fixed, @see https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue - if devices.npu_specific.has_npu: - import torch - torch.npu.set_device(0) + devices.torch_npu_set_device() shared.sd_model # noqa: B018 diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 3ff4576a3..107c72b02 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -338,6 +338,7 @@ def prepare_environment(): torch_index_url = os.environ.get('TORCH_INDEX_URL', "https://pytorch-extension.intel.com/release-whl/stable/xpu/us/") torch_command = os.environ.get('TORCH_COMMAND', f"pip install torch==2.0.0a0 intel-extension-for-pytorch==2.0.110+gitba7f6c1 --extra-index-url {torch_index_url}") requirements_file = os.environ.get('REQS_FILE', "requirements_versions.txt") + requirements_file_for_npu = os.environ.get('REQS_FILE_FOR_NPU', "requirements_npu.txt") xformers_package = os.environ.get('XFORMERS_PACKAGE', 'xformers==0.0.23.post1') clip_package = os.environ.get('CLIP_PACKAGE', "https://github.com/openai/CLIP/archive/d50d76daa670286dd6cacf3bcd80b5e4823fc8e1.zip") @@ -421,6 +422,13 @@ def prepare_environment(): run_pip(f"install -r \"{requirements_file}\"", "requirements") startup_timer.record("install requirements") + if not os.path.isfile(requirements_file_for_npu): + requirements_file_for_npu = os.path.join(script_path, requirements_file_for_npu) + + if "torch_npu" in torch_command and not requirements_met(requirements_file_for_npu): + run_pip(f"install -r \"{requirements_file_for_npu}\"", "requirements_for_npu") + startup_timer.record("install requirements_for_npu") + if not args.skip_install: run_extensions_installers(settings_file=args.ui_settings_file) diff --git a/modules/npu_specific.py b/modules/npu_specific.py index d8aebf9c2..941006911 100644 --- a/modules/npu_specific.py +++ b/modules/npu_specific.py @@ -8,11 +8,10 @@ def check_for_npu(): if importlib.util.find_spec("torch_npu") is None: return False import torch_npu - torch_npu.npu.set_device(0) try: # Will raise a RuntimeError if no NPU is found - _ = torch.npu.device_count() + _ = torch_npu.npu.device_count() return torch.npu.is_available() except RuntimeError: return False @@ -25,8 +24,6 @@ def get_npu_device_string(): def torch_npu_gc(): - # Work around due to bug in torch_npu, revert me after fixed, @see https://gitee.com/ascend/pytorch/issues/I8KECW?from=project-issue - torch.npu.set_device(0) with torch.npu.device(get_npu_device_string()): torch.npu.empty_cache() diff --git a/modules/textual_inversion/textual_inversion.py b/modules/textual_inversion/textual_inversion.py index d16e3b9a5..6d815c0b3 100644 --- a/modules/textual_inversion/textual_inversion.py +++ b/modules/textual_inversion/textual_inversion.py @@ -150,10 +150,7 @@ class EmbeddingDatabase: return embedding def get_expected_shape(self): - # workaround - if devices.npu_specific.has_npu: - import torch - torch.npu.set_device(0) + devices.torch_npu_set_device() vec = shared.sd_model.cond_stage_model.encode_embedding_init_text(",", 1) return vec.shape[1] diff --git a/requirements.txt b/requirements.txt index d1e4ede91..731a1be7d 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,8 +4,6 @@ accelerate blendmodes 
clean-fid -cloudpickle -decorator einops facexlib fastapi>=0.90.1 @@ -26,10 +24,8 @@ resize-right safetensors scikit-image>=0.19 -synr==0.5.0 tomesd torch torchdiffeq torchsde -tornado transformers==4.30.2 diff --git a/requirements_npu.txt b/requirements_npu.txt new file mode 100644 index 000000000..5e6a43646 --- /dev/null +++ b/requirements_npu.txt @@ -0,0 +1,4 @@ +cloudpickle +decorator +synr==0.5.0 +tornado diff --git a/requirements_versions.txt b/requirements_versions.txt index 1c66cd8cf..5e30b5ea1 100644 --- a/requirements_versions.txt +++ b/requirements_versions.txt @@ -3,8 +3,6 @@ Pillow==9.5.0 accelerate==0.21.0 blendmodes==2022 clean-fid==0.1.35 -cloudpickle==3.0.0 -decorator==5.1.1 einops==0.4.1 facexlib==0.3.0 fastapi==0.94.0 @@ -23,12 +21,10 @@ pytorch_lightning==1.9.4 resize-right==0.0.2 safetensors==0.4.2 scikit-image==0.21.0 -synr==0.5.0 spandrel==0.1.6 tomesd==0.1.3 torch torchdiffeq==0.2.3 torchsde==0.2.6 -tornado==6.4 transformers==4.30.2 httpx==0.24.1 From 74b214a92a2959948dcd05a78b7380e046163871 Mon Sep 17 00:00:00 2001 From: Cyberbeing Date: Mon, 29 Jan 2024 02:06:50 -0800 Subject: [PATCH 021/135] Fix potential autocast NaNs in image upscale --- modules/upscaler_utils.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/modules/upscaler_utils.py b/modules/upscaler_utils.py index afed8b407..b5e5a80ca 100644 --- a/modules/upscaler_utils.py +++ b/modules/upscaler_utils.py @@ -6,7 +6,7 @@ import torch import tqdm from PIL import Image -from modules import images, shared, torch_utils +from modules import devices, images, shared, torch_utils logger = logging.getLogger(__name__) @@ -44,7 +44,8 @@ def upscale_pil_patch(model, img: Image.Image) -> Image.Image: with torch.no_grad(): tensor = pil_image_to_torch_bgr(img).unsqueeze(0) # add batch dimension tensor = tensor.to(device=param.device, dtype=param.dtype) - return torch_bgr_to_pil_image(model(tensor)) + with devices.without_autocast(): + return torch_bgr_to_pil_image(model(tensor)) def upscale_with_model( From bbe8e02d74ab8f2efca44407488e38a1bb733983 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Thu, 1 Feb 2024 15:40:15 +0900 Subject: [PATCH 022/135] catch load style.csv error --- modules/styles.py | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/modules/styles.py b/modules/styles.py index 9edcc7e44..60bd8a7fb 100644 --- a/modules/styles.py +++ b/modules/styles.py @@ -1,4 +1,5 @@ from pathlib import Path +from modules import errors import csv import os import typing @@ -128,19 +129,22 @@ class StyleDatabase: self.load_from_csv(styles_file) def load_from_csv(self, path: str | Path): - with open(path, "r", encoding="utf-8-sig", newline="") as file: - reader = csv.DictReader(file, skipinitialspace=True) - for row in reader: - # Ignore empty rows or rows starting with a comment - if not row or row["name"].startswith("#"): - continue - # Support loading old CSV format with "name, text"-columns - prompt = row["prompt"] if "prompt" in row else row["text"] - negative_prompt = row.get("negative_prompt", "") - # Add style to database - self.styles[row["name"]] = PromptStyle( - row["name"], prompt, negative_prompt, str(path) - ) + try: + with open(path, "r", encoding="utf-8-sig", newline="") as file: + reader = csv.DictReader(file, skipinitialspace=True) + for row in reader: + # Ignore empty rows or rows starting with a comment + if not row or row["name"].startswith("#"): + continue + # Support loading old CSV format with 
"name, text"-columns + prompt = row["prompt"] if "prompt" in row else row["text"] + negative_prompt = row.get("negative_prompt", "") + # Add style to database + self.styles[row["name"]] = PromptStyle( + row["name"], prompt, negative_prompt, str(path) + ) + except Exception: + errors.report(f'Error loading styles from {path}: ', exc_info=True) def get_style_paths(self) -> set: """Returns a set of all distinct paths of files that styles are loaded from.""" From 9f3ba383143e117601666e4711ceeff2dfda2526 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 1 Feb 2024 22:34:29 +0300 Subject: [PATCH 023/135] Add "Interrupting..." placeholder. --- javascript/ui.js | 13 +++++++++++-- modules/ui_toprow.py | 16 +++++----------- style.css | 6 +++--- 3 files changed, 19 insertions(+), 16 deletions(-) diff --git a/javascript/ui.js b/javascript/ui.js index 3430b3fef..d5d85bb67 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -119,9 +119,18 @@ function create_submit_args(args) { return res; } +function setSubmitButtonsVisibility(tabname, showInterrupt, showSkip, showInterrupting) { + gradioApp().getElementById(tabname + '_interrupt').style.display = showInterrupt ? "block" : "none"; + gradioApp().getElementById(tabname + '_skip').style.display = showSkip ? "block" : "none"; + gradioApp().getElementById(tabname + '_interrupting').style.display = showInterrupting ? "block" : "none"; +} + function showSubmitButtons(tabname, show) { - gradioApp().getElementById(tabname + '_interrupt').style.display = show ? "none" : "block"; - gradioApp().getElementById(tabname + '_skip').style.display = show ? "none" : "block"; + setSubmitButtonsVisibility(tabname, ! show, !show, false); +} + +function showSubmitInterruptingPlaceholder(tabname) { + setSubmitButtonsVisibility(tabname, false, true, true); } function showRestoreProgressButton(tabname, show) { diff --git a/modules/ui_toprow.py b/modules/ui_toprow.py index 457fbf520..30cf1b1b8 100644 --- a/modules/ui_toprow.py +++ b/modules/ui_toprow.py @@ -17,6 +17,7 @@ class Toprow: button_deepbooru = None interrupt = None + interrupting = None skip = None submit = None @@ -98,14 +99,9 @@ class Toprow: self.interrupt = gr.Button('Interrupt', elem_id=f"{self.id_part}_interrupt", elem_classes="generate-box-interrupt", tooltip="End generation immediately or after completing current batch") self.skip = gr.Button('Skip', elem_id=f"{self.id_part}_skip", elem_classes="generate-box-skip", tooltip="Stop generation of current batch and continues onto next batch") + self.interrupting = gr.Button('Interrupting...', elem_id=f"{self.id_part}_interrupting", elem_classes="generate-box-interrupting", tooltip="Interrupting generation...") self.submit = gr.Button('Generate', elem_id=f"{self.id_part}_generate", variant='primary', tooltip="Right click generate forever menu") - self.skip.click( - fn=lambda: shared.state.skip(), - inputs=[], - outputs=[], - ) - def interrupt_function(): if not shared.state.stopping_generation and shared.state.job_count > 1 and shared.opts.interrupt_after_current: shared.state.stop_generating() @@ -113,11 +109,9 @@ class Toprow: else: shared.state.interrupt() - self.interrupt.click( - fn=interrupt_function, - inputs=[], - outputs=[], - ) + self.skip.click(fn=shared.state.skip) + self.interrupt.click(fn=interrupt_function, _js='function(){ showSubmitInterruptingPlaceholder("' + self.id_part + '"); }') + self.interrupting.click(fn=interrupt_function) def create_tools_row(self): with gr.Row(elem_id=f"{self.id_part}_tools"): diff --git 
a/style.css b/style.css index 4352737ee..04b0a2faa 100644 --- a/style.css +++ b/style.css @@ -331,17 +331,17 @@ input[type="checkbox"].input-accordion-checkbox{ .generate-box{ position: relative; } -.gradio-button.generate-box-skip, .gradio-button.generate-box-interrupt{ +.gradio-button.generate-box-skip, .gradio-button.generate-box-interrupt, .gradio-button.generate-box-interrupting{ position: absolute; width: 50%; height: 100%; display: none; background: #b4c0cc; } -.gradio-button.generate-box-skip:hover, .gradio-button.generate-box-interrupt:hover{ +.gradio-button.generate-box-skip:hover, .gradio-button.generate-box-interrupt:hover, .gradio-button.generate-box-interrupting:hover{ background: #c2cfdb; } -.gradio-button.generate-box-interrupt{ +.gradio-button.generate-box-interrupt, .gradio-button.generate-box-interrupting{ left: 0; border-radius: 0.5rem 0 0 0.5rem; } From 26003706593786d4e17b71139967ded109c3b7b3 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 1 Feb 2024 23:54:57 +0300 Subject: [PATCH 024/135] fix error when editing extra networks card --- modules/ui_extra_networks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 325d848ee..58981daf6 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -135,7 +135,7 @@ def get_single_card(page: str = "", tabname: str = "", name: str = ""): item = page.items.get(name) page.read_user_metadata(item) - item_html = page.create_item_html(tabname, item) + item_html = page.create_item_html(tabname, item, shared.html("extra-networks-card.html")) return JSONResponse({"html": item_html}) From 5904e3f6b3d4bb71ccca8a5f3eb0fd8f1514265e Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Fri, 2 Feb 2024 19:30:59 +0300 Subject: [PATCH 025/135] fix page refresh not re-applying sort/filter for #14588 fix path sortkey not including the filename for #14588 --- javascript/extraNetworks.js | 2 +- modules/ui_extra_networks.py | 23 +++++++++++------------ 2 files changed, 12 insertions(+), 13 deletions(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 4ef1a96fb..f96af4475 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -450,7 +450,7 @@ function extraNetworksControlRefreshOnClick(event, tabname, extra_networks_tabna * @param tabname The name of the active tab in the sd webui. Ex: txt2img, img2img, etc. * @param extra_networks_tabname The id of the active extraNetworks tab. Ex: lora, checkpoints, etc. 
*/ - var btn_refresh_internal = gradioApp().getElementById(tabname + "_extra_refresh_internal"); + var btn_refresh_internal = gradioApp().getElementById(tabname + "_" + extra_networks_tabname + "_extra_refresh_internal"); btn_refresh_internal.dispatchEvent(new Event("click")); } diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 58981daf6..8f3839373 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -559,7 +559,7 @@ class ExtraNetworksPage: "date_created": int(mtime), "date_modified": int(ctime), "name": pth.name.lower(), - "path": str(pth.parent).lower(), + "path": str(pth).lower(), } def find_preview(self, path): @@ -638,6 +638,7 @@ def pages_in_preferred_order(pages): return sorted(pages, key=lambda x: tab_scores[x.name]) + def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): ui = ExtraNetworksUi() ui.pages = [] @@ -648,8 +649,6 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): related_tabs = [] - button_refresh = gr.Button("Refresh", elem_id=f"{tabname}_extra_refresh_internal", visible=False) - for page in ui.stored_extra_pages: with gr.Tab(page.title, elem_id=f"{tabname}_{page.extra_networks_tabname}", elem_classes=["extra-page"]) as tab: with gr.Column(elem_id=f"{tabname}_{page.extra_networks_tabname}_prompts", elem_classes=["extra-page-prompts"]): @@ -678,6 +677,15 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): ) tab.select(fn=None, _js=jscode, inputs=[], outputs=[], show_progress=False) + def refresh(): + for pg in ui.stored_extra_pages: + pg.refresh() + create_html() + return ui.pages_contents + + button_refresh = gr.Button("Refresh", elem_id=f"{tabname}_{page.extra_networks_tabname}_extra_refresh_internal", visible=False) + button_refresh.click(fn=refresh, inputs=[], outputs=ui.pages).then(fn=lambda: None, _js="function(){ " + f"applyExtraNetworkFilter('{tabname}_{page.extra_networks_tabname}');" + " }") + def create_html(): ui.pages_contents = [pg.create_html(ui.tabname) for pg in ui.stored_extra_pages] @@ -686,16 +694,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): create_html() return ui.pages_contents - def refresh(): - for pg in ui.stored_extra_pages: - pg.refresh() - create_html() - return ui.pages_contents - interface.load(fn=pages_html, inputs=[], outputs=ui.pages) - # NOTE: Event is manually fired in extraNetworks.js:extraNetworksTreeRefreshOnClick() - # button is unused and hidden at all times. Only used in order to fire this event. 
- button_refresh.click(fn=refresh, inputs=[], outputs=ui.pages) return ui From 5084b39ea58b31392ae7fdec4b5051ad278819eb Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Fri, 2 Feb 2024 19:41:07 +0300 Subject: [PATCH 026/135] fix checkpoint selection not working for #14588 --- modules/ui_extra_networks_checkpoints.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui_extra_networks_checkpoints.py b/modules/ui_extra_networks_checkpoints.py index a8c336719..d69d144db 100644 --- a/modules/ui_extra_networks_checkpoints.py +++ b/modules/ui_extra_networks_checkpoints.py @@ -30,7 +30,7 @@ class ExtraNetworksPageCheckpoints(ui_extra_networks.ExtraNetworksPage): "preview": self.find_preview(path), "description": self.find_description(path), "search_terms": search_terms, - "onclick": html.escape(f"return selectCheckpoint('{name}');"), + "onclick": html.escape(f"return selectCheckpoint({ui_extra_networks.quote_js(name)})"), "local_preview": f"{path}.{shared.opts.samples_format}", "metadata": checkpoint.metadata, "sort_keys": {'default': index, **self.get_sort_keys(checkpoint.filename)}, From 1ff1c5be6420d5088a94285bad2faf4258de42e4 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Fri, 2 Feb 2024 20:51:54 +0300 Subject: [PATCH 027/135] fix refresh button forgetting sort order for extra networks #14588 --- javascript/extraNetworks.js | 18 ++++++++++++------ javascript/ui.js | 2 +- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index f96af4475..9a3a23926 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -48,7 +48,7 @@ function setupExtraNetworksForTab(tabname) { return; // `return` is equivalent of `continue` but for forEach loops. } - var applyFilter = function() { + var applyFilter = function(force) { var searchTerm = search.value.toLowerCase(); gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card').forEach(function(elem) { var searchOnly = elem.querySelector('.search_only'); @@ -67,17 +67,17 @@ function setupExtraNetworksForTab(tabname) { } }); - applySort(); + applySort(force); }; - var applySort = function() { + var applySort = function(force) { var cards = gradioApp().querySelectorAll('#' + tabname + '_extra_tabs div.card'); var reverse = sort_dir.dataset.sortdir == "Descending"; var sortKey = sort_mode.dataset.sortmode.toLowerCase().replace("sort", "").replaceAll(" ", "_").replace(/_+$/, "").trim() || "name"; sortKey = "sort" + sortKey.charAt(0).toUpperCase() + sortKey.slice(1); var sortKeyStore = sortKey + "-" + (reverse ? 
"Descending" : "Ascending") + "-" + cards.length; - if (sortKeyStore == sort_mode.dataset.sortkey) { + if (sortKeyStore == sort_mode.dataset.sortkey && !force) { return; } sort_mode.dataset.sortkey = sortKeyStore; @@ -167,11 +167,17 @@ function extraNetworksTabSelected(tabname, id, showPrompt, showNegativePrompt, t } function applyExtraNetworkFilter(tabname_full) { - setTimeout(extraNetworksApplyFilter[tabname_full], 1); + var doFilter = function() { + extraNetworksApplyFilter[tabname_full](true); + }; + setTimeout(doFilter, 1); } function applyExtraNetworkSort(tabname_full) { - setTimeout(extraNetworksApplySort[tabname_full], 1); + var doSort = function() { + extraNetworksApplySort[tabname_full](true); + }; + setTimeout(doSort, 1); } var extraNetworksApplyFilter = {}; diff --git a/javascript/ui.js b/javascript/ui.js index d5d85bb67..9e66cd245 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -126,7 +126,7 @@ function setSubmitButtonsVisibility(tabname, showInterrupt, showSkip, showInterr } function showSubmitButtons(tabname, show) { - setSubmitButtonsVisibility(tabname, ! show, !show, false); + setSubmitButtonsVisibility(tabname, !show, !show, false); } function showSubmitInterruptingPlaceholder(tabname) { From 321b2db06710cb11f7087f72aa5fee3544a596fc Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Fri, 2 Feb 2024 22:47:51 +0300 Subject: [PATCH 028/135] fix extra networks metadata failing to work properly when you create the .json file with metadata for the first time. --- modules/ui_extra_networks.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 8f3839373..c03b9f081 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -134,7 +134,7 @@ def get_single_card(page: str = "", tabname: str = "", name: str = ""): errors.display(e, "creating item for extra network") item = page.items.get(name) - page.read_user_metadata(item) + page.read_user_metadata(item, use_cache=False) item_html = page.create_item_html(tabname, item, shared.html("extra-networks-card.html")) return JSONResponse({"html": item_html}) @@ -173,9 +173,9 @@ class ExtraNetworksPage: def refresh(self): pass - def read_user_metadata(self, item): + def read_user_metadata(self, item, use_cache=True): filename = item.get("filename", None) - metadata = extra_networks.get_user_metadata(filename, lister=self.lister) + metadata = extra_networks.get_user_metadata(filename, lister=self.lister if use_cache else None) desc = metadata.get("description", None) if desc is not None: From 99c6c4a51bd806f0cb4184cf5a6e974988752475 Mon Sep 17 00:00:00 2001 From: Andray Date: Wed, 7 Feb 2024 16:06:17 +0400 Subject: [PATCH 029/135] add button for refreshing extensions list --- modules/ui_extensions.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/ui_extensions.py b/modules/ui_extensions.py index dc1e34c8a..a24ea32ef 100644 --- a/modules/ui_extensions.py +++ b/modules/ui_extensions.py @@ -548,6 +548,7 @@ def create_ui(): extensions_disable_all = gr.Radio(label="Disable all extensions", choices=["none", "extra", "all"], value=shared.opts.disable_all_extensions, elem_id="extensions_disable_all") extensions_disabled_list = gr.Text(elem_id="extensions_disabled_list", visible=False, container=False) extensions_update_list = gr.Text(elem_id="extensions_update_list", visible=False, container=False) + refresh = gr.Button(value='Refresh', variant="compact") html = "" @@ -566,7 +567,8 @@ 
def create_ui(): with gr.Row(elem_classes="progress-container"): extensions_table = gr.HTML('Loading...', elem_id="extensions_installed_html") - ui.load(fn=extension_table, inputs=[], outputs=[extensions_table]) + ui.load(fn=extension_table, inputs=[], outputs=[extensions_table], show_progress=False) + refresh.click(fn=extension_table, inputs=[], outputs=[extensions_table], show_progress=False) apply.click( fn=apply_and_restart, From 9588721197bc3c61354811eca5aff6f470b0b2f8 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Wed, 7 Feb 2024 04:49:17 -0800 Subject: [PATCH 030/135] feat: support LyCORIS BOFT --- extensions-builtin/Lora/network_oft.py | 44 ++++++++++++++++++++------ 1 file changed, 35 insertions(+), 9 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index d1c46a4b2..8a37828cc 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -1,6 +1,6 @@ import torch import network -from lyco_helpers import factorization +from lyco_helpers import factorization, butterfly_factor from einops import rearrange @@ -36,6 +36,12 @@ class NetworkModuleOFT(network.NetworkModule): # self.alpha is unused self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size) + self.is_boft = False + if "boft" in weights.w.keys(): + self.is_boft = True + self.boft_b = weights.w["boft_b"] + self.boft_m = weights.w["boft_m"] + is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear] is_conv = type(self.sd_module) in [torch.nn.Conv2d] is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] # unsupported @@ -68,14 +74,34 @@ class NetworkModuleOFT(network.NetworkModule): R = oft_blocks.to(orig_weight.device) - # This errors out for MultiheadAttention, might need to be handled up-stream - merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size) - merged_weight = torch.einsum( - 'k n m, k n ... -> k m ...', - R, - merged_weight - ) - merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...') + if not self.is_boft: + # This errors out for MultiheadAttention, might need to be handled up-stream + merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size) + merged_weight = torch.einsum( + 'k n m, k n ... -> k m ...', + R, + merged_weight + ) + merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...') + else: + scale = 1.0 + m = self.boft_m.to(device=oft_blocks.device, dtype=oft_blocks.dtype) + b = self.boft_b.to(device=oft_blocks.device, dtype=oft_blocks.dtype) + r_b = b // 2 + inp = orig_weight + for i in range(m): + bi = R[i] # b_num, b_size, b_size + if i == 0: + # Apply multiplier/scale and rescale into first weight + bi = bi * scale + (1 - scale) * eye + #if self.rescaled: + # bi = bi * self.rescale + inp = rearrange(inp, "(c g k) ... -> (c k g) ...", g=2, k=2**i * r_b) + inp = rearrange(inp, "(d b) ... -> d b ...", b=b) + inp = torch.einsum("b i j, b j ... -> b i ...", bi, inp) + inp = rearrange(inp, "d b ... -> (d b) ...") + inp = rearrange(inp, "(c k g) ... 
-> (c g k) ...", g=2, k=2**i * r_b) + merged_weight = inp updown = merged_weight.to(orig_weight.device) - orig_weight.to(merged_weight.dtype) output_shape = orig_weight.shape From a4668a16b6f8e98bc6e1553aa754735f9148770f Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Wed, 7 Feb 2024 04:51:22 -0800 Subject: [PATCH 031/135] fix: calculate butterfly factor --- extensions-builtin/Lora/network_oft.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 8a37828cc..0f20d701b 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -57,6 +57,9 @@ class NetworkModuleOFT(network.NetworkModule): self.constraint = self.alpha * self.out_dim self.num_blocks = self.dim self.block_size = self.out_dim // self.dim + elif self.is_boft: + self.constraint = None + self.block_size, self.block_num = butterfly_factor(self.out_dim, self.dim) else: self.constraint = None self.block_size, self.num_blocks = factorization(self.out_dim, self.dim) From 81c16c965e532c6d86a969284c320ff8fcb0451d Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Wed, 7 Feb 2024 04:54:14 -0800 Subject: [PATCH 032/135] fix: add butterfly_factor fn --- extensions-builtin/Lora/lyco_helpers.py | 26 +++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/extensions-builtin/Lora/lyco_helpers.py b/extensions-builtin/Lora/lyco_helpers.py index 1679a0ce6..3c4f5bad2 100644 --- a/extensions-builtin/Lora/lyco_helpers.py +++ b/extensions-builtin/Lora/lyco_helpers.py @@ -66,3 +66,29 @@ def factorization(dimension: int, factor:int=-1) -> tuple[int, int]: n, m = m, n return m, n +# from https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/boft.py +def butterfly_factor(dimension: int, factor: int = -1) -> tuple[int, int]: + """ + m = 2k + n = 2**p + m*n = dim + """ + + # Find the first solution and check if it is even doable + m = n = 0 + while m <= factor: + m += 2 + while dimension % m != 0 and m < dimension: + m += 2 + if m > factor: + break + if sum(int(i) for i in f"{dimension//m:b}") == 1: + n = dimension // m + + if n == 0: + raise ValueError( + f"It is impossible to decompose {dimension} with factor {factor} under BOFT constrains." + ) + + #log_butterfly_factorize(dimension, factor, (dimension // n, n)) + return dimension // n, n From 2f1073dc6edf2d1388f6aee4af91cb354099a463 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Wed, 7 Feb 2024 04:55:11 -0800 Subject: [PATCH 033/135] style: fix lint --- extensions-builtin/Lora/network_oft.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 0f20d701b..dc6db56f1 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -96,7 +96,7 @@ class NetworkModuleOFT(network.NetworkModule): bi = R[i] # b_num, b_size, b_size if i == 0: # Apply multiplier/scale and rescale into first weight - bi = bi * scale + (1 - scale) * eye + bi = bi * scale + (1 - scale) * eye #if self.rescaled: # bi = bi * self.rescale inp = rearrange(inp, "(c g k) ... 
-> (c k g) ...", g=2, k=2**i * r_b) From 325eaeb584f8565d49ce73553165088f794d3d12 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Thu, 8 Feb 2024 11:55:05 -0800 Subject: [PATCH 034/135] fix: get boft params from weight shape --- extensions-builtin/Lora/network_oft.py | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index dc6db56f1..fc7132651 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -1,6 +1,6 @@ import torch import network -from lyco_helpers import factorization, butterfly_factor +from lyco_helpers import factorization from einops import rearrange @@ -37,10 +37,8 @@ class NetworkModuleOFT(network.NetworkModule): self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size) self.is_boft = False - if "boft" in weights.w.keys(): + if weights.w["oft_diag"].dim() == 4: self.is_boft = True - self.boft_b = weights.w["boft_b"] - self.boft_m = weights.w["boft_m"] is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear] is_conv = type(self.sd_module) in [torch.nn.Conv2d] @@ -59,7 +57,11 @@ class NetworkModuleOFT(network.NetworkModule): self.block_size = self.out_dim // self.dim elif self.is_boft: self.constraint = None - self.block_size, self.block_num = butterfly_factor(self.out_dim, self.dim) + self.boft_m = weights.w["oft_diag"].shape[0] + self.block_num = weights.w["oft_diag"].shape[1] + self.block_size = weights.w["oft_diag"].shape[2] + self.boft_b = self.block_size + #self.block_size, self.block_num = butterfly_factor(self.out_dim, self.dim) else: self.constraint = None self.block_size, self.num_blocks = factorization(self.out_dim, self.dim) @@ -88,8 +90,8 @@ class NetworkModuleOFT(network.NetworkModule): merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...') else: scale = 1.0 - m = self.boft_m.to(device=oft_blocks.device, dtype=oft_blocks.dtype) - b = self.boft_b.to(device=oft_blocks.device, dtype=oft_blocks.dtype) + m = self.boft_m + b = self.boft_b r_b = b // 2 inp = orig_weight for i in range(m): From 613b0d9548a859408433bff7a6dca7fd0f2eae7e Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Thu, 8 Feb 2024 21:58:59 -0800 Subject: [PATCH 035/135] doc: add boft comment --- extensions-builtin/Lora/network_oft.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index fc7132651..d7b317029 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -29,13 +29,14 @@ class NetworkModuleOFT(network.NetworkModule): self.oft_blocks = weights.w["oft_blocks"] # (num_blocks, block_size, block_size) self.alpha = weights.w["alpha"] # alpha is constraint self.dim = self.oft_blocks.shape[0] # lora dim - # LyCORIS + # LyCORIS OFT elif "oft_diag" in weights.w.keys(): self.is_kohya = False self.oft_blocks = weights.w["oft_diag"] # self.alpha is unused self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size) + # LyCORIS BOFT self.is_boft = False if weights.w["oft_diag"].dim() == 4: self.is_boft = True @@ -89,6 +90,7 @@ class NetworkModuleOFT(network.NetworkModule): ) merged_weight = rearrange(merged_weight, 'k m ... 
-> (k m) ...') else: + # TODO: determine correct value for scale scale = 1.0 m = self.boft_m b = self.boft_b @@ -99,8 +101,6 @@ class NetworkModuleOFT(network.NetworkModule): if i == 0: # Apply multiplier/scale and rescale into first weight bi = bi * scale + (1 - scale) * eye - #if self.rescaled: - # bi = bi * self.rescale inp = rearrange(inp, "(c g k) ... -> (c k g) ...", g=2, k=2**i * r_b) inp = rearrange(inp, "(d b) ... -> d b ...", b=b) inp = torch.einsum("b i j, b j ... -> b i ...", bi, inp) From eb6f2df826087fdc62f6680364a0e16f666eef64 Mon Sep 17 00:00:00 2001 From: v0xie <28695009+v0xie@users.noreply.github.com> Date: Thu, 8 Feb 2024 22:00:15 -0800 Subject: [PATCH 036/135] Revert "fix: add butterfly_factor fn" This reverts commit 81c16c965e532c6d86a969284c320ff8fcb0451d. --- extensions-builtin/Lora/lyco_helpers.py | 26 ------------------------- 1 file changed, 26 deletions(-) diff --git a/extensions-builtin/Lora/lyco_helpers.py b/extensions-builtin/Lora/lyco_helpers.py index 3c4f5bad2..1679a0ce6 100644 --- a/extensions-builtin/Lora/lyco_helpers.py +++ b/extensions-builtin/Lora/lyco_helpers.py @@ -66,29 +66,3 @@ def factorization(dimension: int, factor:int=-1) -> tuple[int, int]: n, m = m, n return m, n -# from https://github.com/KohakuBlueleaf/LyCORIS/blob/dev/lycoris/modules/boft.py -def butterfly_factor(dimension: int, factor: int = -1) -> tuple[int, int]: - """ - m = 2k - n = 2**p - m*n = dim - """ - - # Find the first solution and check if it is even doable - m = n = 0 - while m <= factor: - m += 2 - while dimension % m != 0 and m < dimension: - m += 2 - if m > factor: - break - if sum(int(i) for i in f"{dimension//m:b}") == 1: - n = dimension // m - - if n == 0: - raise ValueError( - f"It is impossible to decompose {dimension} with factor {factor} under BOFT constrains." 
- ) - - #log_butterfly_factorize(dimension, factor, (dimension // n, n)) - return dimension // n, n From 6b8458eb9f5838931b34c47c65397d195150720d Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Fri, 9 Feb 2024 23:19:16 +0900 Subject: [PATCH 037/135] if extensions page not loaded, prevent apply since they are built-in extensions we can make the assumption that they will be at least one or more extensions Co-Authored-By: Andray <33491867+light-and-ray@users.noreply.github.com> --- javascript/extensions.js | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/javascript/extensions.js b/javascript/extensions.js index 312131b76..cc8ee220b 100644 --- a/javascript/extensions.js +++ b/javascript/extensions.js @@ -2,8 +2,11 @@ function extensions_apply(_disabled_list, _update_list, disable_all) { var disable = []; var update = []; - - gradioApp().querySelectorAll('#extensions input[type="checkbox"]').forEach(function(x) { + const extensions_input = gradioApp().querySelectorAll('#extensions input[type="checkbox"]'); + if (extensions_input.length == 0) { + throw Error("Extensions page not yet loaded."); + } + extensions_input.forEach(function(x) { if (x.name.startsWith("enable_") && !x.checked) { disable.push(x.name.substring(7)); } From 6b3f7039b6b71132349d294e884be82ca7c88d87 Mon Sep 17 00:00:00 2001 From: hako-mikan <122196982+hako-mikan@users.noreply.github.com> Date: Fri, 9 Feb 2024 23:57:46 +0900 Subject: [PATCH 038/135] add option --- modules/sd_hijack_clip.py | 4 +++- modules/shared_options.py | 1 + 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 8f29057a9..673b29eac 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -279,7 +279,9 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): original_mean = z.mean() z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape) new_mean = z.mean() - z = z * (original_mean / new_mean) + + if not getattr(opts, "disable_normalize_embeddings", False): + z = z * (original_mean / new_mean) if pooled is not None: z.pooled = pooled diff --git a/modules/shared_options.py b/modules/shared_options.py index d2e86ff10..0b2d7ea34 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -150,6 +150,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion", "sd"), { "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"), "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. This may change existing seeds").needs_reload_ui(), "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), + "disable_normalize_embeddings": OptionInfo(False, "Disable normalize embeddings").info("Do not normalize embeddings after calculating emphasis. 
It can be expected to be effective in preventing artifacts in SDXL."), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}, infotext="Clip skip").link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), From c3c88ca8b46a19f48104d0421e14be28853b2a92 Mon Sep 17 00:00:00 2001 From: hako-mikan <122196982+hako-mikan@users.noreply.github.com> Date: Sat, 10 Feb 2024 00:18:08 +0900 Subject: [PATCH 039/135] Update sd_hijack_clip.py --- modules/sd_hijack_clip.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 673b29eac..89634fbf5 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -279,7 +279,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): original_mean = z.mean() z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape) new_mean = z.mean() - + if not getattr(opts, "disable_normalize_embeddings", False): z = z * (original_mean / new_mean) From 542611cce4882b9acb885372bfbdf545e699fdfa Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 10 Feb 2024 05:39:01 +0900 Subject: [PATCH 040/135] walk_files extensions case insensitive --- modules/util.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/util.py b/modules/util.py index ee373e92c..8d1aea44f 100644 --- a/modules/util.py +++ b/modules/util.py @@ -42,7 +42,7 @@ def walk_files(path, allowed_extensions=None): for filename in sorted(files, key=natural_sort_key): if allowed_extensions is not None: _, ext = os.path.splitext(filename) - if ext not in allowed_extensions: + if ext.lower() not in allowed_extensions: continue if not shared.opts.list_hidden_files and ("/." in root or "\\." 
in root): From 2ba0277b52d352966046cb7b0c5c047cd8a7cdd4 Mon Sep 17 00:00:00 2001 From: analysisjp Date: Sat, 10 Feb 2024 10:09:19 +0900 Subject: [PATCH 041/135] fix: prepare_tcmalloc (Fixed memory leak issue in Ubuntu 22.04 or modern linux environment) --- webui.sh | 54 ++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 18 deletions(-) diff --git a/webui.sh b/webui.sh index 25b949062..794cfb8ae 100755 --- a/webui.sh +++ b/webui.sh @@ -226,30 +226,48 @@ fi # Try using TCMalloc on Linux prepare_tcmalloc() { if [[ "${OSTYPE}" == "linux"* ]] && [[ -z "${NO_TCMALLOC}" ]] && [[ -z "${LD_PRELOAD}" ]]; then + # check glibc version + LIBC_LIB="$(PATH=/usr/sbin:$PATH ldconfig -p | grep -P "libc.so.6" | head -n 1)" + LIBC_INFO=$(echo ${LIBC_LIB} | awk '{print $NF}') + LIBC_VER=$(echo $(${LIBC_INFO} | awk 'NR==1 {print $NF}') | grep -oP '\d+\.\d+') + echo "glibc version is $LIBC_VER" + libc_vernum=$(expr $LIBC_VER) + # Since 2.34 libpthread is integrated into libc.so + libc_v234=2.34 # Define Tcmalloc Libs arrays TCMALLOC_LIBS=("libtcmalloc(_minimal|)\.so\.\d" "libtcmalloc\.so\.\d") - # Traversal array for lib in "${TCMALLOC_LIBS[@]}" do - #Determine which type of tcmalloc library the library supports - TCMALLOC="$(PATH=/usr/sbin:$PATH ldconfig -p | grep -P $lib | head -n 1)" - TC_INFO=(${TCMALLOC//=>/}) - if [[ ! -z "${TC_INFO}" ]]; then - echo "Using TCMalloc: ${TC_INFO}" - #Determine if the library is linked to libptthread and resolve undefined symbol: ptthread_Key_Create - if ldd ${TC_INFO[2]} | grep -q 'libpthread'; then - echo "$TC_INFO is linked with libpthread,execute LD_PRELOAD=${TC_INFO}" - export LD_PRELOAD="${TC_INFO}" - break - else - echo "$TC_INFO is not linked with libpthreadand will trigger undefined symbol: ptthread_Key_Create error" - fi - else - printf "\e[1m\e[31mCannot locate TCMalloc (improves CPU memory usage)\e[0m\n" - fi + # Determine which type of tcmalloc library the library supports + TCMALLOC="$(PATH=/usr/sbin:$PATH ldconfig -p | grep -P $lib | head -n 1)" + TC_INFO=(${TCMALLOC//=>/}) + if [[ ! -z "${TC_INFO}" ]]; then + echo "Check TCMalloc: ${TC_INFO}" + # Determine if the library is linked to libptthread and resolve undefined symbol: ptthread_key_create + if [ $(echo "$libc_vernum < $libc_v234" | bc) -eq 1 ]; then + # glibc < 2.33 pthread_key_create into libpthead.so. check linking libpthread.so... + if ldd ${TC_INFO[2]} | grep -q 'libpthread'; then + echo "$TC_INFO is linked with libpthread,execute LD_PRELOAD=${TC_INFO[2]}" + # set fullpath LD_PRELOAD (To be on the safe side) + export LD_PRELOAD="${TC_INFO[2]}" + break + else + echo "$TC_INFO is not linked with libpthread will trigger undefined symbol: pthread_Key_create error" + fi + else + # Version 2.34 of libc.so (glibc) includes the pthead library IN GLIBC. (USE ubuntu 22.04 and modern linux system and WSL) + # libc.so(glibc) is linked with a library that works in ALMOST ALL Linux userlands. SO NO CHECK! + echo "$TC_INFO is linked with libc.so,execute LD_PRELOAD=${TC_INFO[2]}" + # set fullpath LD_PRELOAD (To be on the safe side) + export LD_PRELOAD="${TC_INFO[2]}" + break + fi + fi done - + if [[ -z "${LD_PRELOAD}" ]]; then + printf "\e[1m\e[31mCannot locate TCMalloc. Do you have tcmalloc or gperftools installed on your system? 
(improves CPU memory usage)\e[0m\n" + fi fi } From 82e2e25325f7976358b4eaca2442742e33be4f3d Mon Sep 17 00:00:00 2001 From: Andray Date: Sat, 10 Feb 2024 13:00:16 +0400 Subject: [PATCH 042/135] ResizeHandleRow png_info and train --- modules/ui.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/modules/ui.py b/modules/ui.py index 177c68720..44ce98325 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -863,7 +863,7 @@ def create_ui(): ui_postprocessing.create_ui() with gr.Blocks(analytics_enabled=False) as pnginfo_interface: - with gr.Row(equal_height=False): + with ResizeHandleRow(equal_height=False): with gr.Column(variant='panel'): image = gr.Image(elem_id="pnginfo_image", label="Source", source="upload", interactive=True, type="pil") @@ -891,7 +891,7 @@ def create_ui(): with gr.Row(equal_height=False): gr.HTML(value="
See wiki for detailed explanation.
") - with gr.Row(variant="compact", equal_height=False): + with ResizeHandleRow(variant="compact", equal_height=False): with gr.Tabs(elem_id="train_tabs"): with gr.Tab(label="Create embedding", id="create_embedding"): From 75833517604964942b5ecaf83d9056efc01bd9d2 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sat, 10 Feb 2024 18:09:10 +0900 Subject: [PATCH 043/135] extensions tab table row hover highlight --- style.css | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/style.css b/style.css index 04b0a2faa..988c28c00 100644 --- a/style.css +++ b/style.css @@ -846,6 +846,20 @@ table.popup-table .link{ display: inline-block; } +/* extensions tab table row hover highlight */ + +#extensions tr:hover td, +#config_state_extensions tr:hover td, +#available_extensions tr:hover td { + background: rgba(0, 0, 0, 0.15) +} + +.dark #extensions tr:hover td , +.dark #config_state_extensions tr:hover td , +.dark #available_extensions tr:hover td { + background: rgba(255, 255, 255, 0.15) +} + /* replace original footer with ours */ footer { From c04c4b95debd2b31c96a97cf4e46e8047a519881 Mon Sep 17 00:00:00 2001 From: missionfloyd Date: Sat, 10 Feb 2024 14:41:32 -0700 Subject: [PATCH 044/135] Always add timestamp to displayed image --- modules/ui_tempdir.py | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 85015db56..91f40ea42 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -35,12 +35,7 @@ def save_pil_to_file(self, pil_image, dir=None, format="png"): already_saved_as = getattr(pil_image, 'already_saved_as', None) if already_saved_as and os.path.isfile(already_saved_as): register_tmp_file(shared.demo, already_saved_as) - filename = already_saved_as - - if not shared.opts.save_images_add_number: - filename += f'?{os.path.getmtime(already_saved_as)}' - - return filename + return f'{already_saved_as}?{os.path.getmtime(already_saved_as)}' if shared.opts.temp_dir != "": dir = shared.opts.temp_dir From e2b19900ec37ef517d8175a7d86c1925ca9f9e91 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 11 Feb 2024 09:39:51 +0300 Subject: [PATCH 045/135] add infotext entry for emphasis; put emphasis into a separate file, add an option to parse but still ignore emphasis --- modules/infotext_utils.py | 3 ++ modules/processing.py | 1 + modules/sd_emphasis.py | 70 +++++++++++++++++++++++++++++++++++ modules/sd_hijack_clip.py | 21 ++++++----- modules/sd_hijack_clip_old.py | 2 +- modules/shared_options.py | 5 +-- 6 files changed, 89 insertions(+), 13 deletions(-) create mode 100644 modules/sd_emphasis.py diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py index 1049c6c3c..a938aa2a7 100644 --- a/modules/infotext_utils.py +++ b/modules/infotext_utils.py @@ -356,6 +356,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model if "Cache FP16 weight for LoRA" not in res and res["FP8 weight"] != "Disable": res["Cache FP16 weight for LoRA"] = False + if "Emphasis" not in res: + res["Emphasis"] = "Original" + infotext_versions.backcompat(res) for key in skip_fields: diff --git a/modules/processing.py b/modules/processing.py index 52f00bfb9..f4aa165de 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -455,6 +455,7 @@ class StableDiffusionProcessing: self.height, opts.fp8_storage, opts.cache_fp16_weight, + opts.emphasis, ) def get_conds_with_caching(self, function, required_prompts, steps, caches, 
extra_network_data, hires_steps=None): diff --git a/modules/sd_emphasis.py b/modules/sd_emphasis.py new file mode 100644 index 000000000..654817b60 --- /dev/null +++ b/modules/sd_emphasis.py @@ -0,0 +1,70 @@ +from __future__ import annotations +import torch + + +class Emphasis: + """Emphasis class decides how to death with (emphasized:1.1) text in prompts""" + + name: str = "Base" + description: str = "" + + tokens: list[list[int]] + """tokens from the chunk of the prompt""" + + multipliers: torch.Tensor + """tensor with multipliers, once for each token""" + + z: torch.Tensor + """output of cond transformers network (CLIP)""" + + def after_transformers(self): + """Called after cond transformers network has processed the chunk of the prompt; this function should modify self.z to apply the emphasis""" + + pass + + +class EmphasisNone(Emphasis): + name = "None" + description = "disable the mechanism entirely and treat (:.1.1) as literal characters" + + +class EmphasisIgnore(Emphasis): + name = "Ignore" + description = "treat all empasised words as if they have no emphasis" + + +class EmphasisOriginal(Emphasis): + name = "Original" + description = "the orginal emphasis implementation" + + def after_transformers(self): + original_mean = self.z.mean() + self.z = self.z * self.multipliers.reshape(self.multipliers.shape + (1,)).expand(self.z.shape) + + # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise + new_mean = self.z.mean() + self.z = self.z * (original_mean / new_mean) + + +class EmphasisOriginalNoNorm(EmphasisOriginal): + name = "No norm" + description = "same as orginal, but without normalization (seems to work better for SDXL)" + + def after_transformers(self): + self.z = self.z * self.multipliers.reshape(self.multipliers.shape + (1,)).expand(self.z.shape) + + +def get_current_option(emphasis_option_name): + return next(iter([x for x in options if x.name == emphasis_option_name]), EmphasisOriginal) + + +def get_options_descriptions(): + return ", ".join(f"{x.name}: {x.description}" for x in options) + + +options = [ + EmphasisNone, + EmphasisIgnore, + EmphasisOriginal, + EmphasisOriginalNoNorm, +] diff --git a/modules/sd_hijack_clip.py b/modules/sd_hijack_clip.py index 89634fbf5..98350ac43 100644 --- a/modules/sd_hijack_clip.py +++ b/modules/sd_hijack_clip.py @@ -3,7 +3,7 @@ from collections import namedtuple import torch -from modules import prompt_parser, devices, sd_hijack +from modules import prompt_parser, devices, sd_hijack, sd_emphasis from modules.shared import opts @@ -88,7 +88,7 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): Returns the list and the total number of tokens in the prompt. 
""" - if opts.enable_emphasis: + if opts.emphasis != "None": parsed = prompt_parser.parse_prompt_attention(line) else: parsed = [[line, 1.0]] @@ -249,6 +249,9 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): hashes.append(self.hijack.extra_generation_params.get("TI hashes")) self.hijack.extra_generation_params["TI hashes"] = ", ".join(hashes) + if any(x for x in texts if "(" in x or "[" in x) and opts.emphasis != "Original": + self.hijack.extra_generation_params["Emphasis"] = opts.emphasis + if getattr(self.wrapped, 'return_pooled', False): return torch.hstack(zs), zs[0].pooled else: @@ -274,14 +277,14 @@ class FrozenCLIPEmbedderWithCustomWordsBase(torch.nn.Module): pooled = getattr(z, 'pooled', None) - # restoring original mean is likely not correct, but it seems to work well to prevent artifacts that happen otherwise - batch_multipliers = torch.asarray(batch_multipliers).to(devices.device) - original_mean = z.mean() - z = z * batch_multipliers.reshape(batch_multipliers.shape + (1,)).expand(z.shape) - new_mean = z.mean() + emphasis = sd_emphasis.get_current_option(opts.emphasis)() + emphasis.tokens = remade_batch_tokens + emphasis.multipliers = torch.asarray(batch_multipliers).to(devices.device) + emphasis.z = z - if not getattr(opts, "disable_normalize_embeddings", False): - z = z * (original_mean / new_mean) + emphasis.after_transformers() + + z = emphasis.z if pooled is not None: z.pooled = pooled diff --git a/modules/sd_hijack_clip_old.py b/modules/sd_hijack_clip_old.py index c5c6270b9..43e9b9529 100644 --- a/modules/sd_hijack_clip_old.py +++ b/modules/sd_hijack_clip_old.py @@ -32,7 +32,7 @@ def process_text_old(self: sd_hijack_clip.FrozenCLIPEmbedderWithCustomWordsBase, embedding, embedding_length_in_tokens = self.hijack.embedding_db.find_embedding_at_position(tokens, i) - mult_change = self.token_mults.get(token) if shared.opts.enable_emphasis else None + mult_change = self.token_mults.get(token) if shared.opts.emphasis != "None" else None if mult_change is not None: mult *= mult_change i += 1 diff --git a/modules/shared_options.py b/modules/shared_options.py index 417a42b28..ba6d731d8 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -1,7 +1,7 @@ import os import gradio as gr -from modules import localization, ui_components, shared_items, shared, interrogate, shared_gradio_themes, util +from modules import localization, ui_components, shared_items, shared, interrogate, shared_gradio_themes, util, sd_emphasis from modules.paths_internal import models_path, script_path, data_path, sd_configs_path, sd_default_config, sd_model_file, default_sd_model_file, extensions_dir, extensions_builtin_dir, default_output_dir # noqa: F401 from modules.shared_cmd_options import cmd_opts from modules.options import options_section, OptionInfo, OptionHTML, categories @@ -154,8 +154,7 @@ options_templates.update(options_section(('sd', "Stable Diffusion", "sd"), { "sd_checkpoint_cache": OptionInfo(0, "Checkpoints to cache in RAM", gr.Slider, {"minimum": 0, "maximum": 10, "step": 1}).info("obsolete; set to 0 and use the two settings above instead"), "sd_unet": OptionInfo("Automatic", "SD Unet", gr.Dropdown, lambda: {"choices": shared_items.sd_unet_items()}, refresh=shared_items.refresh_unet_list).info("choose Unet model: Automatic = use one with same filename as checkpoint; None = use Unet from checkpoint"), "enable_quantization": OptionInfo(False, "Enable quantization in K samplers for sharper and cleaner results. 
This may change existing seeds").needs_reload_ui(), - "enable_emphasis": OptionInfo(True, "Enable emphasis").info("use (text) to make model pay more attention to text and [text] to make it pay less attention"), - "disable_normalize_embeddings": OptionInfo(False, "Disable normalize embeddings").info("Do not normalize embeddings after calculating emphasis. It can be expected to be effective in preventing artifacts in SDXL."), + "emphasis": OptionInfo("Original", "Emphasis mode", gr.Radio, lambda: {"choices": [x.name for x in sd_emphasis.options]}, infotext="Emphasis").info("makes it possible to make model to pay (more:1.1) or (less:0.9) attention to text when you use the syntax in prompt; " + sd_emphasis.get_options_descriptions()), "enable_batch_seeds": OptionInfo(True, "Make K-diffusion samplers produce same images in a batch as when making a single image"), "comma_padding_backtrack": OptionInfo(20, "Prompt word wrap length limit", gr.Slider, {"minimum": 0, "maximum": 74, "step": 1}).info("in tokens - for texts shorter than specified, if they don't fit into 75 token limit, move them to the next 75 token chunk"), "CLIP_stop_at_last_layers": OptionInfo(1, "Clip skip", gr.Slider, {"minimum": 1, "maximum": 12, "step": 1}, infotext="Clip skip").link("wiki", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#clip-skip").info("ignore last layers of CLIP network; 1 ignores none, 2 ignores one layer"), From b531b0bbef7802f5691b6ffbd389cd83f94ffb12 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 11 Feb 2024 12:23:04 +0300 Subject: [PATCH 046/135] add propmpt comments support --- modules/processing_scripts/comments.py | 32 ++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) create mode 100644 modules/processing_scripts/comments.py diff --git a/modules/processing_scripts/comments.py b/modules/processing_scripts/comments.py new file mode 100644 index 000000000..316356c77 --- /dev/null +++ b/modules/processing_scripts/comments.py @@ -0,0 +1,32 @@ +from modules import scripts, shared +import re + + +def strip_comments(text): + text = re.sub('(^|\n)#[^\n]*(\n|$)', '\n', text) # while line comment + text = re.sub('#[^\n]*(\n|$)', '\n', text) # in the middle of the line comment + + return text + + +class ScriptStripComments(scripts.Script): + def title(self): + return "Comments" + + def show(self, is_img2img): + return scripts.AlwaysVisible + + def process(self, p, *args): + if not shared.opts.enable_prompt_comments: + return + + p.all_prompts = [strip_comments(x) for x in p.all_prompts] + p.all_negative_prompts = [strip_comments(x) for x in p.all_negative_prompts] + + p.main_prompt = strip_comments(p.main_prompt) + p.main_negative_prompt = strip_comments(p.main_negative_prompt) + + +shared.options_templates.update(shared.options_section(('sd', "Stable Diffusion", "sd"), { + "enable_prompt_comments": shared.OptionInfo(True, "Enable comments").info("Use # anywhere in the prompt to hide the text between # and the end of the line from the generation."), +})) From f6e476d7a88b4092b1384b204bb54ef1277bf21f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 11 Feb 2024 12:24:02 +0300 Subject: [PATCH 047/135] call the right function for token counter in img2img --- modules/ui.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/ui.py b/modules/ui.py index 44ce98325..9e90133b4 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -825,7 +825,7 @@ def create_ui(): ) 
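For reference, the comment stripping added by PATCH 046 can be exercised on its own: the two substitutions below are the same ones strip_comments() performs, while the sample prompt is only an illustrative input, not taken from the patch.

import re

def strip_comments(text):
    text = re.sub('(^|\n)#[^\n]*(\n|$)', '\n', text)  # whole-line comments
    text = re.sub('#[^\n]*(\n|$)', '\n', text)        # comments starting mid-line
    return text

prompt = "masterpiece, highly detailed # trailing note\n# this whole line is a comment\nportrait of a cat"
print(repr(strip_comments(prompt)))
# -> 'masterpiece, highly detailed \nportrait of a cat'

Both comment forms are removed before the text reaches the model, which is what the option description "Use # anywhere in the prompt to hide the text between # and the end of the line from the generation" promises.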
toprow.token_button.click(fn=update_token_counter, inputs=[toprow.prompt, steps], outputs=[toprow.token_counter]) - toprow.negative_token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter]) + toprow.negative_token_button.click(fn=wrap_queued_call(update_negative_prompt_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter]) img2img_paste_fields = [ (toprow.prompt, "Prompt"), From 02ab75b86a91f2ae28edc4ca6ef00c3ab4b51d89 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 11 Feb 2024 12:40:27 +0300 Subject: [PATCH 048/135] Count tokens of enabled styles --- modules/shared_options.py | 1 + modules/ui.py | 23 ++++++++++++++++------- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index ba6d731d8..f1ab5d6e2 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -271,6 +271,7 @@ options_templates.update(options_section(('ui_prompt_editing', "Prompt editing", "keyedit_delimiters_whitespace": OptionInfo(["Tab", "Carriage Return", "Line Feed"], "Ctrl+up/down whitespace delimiters", gr.CheckboxGroup, lambda: {"choices": ["Tab", "Carriage Return", "Line Feed"]}), "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), + "include_styles_into_token_counters": OptionInfo(True, "Count tokens of enabled styles").info("When calculating how many tokens the prompt has, also consider tokens added by enabled styles."), })) options_templates.update(options_section(('ui_gallery', "Gallery", "ui"), { diff --git a/modules/ui.py b/modules/ui.py index 9e90133b4..5284a6300 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -151,7 +151,12 @@ def connect_clear_prompt(button): ) -def update_token_counter(text, steps, *, is_positive=True): +def update_token_counter(text, steps, styles, *, is_positive=True): + + if shared.opts.include_styles_into_token_counters: + apply_styles = shared.prompt_styles.apply_styles_to_prompt if is_positive else shared.prompt_styles.apply_negative_styles_to_prompt + text = apply_styles(text, styles) + try: text, _ = extra_networks.parse_prompt(text) @@ -173,8 +178,8 @@ def update_token_counter(text, steps, *, is_positive=True): return f"{token_count}/{max_length}" -def update_negative_prompt_token_counter(text, steps): - return update_token_counter(text, steps, is_positive=False) +def update_negative_prompt_token_counter(*args): + return update_token_counter(*args, is_positive=False) def setup_progressbar(*args, **kwargs): @@ -485,8 +490,10 @@ def create_ui(): height, ] - toprow.token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps], outputs=[toprow.token_counter]) - toprow.negative_token_button.click(fn=wrap_queued_call(update_negative_prompt_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter]) + toprow.ui_styles.dropdown.change(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps, toprow.ui_styles.dropdown], outputs=[toprow.token_counter]) + toprow.ui_styles.dropdown.change(fn=wrap_queued_call(update_negative_prompt_token_counter), inputs=[toprow.negative_prompt, steps, toprow.ui_styles.dropdown], outputs=[toprow.negative_token_counter]) + toprow.token_button.click(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps, 
toprow.ui_styles.dropdown], outputs=[toprow.token_counter]) + toprow.negative_token_button.click(fn=wrap_queued_call(update_negative_prompt_token_counter), inputs=[toprow.negative_prompt, steps, toprow.ui_styles.dropdown], outputs=[toprow.negative_token_counter]) extra_networks_ui = ui_extra_networks.create_ui(txt2img_interface, [txt2img_generation_tab], 'txt2img') ui_extra_networks.setup_ui(extra_networks_ui, output_panel.gallery) @@ -824,8 +831,10 @@ def create_ui(): **interrogate_args, ) - toprow.token_button.click(fn=update_token_counter, inputs=[toprow.prompt, steps], outputs=[toprow.token_counter]) - toprow.negative_token_button.click(fn=wrap_queued_call(update_negative_prompt_token_counter), inputs=[toprow.negative_prompt, steps], outputs=[toprow.negative_token_counter]) + toprow.ui_styles.dropdown.change(fn=wrap_queued_call(update_token_counter), inputs=[toprow.prompt, steps, toprow.ui_styles.dropdown], outputs=[toprow.token_counter]) + toprow.ui_styles.dropdown.change(fn=wrap_queued_call(update_negative_prompt_token_counter), inputs=[toprow.negative_prompt, steps, toprow.ui_styles.dropdown], outputs=[toprow.negative_token_counter]) + toprow.token_button.click(fn=update_token_counter, inputs=[toprow.prompt, steps, toprow.ui_styles.dropdown], outputs=[toprow.token_counter]) + toprow.negative_token_button.click(fn=wrap_queued_call(update_negative_prompt_token_counter), inputs=[toprow.negative_prompt, steps, toprow.ui_styles.dropdown], outputs=[toprow.negative_token_counter]) img2img_paste_fields = [ (toprow.prompt, "Prompt"), From b7f45e67dcf48914d2f34d4ace977a431a5aa12e Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sun, 11 Feb 2024 12:56:53 +0300 Subject: [PATCH 049/135] add before_token_counter callback and use it for prompt comments --- modules/processing_scripts/comments.py | 12 +++++++++++- modules/script_callbacks.py | 26 ++++++++++++++++++++++++++ modules/ui.py | 6 ++++++ 3 files changed, 43 insertions(+), 1 deletion(-) diff --git a/modules/processing_scripts/comments.py b/modules/processing_scripts/comments.py index 316356c77..638e39f29 100644 --- a/modules/processing_scripts/comments.py +++ b/modules/processing_scripts/comments.py @@ -1,4 +1,4 @@ -from modules import scripts, shared +from modules import scripts, shared, script_callbacks import re @@ -27,6 +27,16 @@ class ScriptStripComments(scripts.Script): p.main_negative_prompt = strip_comments(p.main_negative_prompt) +def before_token_counter(params: script_callbacks.BeforeTokenCounterParams): + if not shared.opts.enable_prompt_comments: + return + + params.prompt = strip_comments(params.prompt) + + +script_callbacks.on_before_token_counter(before_token_counter) + + shared.options_templates.update(shared.options_section(('sd', "Stable Diffusion", "sd"), { "enable_prompt_comments": shared.OptionInfo(True, "Enable comments").info("Use # anywhere in the prompt to hide the text between # and the end of the line from the generation."), })) diff --git a/modules/script_callbacks.py b/modules/script_callbacks.py index a54cb3ebb..08bc52564 100644 --- a/modules/script_callbacks.py +++ b/modules/script_callbacks.py @@ -1,3 +1,4 @@ +import dataclasses import inspect import os from collections import namedtuple @@ -106,6 +107,15 @@ class ImageGridLoopParams: self.rows = rows +@dataclasses.dataclass +class BeforeTokenCounterParams: + prompt: str + steps: int + styles: list + + is_positive: bool = True + + ScriptCallback = namedtuple("ScriptCallback", ["script", "callback"]) callback_map = dict( 
callbacks_app_started=[], @@ -128,6 +138,7 @@ callback_map = dict( callbacks_on_reload=[], callbacks_list_optimizers=[], callbacks_list_unets=[], + callbacks_before_token_counter=[], ) @@ -309,6 +320,14 @@ def list_unets_callback(): return res +def before_token_counter_callback(params: BeforeTokenCounterParams): + for c in callback_map['callbacks_before_token_counter']: + try: + c.callback(params) + except Exception: + report_exception(c, 'before_token_counter') + + def add_callback(callbacks, fun): stack = [x for x in inspect.stack() if x.filename != __file__] filename = stack[0].filename if stack else 'unknown file' @@ -483,3 +502,10 @@ def on_list_unets(callback): The function will be called with one argument, a list, and shall add objects of type modules.sd_unet.SdUnetOption to it.""" add_callback(callback_map['callbacks_list_unets'], callback) + + +def on_before_token_counter(callback): + """register a function to be called when UI is counting tokens for a prompt. + The function will be called with one argument of type BeforeTokenCounterParams, and should modify its fields if necessary.""" + + add_callback(callback_map['callbacks_before_token_counter'], callback) diff --git a/modules/ui.py b/modules/ui.py index 5284a6300..dcba8e885 100644 --- a/modules/ui.py +++ b/modules/ui.py @@ -152,6 +152,12 @@ def connect_clear_prompt(button): def update_token_counter(text, steps, styles, *, is_positive=True): + params = script_callbacks.BeforeTokenCounterParams(text, steps, styles, is_positive=is_positive) + script_callbacks.before_token_counter_callback(params) + text = params.prompt + steps = params.steps + styles = params.styles + is_positive = params.is_positive if shared.opts.include_styles_into_token_counters: apply_styles = shared.prompt_styles.apply_styles_to_prompt if is_positive else shared.prompt_styles.apply_negative_styles_to_prompt From 13fd466c181e9d4e29eaa2430749ea35d5265393 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Mon, 12 Feb 2024 04:07:14 +0900 Subject: [PATCH 050/135] fix extra-network-control--enabled color also add forgotten semicolon --- style.css | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/style.css b/style.css index 988c28c00..a6b287b95 100644 --- a/style.css +++ b/style.css @@ -851,13 +851,13 @@ table.popup-table .link{ #extensions tr:hover td, #config_state_extensions tr:hover td, #available_extensions tr:hover td { - background: rgba(0, 0, 0, 0.15) + background: rgba(0, 0, 0, 0.15); } .dark #extensions tr:hover td , .dark #config_state_extensions tr:hover td , .dark #available_extensions tr:hover td { - background: rgba(255, 255, 255, 0.15) + background: rgba(255, 255, 255, 0.15); } /* replace original footer with ours */ @@ -1513,12 +1513,12 @@ body.resizing .resize-handle { background-color: var(--input-placeholder-color); } -.dark .extra-network-control .extra-network-control--enabled { - background-color: var(--neutral-700); +.extra-network-control .extra-network-control--enabled { + background-color: rgba(0, 0, 0, 0.15); } .dark .extra-network-control .extra-network-control--enabled { - background-color: var(--neutral-300); + background-color: rgba(255, 255, 255, 0.15); } /* ==== REFRESH ICON ACTIONS ==== */ From 90441294db16383bce6f341e8a1f67fe422172d4 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Mon, 12 Feb 2024 14:25:09 +0800 Subject: [PATCH 051/135] Add rescale mechanism LyCORIS will support save oft_blocks instead of oft_diag 
in the near future (for both OFT and BOFT) But this means we need to store the rescale if user enable it. --- extensions-builtin/Lora/network_oft.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index d7b317029..ed221d8fe 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -40,6 +40,7 @@ class NetworkModuleOFT(network.NetworkModule): self.is_boft = False if weights.w["oft_diag"].dim() == 4: self.is_boft = True + self.rescale = weight.w.get('rescale', None) is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear] is_conv = type(self.sd_module) in [torch.nn.Conv2d] @@ -108,6 +109,10 @@ class NetworkModuleOFT(network.NetworkModule): inp = rearrange(inp, "(c k g) ... -> (c g k) ...", g=2, k=2**i * r_b) merged_weight = inp + # Rescale mechanism + if self.rescale is not None: + merged_weight = self.rescale.to(merged_weight) * merged_weight + updown = merged_weight.to(orig_weight.device) - orig_weight.to(merged_weight.dtype) output_shape = orig_weight.shape return self.finalize_updown(updown, orig_weight, output_shape) From 69f9564a6db8ced1b5fbfc7f20c644abd54fca11 Mon Sep 17 00:00:00 2001 From: analysisjp Date: Tue, 13 Feb 2024 21:49:23 +0900 Subject: [PATCH 052/135] fixed webui.sh issue that occurred in WSL environment (fix: #14883) --- webui.sh | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/webui.sh b/webui.sh index 794cfb8ae..f116376f7 100755 --- a/webui.sh +++ b/webui.sh @@ -227,9 +227,7 @@ fi prepare_tcmalloc() { if [[ "${OSTYPE}" == "linux"* ]] && [[ -z "${NO_TCMALLOC}" ]] && [[ -z "${LD_PRELOAD}" ]]; then # check glibc version - LIBC_LIB="$(PATH=/usr/sbin:$PATH ldconfig -p | grep -P "libc.so.6" | head -n 1)" - LIBC_INFO=$(echo ${LIBC_LIB} | awk '{print $NF}') - LIBC_VER=$(echo $(${LIBC_INFO} | awk 'NR==1 {print $NF}') | grep -oP '\d+\.\d+') + LIBC_VER=$(echo $(ldd --version | awk 'NR==1 {print $NF}') | grep -oP '\d+\.\d+') echo "glibc version is $LIBC_VER" libc_vernum=$(expr $LIBC_VER) # Since 2.34 libpthread is integrated into libc.so @@ -244,9 +242,9 @@ prepare_tcmalloc() { TC_INFO=(${TCMALLOC//=>/}) if [[ ! -z "${TC_INFO}" ]]; then echo "Check TCMalloc: ${TC_INFO}" - # Determine if the library is linked to libptthread and resolve undefined symbol: ptthread_key_create + # Determine if the library is linked to libpthread and resolve undefined symbol: pthread_key_create if [ $(echo "$libc_vernum < $libc_v234" | bc) -eq 1 ]; then - # glibc < 2.33 pthread_key_create into libpthead.so. check linking libpthread.so... + # glibc < 2.34 pthread_key_create into libpthread.so. check linking libpthread.so... if ldd ${TC_INFO[2]} | grep -q 'libpthread'; then echo "$TC_INFO is linked with libpthread,execute LD_PRELOAD=${TC_INFO[2]}" # set fullpath LD_PRELOAD (To be on the safe side) @@ -256,7 +254,7 @@ prepare_tcmalloc() { echo "$TC_INFO is not linked with libpthread will trigger undefined symbol: pthread_Key_create error" fi else - # Version 2.34 of libc.so (glibc) includes the pthead library IN GLIBC. (USE ubuntu 22.04 and modern linux system and WSL) + # Version 2.34 of libc.so (glibc) includes the pthread library IN GLIBC. (USE ubuntu 22.04 and modern linux system and WSL) # libc.so(glibc) is linked with a library that works in ALMOST ALL Linux userlands. SO NO CHECK! 
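The version gate in prepare_tcmalloc() rests on one fact: starting with glibc 2.34, libpthread is merged into libc.so, so the "is this tcmalloc build linked against libpthread" check only matters on older systems. A rough Python equivalent of that decision, assuming (as the shell script does) that ldd --version reports the glibc version as the last field of its first line:

import subprocess

def needs_libpthread_check() -> bool:
    # e.g. "ldd (Ubuntu GLIBC 2.35-0ubuntu3) 2.35" -> version "2.35"
    first_line = subprocess.run(["ldd", "--version"], capture_output=True, text=True).stdout.splitlines()[0]
    major, minor = (int(part) for part in first_line.split()[-1].split(".")[:2])
    # glibc >= 2.34 ships the pthread symbols inside libc.so itself, so the extra
    # pthread_key_create linkage check is only needed for older glibc versions
    return (major, minor) < (2, 34)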
echo "$TC_INFO is linked with libc.so,execute LD_PRELOAD=${TC_INFO[2]}" # set fullpath LD_PRELOAD (To be on the safe side) @@ -266,7 +264,7 @@ prepare_tcmalloc() { fi done if [[ -z "${LD_PRELOAD}" ]]; then - printf "\e[1m\e[31mCannot locate TCMalloc. Do you have tcmalloc or gperftools installed on your system? (improves CPU memory usage)\e[0m\n" + printf "\e[1m\e[31mCannot locate TCMalloc. Do you have tcmalloc or google-perftool installed on your system? (improves CPU memory usage)\e[0m\n" fi fi } From 1142201a3a1dbfeafc25e2dee2f25eac9a1fb321 Mon Sep 17 00:00:00 2001 From: Andray Date: Wed, 14 Feb 2024 15:26:57 +0400 Subject: [PATCH 053/135] Use original App Title in progress bar --- javascript/progressbar.js | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/javascript/progressbar.js b/javascript/progressbar.js index 777614954..f068bac6a 100644 --- a/javascript/progressbar.js +++ b/javascript/progressbar.js @@ -45,8 +45,15 @@ function formatTime(secs) { } } + +var originalAppTitle = undefined; + +onUiLoaded(function() { + originalAppTitle = document.title; +}); + function setTitle(progress) { - var title = 'Stable Diffusion'; + var title = originalAppTitle; if (opts.show_progress_in_title && progress) { title = '[' + progress.trim() + '] ' + title; From 18ec22bffea77158777c3a3225e632e25eb33138 Mon Sep 17 00:00:00 2001 From: RedDeltas <160131179+RedDeltas@users.noreply.github.com> Date: Thu, 15 Feb 2024 12:26:14 +0000 Subject: [PATCH 054/135] Added core.filemode=false so doesn't track changes in file permissions in more restrictive environments --- modules/launch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 107c72b02..ad04eb362 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -188,7 +188,7 @@ def git_clone(url, dir, name, commithash=None): return try: - run(f'"{git}" clone "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}", live=True) + run(f'"{git}" clone --config core.filemode=false "{url}" "{dir}"', f"Cloning {name} into {dir}...", f"Couldn't clone {name}", live=True) except RuntimeError: shutil.rmtree(dir, ignore_errors=True) raise From 46988af63696a232cbf94cdeaca6de32cc56e23c Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Thu, 15 Feb 2024 13:05:39 -0500 Subject: [PATCH 055/135] Fix `Esc` interrupt when button not visible --- script.js | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/script.js b/script.js index be1bc317e..25cf0973a 100644 --- a/script.js +++ b/script.js @@ -167,8 +167,10 @@ document.addEventListener('keydown', function(e) { const lightboxModal = document.querySelector('#lightboxModal'); if (!globalPopup || globalPopup.style.display === 'none') { if (document.activeElement === lightboxModal) return; - interruptButton.click(); - e.preventDefault(); + if (interruptButton.style.display !== 'none') { + interruptButton.click(); + e.preventDefault(); + } } } }); From 6ee4012c0a4d16fb3a6b2a9ea80a7a4b54073193 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Thu, 15 Feb 2024 13:31:44 -0500 Subject: [PATCH 056/135] Gracefully handle mtime read exception from cache --- modules/hashes.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/modules/hashes.py b/modules/hashes.py index b7a33b427..d22e5fadc 100644 --- a/modules/hashes.py +++ b/modules/hashes.py @@ -21,7 +21,10 @@ def 
calculate_sha256(filename): def sha256_from_cache(filename, title, use_addnet_hash=False): hashes = cache("hashes-addnet") if use_addnet_hash else cache("hashes") - ondisk_mtime = os.path.getmtime(filename) + try: + ondisk_mtime = os.path.getmtime(filename) + except FileNotFoundError: + return None if title not in hashes: return None From 06ab10a1be812036605e1472f054228562ea08d9 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Thu, 15 Feb 2024 14:22:13 -0500 Subject: [PATCH 057/135] Normalize cmd arg paths In particular, this fixes an issue on Windows where some functions will misbehave if forward slashes are provided rather than double backslashes. --- extensions-builtin/Lora/preload.py | 5 ++-- modules/cmd_args.py | 40 +++++++++++++++--------------- modules/paths_internal.py | 4 +++ 3 files changed, 27 insertions(+), 22 deletions(-) diff --git a/extensions-builtin/Lora/preload.py b/extensions-builtin/Lora/preload.py index 50961be33..52fab29b0 100644 --- a/extensions-builtin/Lora/preload.py +++ b/extensions-builtin/Lora/preload.py @@ -1,7 +1,8 @@ import os from modules import paths +from modules.paths_internal import normalized_filepath def preload(parser): - parser.add_argument("--lora-dir", type=str, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora')) - parser.add_argument("--lyco-dir-backcompat", type=str, help="Path to directory with LyCORIS networks (for backawards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS')) + parser.add_argument("--lora-dir", type=normalized_filepath, help="Path to directory with Lora networks.", default=os.path.join(paths.models_path, 'Lora')) + parser.add_argument("--lyco-dir-backcompat", type=normalized_filepath, help="Path to directory with LyCORIS networks (for backawards compatibility; can also use --lyco-dir).", default=os.path.join(paths.models_path, 'LyCORIS')) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index f1251b6c8..312dabffc 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -1,7 +1,7 @@ import argparse import json import os -from modules.paths_internal import models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file # noqa: F401 +from modules.paths_internal import normalized_filepath, models_path, script_path, data_path, extensions_dir, extensions_builtin_dir, sd_default_config, sd_model_file # noqa: F401 parser = argparse.ArgumentParser() @@ -19,21 +19,21 @@ parser.add_argument("--skip-install", action='store_true', help="launch.py argum parser.add_argument("--dump-sysinfo", action='store_true', help="launch.py argument: dump limited sysinfo file (without information about extensions, options) to disk and quit") parser.add_argument("--loglevel", type=str, help="log level; one of: CRITICAL, ERROR, WARNING, INFO, DEBUG", default=None) parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint") -parser.add_argument("--data-dir", type=str, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored") -parser.add_argument("--config", type=str, default=sd_default_config, help="path to config which constructs model",) -parser.add_argument("--ckpt", type=str, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and 
loaded",) -parser.add_argument("--ckpt-dir", type=str, default=None, help="Path to directory with stable diffusion checkpoints") -parser.add_argument("--vae-dir", type=str, default=None, help="Path to directory with VAE files") -parser.add_argument("--gfpgan-dir", type=str, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN')) -parser.add_argument("--gfpgan-model", type=str, help="GFPGAN model file name", default=None) +parser.add_argument("--data-dir", type=normalized_filepath, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored") +parser.add_argument("--config", type=normalized_filepath, default=sd_default_config, help="path to config which constructs model",) +parser.add_argument("--ckpt", type=normalized_filepath, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",) +parser.add_argument("--ckpt-dir", type=normalized_filepath, default=None, help="Path to directory with stable diffusion checkpoints") +parser.add_argument("--vae-dir", type=normalized_filepath, default=None, help="Path to directory with VAE files") +parser.add_argument("--gfpgan-dir", type=normalized_filepath, help="GFPGAN directory", default=('./src/gfpgan' if os.path.exists('./src/gfpgan') else './GFPGAN')) +parser.add_argument("--gfpgan-model", type=normalized_filepath, help="GFPGAN model file name", default=None) parser.add_argument("--no-half", action='store_true', help="do not switch the model to 16-bit floats") parser.add_argument("--no-half-vae", action='store_true', help="do not switch the VAE model to 16-bit floats") parser.add_argument("--no-progressbar-hiding", action='store_true', help="do not hide progressbar in gradio UI (we hide it because it slows down ML if you have hardware acceleration in browser)") parser.add_argument("--max-batch-count", type=int, default=16, help="maximum batch count value for the UI") -parser.add_argument("--embeddings-dir", type=str, default=os.path.join(data_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)") -parser.add_argument("--textual-inversion-templates-dir", type=str, default=os.path.join(script_path, 'textual_inversion_templates'), help="directory with textual inversion templates") -parser.add_argument("--hypernetwork-dir", type=str, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory") -parser.add_argument("--localizations-dir", type=str, default=os.path.join(script_path, 'localizations'), help="localizations directory") +parser.add_argument("--embeddings-dir", type=normalized_filepath, default=os.path.join(data_path, 'embeddings'), help="embeddings directory for textual inversion (default: embeddings)") +parser.add_argument("--textual-inversion-templates-dir", type=normalized_filepath, default=os.path.join(script_path, 'textual_inversion_templates'), help="directory with textual inversion templates") +parser.add_argument("--hypernetwork-dir", type=normalized_filepath, default=os.path.join(models_path, 'hypernetworks'), help="hypernetwork directory") +parser.add_argument("--localizations-dir", type=normalized_filepath, default=os.path.join(script_path, 'localizations'), help="localizations directory") parser.add_argument("--allow-code", action='store_true', help="allow custom script execution from webui") parser.add_argument("--medvram", action='store_true', help="enable stable diffusion 
model optimizations for sacrificing a little speed for low VRM usage") parser.add_argument("--medvram-sdxl", action='store_true', help="enable --medvram optimization just for SDXL models") @@ -48,12 +48,12 @@ parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to g parser.add_argument("--ngrok-region", type=str, help="does not do anything.", default="") parser.add_argument("--ngrok-options", type=json.loads, help='The options to pass to ngrok in JSON format, e.g.: \'{"authtoken_from_env":true, "basic_auth":"user:password", "oauth_provider":"google", "oauth_allow_emails":"user@asdf.com"}\'', default=dict()) parser.add_argument("--enable-insecure-extension-access", action='store_true', help="enable extensions tab regardless of other options") -parser.add_argument("--codeformer-models-path", type=str, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer')) -parser.add_argument("--gfpgan-models-path", type=str, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN')) -parser.add_argument("--esrgan-models-path", type=str, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN')) -parser.add_argument("--bsrgan-models-path", type=str, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN')) -parser.add_argument("--realesrgan-models-path", type=str, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN')) -parser.add_argument("--clip-models-path", type=str, help="Path to directory with CLIP model file(s).", default=None) +parser.add_argument("--codeformer-models-path", type=normalized_filepath, help="Path to directory with codeformer model file(s).", default=os.path.join(models_path, 'Codeformer')) +parser.add_argument("--gfpgan-models-path", type=normalized_filepath, help="Path to directory with GFPGAN model file(s).", default=os.path.join(models_path, 'GFPGAN')) +parser.add_argument("--esrgan-models-path", type=normalized_filepath, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN')) +parser.add_argument("--bsrgan-models-path", type=normalized_filepath, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN')) +parser.add_argument("--realesrgan-models-path", type=normalized_filepath, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN')) +parser.add_argument("--clip-models-path", type=normalized_filepath, help="Path to directory with CLIP model file(s).", default=None) parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers") parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work") parser.add_argument("--xformers-flash-attention", action='store_true', help="enable xformers with Flash Attention to improve reproducibility (supported for SD2.x or variant only)") @@ -83,7 +83,7 @@ parser.add_argument("--freeze-specific-settings", type=str, help='disable editin parser.add_argument("--ui-settings-file", type=str, help="filename to use for ui settings", default=os.path.join(data_path, 'config.json')) parser.add_argument("--gradio-debug", action='store_true', help="launch gradio with --debug option") 
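For reference, a minimal sketch of what the normalized_filepath conversion does once argparse applies it; the definition is the same one-liner this patch adds to modules/paths_internal.py, while the flag and sample value here are purely illustrative.

import argparse
from pathlib import Path

normalized_filepath = lambda filepath: str(Path(filepath).resolve())

parser = argparse.ArgumentParser()
parser.add_argument("--ckpt-dir", type=normalized_filepath, default=None)

args = parser.parse_args(["--ckpt-dir", "models/Stable-diffusion/../Stable-diffusion"])
# argparse runs the callable on the raw string, so args.ckpt_dir comes out as an
# absolute OS-native path (backslashes on Windows) with the ".." segment collapsed
print(args.ckpt_dir)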
parser.add_argument("--gradio-auth", type=str, help='set gradio authentication like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None) -parser.add_argument("--gradio-auth-path", type=str, help='set gradio authentication file path ex. "/path/to/auth/file" same auth format as --gradio-auth', default=None) +parser.add_argument("--gradio-auth-path", type=normalized_filepath, help='set gradio authentication file path ex. "/path/to/auth/file" same auth format as --gradio-auth', default=None) parser.add_argument("--gradio-img2img-tool", type=str, help='does not do anything') parser.add_argument("--gradio-inpaint-tool", type=str, help="does not do anything") parser.add_argument("--gradio-allowed-path", action='append', help="add path to gradio's allowed_paths, make it possible to serve files from it", default=[data_path]) @@ -94,7 +94,7 @@ parser.add_argument("--theme", type=str, help="launches the UI with light or dar parser.add_argument("--use-textbox-seed", action='store_true', help="use textbox for seeds in UI (no up/down, but possible to input long seeds)", default=False) parser.add_argument("--disable-console-progressbars", action='store_true', help="do not output progressbars to console", default=False) parser.add_argument("--enable-console-prompts", action='store_true', help="does not do anything", default=False) # Legacy compatibility, use as default value shared.opts.enable_console_prompts -parser.add_argument('--vae-path', type=str, help='Checkpoint to use as VAE; setting this argument disables all settings related to VAE', default=None) +parser.add_argument('--vae-path', type=normalized_filepath, help='Checkpoint to use as VAE; setting this argument disables all settings related to VAE', default=None) parser.add_argument("--disable-safe-unpickle", action='store_true', help="disable checking pytorch models for malicious code", default=False) parser.add_argument("--api", action='store_true', help="use api=True to launch the API together with the webui (use --nowebui instead for only the API)") parser.add_argument("--api-auth", type=str, help='Set authentication for API like "username:password"; or comma-delimit multiple like "u1:p1,u2:p2,u3:p3"', default=None) diff --git a/modules/paths_internal.py b/modules/paths_internal.py index b86ecd7f1..2ed1392a4 100644 --- a/modules/paths_internal.py +++ b/modules/paths_internal.py @@ -4,6 +4,10 @@ import argparse import os import sys import shlex +from pathlib import Path + + +normalized_filepath = lambda filepath: str(Path(filepath).resolve()) commandline_args = os.environ.get('COMMANDLINE_ARGS', "") sys.argv += shlex.split(commandline_args) From 23f03d4796be944c36470479c41ec682f5793d9a Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Fri, 16 Feb 2024 16:43:43 +0800 Subject: [PATCH 058/135] Update extraNetworks.js --- javascript/extraNetworks.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 9a3a23926..7ec6a04d7 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -625,7 +625,7 @@ function scheduleAfterScriptsCallbacks() { document.addEventListener("DOMContentLoaded", function() { var mutationObserver = new MutationObserver(function(m) { if (!executedAfterScripts && - gradioApp().querySelectorAll("[id$='_extra_search']").length == 8) { + gradioApp().querySelectorAll("[id$='_extra_search']").length >= 6) { executedAfterScripts = true; 
scheduleAfterScriptsCallbacks(); } From 2e1b61e5903f004d2313943a5fddf13cfeff493f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 17 Feb 2024 09:45:03 +0300 Subject: [PATCH 059/135] change condition for scheduleAfterScriptsCallbacks() to properly reflect the needed amount of search fields --- javascript/extraNetworks.js | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 7ec6a04d7..195525b0e 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -622,10 +622,13 @@ function scheduleAfterScriptsCallbacks() { }, 200); } -document.addEventListener("DOMContentLoaded", function() { +onUiLoaded(function() { var mutationObserver = new MutationObserver(function(m) { - if (!executedAfterScripts && - gradioApp().querySelectorAll("[id$='_extra_search']").length >= 6) { + let existingSearchfields = gradioApp().querySelectorAll("[id$='_extra_search']").length; + let neededSearchfields = gradioApp().querySelectorAll("[id$='_extra_tabs'] > .tab-nav > button").length - 2; + + if (!executedAfterScripts && existingSearchfields >= neededSearchfields) { + mutationObserver.disconnect(); executedAfterScripts = true; scheduleAfterScriptsCallbacks(); } From 7dae6bb3b52a33dd01ef8c11b55d20049c254a23 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 17 Feb 2024 09:45:48 +0300 Subject: [PATCH 060/135] fix search UI invisible in an extra network tab that just loaded --- javascript/extraNetworks.js | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 195525b0e..4fd810d3d 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -114,6 +114,10 @@ function setupExtraNetworksForTab(tabname) { var controls = gradioApp().querySelector("#" + tabname_full + "_controls"); controlsDiv.insertBefore(controls, null); + + if (elem.style.display != "none") { + extraNetworksShowControlsForPage(tabname, tabname_full); + } }); registerPrompt(tabname, tabname + "_prompt"); From dd1641ecc4bbbdf2c319fb32a3250a8e16dd8c77 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 17 Feb 2024 09:46:04 +0300 Subject: [PATCH 061/135] fix an exception when filtering extra networks very early --- javascript/extraNetworks.js | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 4fd810d3d..d5855fe96 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -172,7 +172,11 @@ function extraNetworksTabSelected(tabname, id, showPrompt, showNegativePrompt, t function applyExtraNetworkFilter(tabname_full) { var doFilter = function() { - extraNetworksApplyFilter[tabname_full](true); + var applyFunction = extraNetworksApplyFilter[tabname_full]; + + if (applyFunction) { + applyFunction(true); + } }; setTimeout(doFilter, 1); } From 1466daeafc1cc9dcf0319012a6ec6129d51ebd2c Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 17 Feb 2024 10:31:16 +0300 Subject: [PATCH 062/135] Disable prompt token counters option actually disables token counting rather than just hiding results. Disable prompt token counters option does not require reload UI. token counters do not become visible until they are positioned correctly. 
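The diff below makes that change; as a rough, language-neutral sketch of the pattern, with made-up names, the idea is that a hidden counter performs no counting at all and that the option only toggles visibility, so no UI reload is required.

class TokenCounter:
    """Illustrative stand-in for a prompt token counter; not the webui's actual API."""

    def __init__(self, count_tokens):
        self.count_tokens = count_tokens  # the comparatively expensive backend call
        self.visible = True
        self.text = "0/75"

    def on_prompt_edited(self, prompt):
        if not self.visible:
            return  # disabled counters skip the work entirely instead of hiding the result
        self.text = self.count_tokens(prompt)

    def on_options_changed(self, disable_token_counters):
        self.visible = not disable_token_counters  # visibility flips live, no reload needed


counter = TokenCounter(lambda prompt: f"{len(prompt.split())}/75")
counter.on_options_changed(disable_token_counters=True)
counter.on_prompt_edited("a cat sitting on a chair")  # skipped: counter.text stays "0/75"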
--- .eslintrc.js | 2 -- javascript/token-counters.js | 34 +++++++++++++++++++++++----------- javascript/ui.js | 2 -- modules/shared_options.py | 2 +- modules/ui_toprow.py | 4 ++-- style.css | 4 ++++ 6 files changed, 30 insertions(+), 18 deletions(-) diff --git a/.eslintrc.js b/.eslintrc.js index cf8397695..9c70eff85 100644 --- a/.eslintrc.js +++ b/.eslintrc.js @@ -86,8 +86,6 @@ module.exports = { // imageviewer.js modalPrevImage: "readonly", modalNextImage: "readonly", - // token-counters.js - setupTokenCounters: "readonly", // localStorage.js localSet: "readonly", localGet: "readonly", diff --git a/javascript/token-counters.js b/javascript/token-counters.js index 2ecc7d910..5d53fe471 100644 --- a/javascript/token-counters.js +++ b/javascript/token-counters.js @@ -48,11 +48,6 @@ function setupTokenCounting(id, id_counter, id_button) { var counter = gradioApp().getElementById(id_counter); var textarea = gradioApp().querySelector(`#${id} > label > textarea`); - if (opts.disable_token_counters) { - counter.style.display = "none"; - return; - } - if (counter.parentElement == prompt.parentElement) { return; } @@ -61,15 +56,32 @@ function setupTokenCounting(id, id_counter, id_button) { prompt.parentElement.style.position = "relative"; var func = onEdit(id, textarea, 800, function() { - gradioApp().getElementById(id_button)?.click(); + if(counter.classList.contains("token-counter-visible")){ + gradioApp().getElementById(id_button)?.click(); + } }); promptTokenCountUpdateFunctions[id] = func; promptTokenCountUpdateFunctions[id_button] = func; } -function setupTokenCounters() { - setupTokenCounting('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button'); - setupTokenCounting('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button'); - setupTokenCounting('img2img_prompt', 'img2img_token_counter', 'img2img_token_button'); - setupTokenCounting('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button'); +function toggleTokenCountingVisibility(id, id_counter, id_button) { + var counter = gradioApp().getElementById(id_counter); + + counter.style.display = opts.disable_token_counters ? "none" : "block"; + counter.classList.toggle("token-counter-visible", ! 
opts.disable_token_counters); } + +function runCodeForTokenCounters(fun){ + fun('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button'); + fun('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button'); + fun('img2img_prompt', 'img2img_token_counter', 'img2img_token_button'); + fun('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button'); +} + +onUiLoaded(function(){ + runCodeForTokenCounters(setupTokenCounting); +}); + +onOptionsChanged(function(){ + runCodeForTokenCounters(toggleTokenCountingVisibility); +}); diff --git a/javascript/ui.js b/javascript/ui.js index 9e66cd245..3d079b3df 100644 --- a/javascript/ui.js +++ b/javascript/ui.js @@ -319,8 +319,6 @@ onAfterUiUpdate(function() { }); json_elem.parentElement.style.display = "none"; - - setupTokenCounters(); }); onOptionsChanged(function() { diff --git a/modules/shared_options.py b/modules/shared_options.py index f1ab5d6e2..e1d11c8e0 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -270,7 +270,7 @@ options_templates.update(options_section(('ui_prompt_editing', "Prompt editing", "keyedit_delimiters": OptionInfo(r".,\/!?%^*;:{}=`~() ", "Word delimiters when editing the prompt with Ctrl+up/down"), "keyedit_delimiters_whitespace": OptionInfo(["Tab", "Carriage Return", "Line Feed"], "Ctrl+up/down whitespace delimiters", gr.CheckboxGroup, lambda: {"choices": ["Tab", "Carriage Return", "Line Feed"]}), "keyedit_move": OptionInfo(True, "Alt+left/right moves prompt elements"), - "disable_token_counters": OptionInfo(False, "Disable prompt token counters").needs_reload_ui(), + "disable_token_counters": OptionInfo(False, "Disable prompt token counters"), "include_styles_into_token_counters": OptionInfo(True, "Count tokens of enabled styles").info("When calculating how many tokens the prompt has, also consider tokens added by enabled styles."), })) diff --git a/modules/ui_toprow.py b/modules/ui_toprow.py index 30cf1b1b8..dc3c3aa38 100644 --- a/modules/ui_toprow.py +++ b/modules/ui_toprow.py @@ -127,9 +127,9 @@ class Toprow: self.restore_progress_button = ToolButton(value=restore_progress_symbol, elem_id=f"{self.id_part}_restore_progress", visible=False, tooltip="Restore progress") - self.token_counter = gr.HTML(value="0/75", elem_id=f"{self.id_part}_token_counter", elem_classes=["token-counter"]) + self.token_counter = gr.HTML(value="0/75", elem_id=f"{self.id_part}_token_counter", elem_classes=["token-counter"], visible=False) self.token_button = gr.Button(visible=False, elem_id=f"{self.id_part}_token_button") - self.negative_token_counter = gr.HTML(value="0/75", elem_id=f"{self.id_part}_negative_token_counter", elem_classes=["token-counter"]) + self.negative_token_counter = gr.HTML(value="0/75", elem_id=f"{self.id_part}_negative_token_counter", elem_classes=["token-counter"], visible=False) self.negative_token_button = gr.Button(visible=False, elem_id=f"{self.id_part}_negative_token_button") self.clear_prompt_button.click( diff --git a/style.css b/style.css index a6b287b95..8ce78ff0c 100644 --- a/style.css +++ b/style.css @@ -222,6 +222,10 @@ input[type="checkbox"].input-accordion-checkbox{ top: -0.75em; } +.block.token-counter-visible{ + display: block !important; +} + .block.token-counter span{ background: var(--input-background-fill) !important; box-shadow: 0 0 0.0 0.3em rgba(192,192,192,0.15), inset 0 0 0.6em rgba(192,192,192,0.075); From db19c46d6de32878145e051d8f30ff1ef5b2227c Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> 
Date: Sat, 17 Feb 2024 10:32:10 +0300 Subject: [PATCH 063/135] lint --- javascript/token-counters.js | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/javascript/token-counters.js b/javascript/token-counters.js index 5d53fe471..eeea7a5d2 100644 --- a/javascript/token-counters.js +++ b/javascript/token-counters.js @@ -56,7 +56,7 @@ function setupTokenCounting(id, id_counter, id_button) { prompt.parentElement.style.position = "relative"; var func = onEdit(id, textarea, 800, function() { - if(counter.classList.contains("token-counter-visible")){ + if (counter.classList.contains("token-counter-visible")) { gradioApp().getElementById(id_button)?.click(); } }); @@ -68,20 +68,20 @@ function toggleTokenCountingVisibility(id, id_counter, id_button) { var counter = gradioApp().getElementById(id_counter); counter.style.display = opts.disable_token_counters ? "none" : "block"; - counter.classList.toggle("token-counter-visible", ! opts.disable_token_counters); + counter.classList.toggle("token-counter-visible", !opts.disable_token_counters); } -function runCodeForTokenCounters(fun){ +function runCodeForTokenCounters(fun) { fun('txt2img_prompt', 'txt2img_token_counter', 'txt2img_token_button'); fun('txt2img_neg_prompt', 'txt2img_negative_token_counter', 'txt2img_negative_token_button'); fun('img2img_prompt', 'img2img_token_counter', 'img2img_token_button'); fun('img2img_neg_prompt', 'img2img_negative_token_counter', 'img2img_negative_token_button'); } -onUiLoaded(function(){ +onUiLoaded(function() { runCodeForTokenCounters(setupTokenCounting); }); -onOptionsChanged(function(){ +onOptionsChanged(function() { runCodeForTokenCounters(toggleTokenCountingVisibility); }); From 310d6b9075c6edb3b884bd2a41e1c17259b141e1 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 17 Feb 2024 11:35:36 +0300 Subject: [PATCH 064/135] update changelog --- CHANGELOG.md | 121 +++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 121 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 67429bbff..9fd366fa7 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,124 @@ +## 1.8.0-RC + +### Features: +* Update torch to version 2.1.2 +* Soft Inpainting ([#14208](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14208)) +* FP8 support ([#14031](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14031), [#14327](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14327)) +* Support for SDXL-Inpaint Model ([#14390](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14390)) +* Use Spandrel for upscaling and face restoration architectures ([#14425](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14425), [#14467](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14467), [#14473](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14473), [#14474](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14474), [#14477](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14477), [#14476](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14476), [#14484](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14484), [#14500](https://github.com/AUTOMATIC1111/stable-difusion-webui/pull/14500), [#14501](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14501), [#14504](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14504), [#14524](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14524), 
[#14809](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14809)) +* Automatic backwards version compatibility (when loading infotexts from old images with program version specified, will add compatibility settings) +* Implement zero terminal SNR noise schedule option (**[SEED BREAKING CHANGE](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Seed-breaking-changes#180-dev-170-225-2024-01-01---zero-terminal-snr-noise-schedule-option)**, [#14145](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14145)) +* Add a [✨] button to run hires fix on selected image in the gallery (with help from [#14598](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14598), [#14626](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14626), [#14728](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14728)) +* [Separate assets repository](https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets); serve fonts locally rather than from google's servers +* Official LCM Sampler Support ([#14583](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14583)) +* Add support for DAT upscaler models ([#14690](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14690)) +* Extra Networks Tree View ([#14588](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14588), [#14900](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14900)) +* NPU Support ([#14801](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14801)) +* Propmpt comments support + +### Minor: +* Allow pasting in WIDTHxHEIGHT strings into the width/height fields ([#14296](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14296)) +* add option: Live preview in full page image viewer ([#14230](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14230), [#14307](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14307)) +* Add keyboard shortcuts for generate/skip/interrupt ([#14269](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14269)) +* Better TCMALLOC support on different platforms ([#14227](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14227), [#14883](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14883), [#14910](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14910)) +* Lora not found warning ([#14464](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14464)) +* Adding negative prompts to Loras in extra networks ([#14475](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14475)) +* xyz_grid: allow varying the seed along an axis separate from axis options ([#12180](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12180)) +* option to convert VAE to bfloat16 (implementation of [#9295](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9295)) +* Better IPEX support ([#14229](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14229), [#14353](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14353), [#14559](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14559), [#14562](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14562), [#14597](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14597)) +* Option to interrupt after current generation rather than immediately ([#13653](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13653), [#14659](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14659)) +* Fullscreen Preview control fading/disable 
([#14291](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14291)) +* Finer settings freezing control ([#13789](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13789)) +* Increase Upscaler Limits ([#14589](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14589)) +* Adjust brush size with hotkeys ([#14638](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14638)) +* Add checkpoint info to csv log file when saving images ([#14663](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14663)) +* Make more columns resizable ([#14740](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14740), [#14884](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14884)) +* Add an option to not overlay original image for inpainting for #14727 +* Add Pad conds v0 option +* Add "Interrupting..." placeholder. +* Button for refresh extensions list ([#14857](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14857)) +* Add an option to disable normalization after calculating emphasis. ([#14874](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14874)) +* When counting tokens, also include enabled styles (can be disabled in settings to revert to previous behavior) + +### Extensions and API: +* Enable task ids for API ([#14314](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14314)) +* add override_settings support for infotext API +* rename generation_parameters_copypaste module to infotext_utils +* prevent crash due to Script __init__ exception ([#14407](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14407)) +* Bump numpy to 1.26.2 ([#14471](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14471)) +* Add utility to inspect a model's dtype/device ([#14478](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14478)) +* Implement general forward method for all method in built-in lora ext ([#14547](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14547)) +* Execute model_loaded_callback after moving to target device ([#14563](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14563)) +* Add self to CFGDenoiserParams ([#14573](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14573)) +* Allow TLS with API only mode (--nowebui) ([#14593](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14593)) +* New callback: postprocess_image_after_composite ([#14657](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14657)) +* modules/api/api.py: add api endpoint to refresh embeddings list ([#14715](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14715)) +* set_named_arg ([#14773](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14773)) +* add before_token_counter callback and use it for prompt comments + +### Performance +* Massive performance improvement for extra networks directories with a huge number of files in them in an attempt to tackle #14507 ([#14528](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14528)) +* Reduce unnecessary re-indexing extra networks directory ([#14512](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14512)) +* Avoid unnecessary `isfile`/`exists` calls ([#14527](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14527)) + +### Bug Fixes: +* fix multiple bugs related to styles multi-file support ([#14203](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14203), 
[#14276](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14276), [#14707](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14707)) +* Lora fixes ([#14300](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14300), [#14237](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14237), [#14546](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14546), [#14726](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14726)) +* Re-add setting lost as part of e294e46 ([#14266](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14266)) +* fix extras caption BLIP ([#14330](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14330)) +* include infotext into saved init image for img2img ([#14452](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14452)) +* xyz grid handle axis_type is None ([#14394](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14394)) +* Update Added (Fixed) IPV6 Functionality When there is No Webui Argument Passed webui.py ([#14354](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14354)) +* fix API thread safe issues of txt2img and img2img ([#14421](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14421)) +* handle selectable script_index is None ([#14487](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14487)) +* handle config.json failed to load ([#14525](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14525), [#14767](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14767)) +* paste infotext cast int as float ([#14523](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14523)) +* Ensure GRADIO_ANALYTICS_ENABLED is set early enough ([#14537](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14537)) +* Fix logging configuration again ([#14538](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14538)) +* Handle CondFunc exception when resolving attributes ([#14560](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14560)) +* Fix extras big batch crashes ([#14699](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14699)) +* Fix using wrong model caused by alias ([#14655](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14655)) +* Add # to the invalid_filename_chars list ([#14640](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14640)) +* Fix extension check for requirements ([#14639](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14639)) +* Fix tab indexes are reset after restart UI ([#14637](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14637)) +* Fix nested manual cast ([#14689](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14689)) +* Keep postprocessing upscale selected tab after restart ([#14702](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14702)) +* XYZ grid: filter out blank vals when axis is int or float type (like int axis seed) ([#14754](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14754)) +* fix CLIP Interrogator topN regex ([#14775](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14775)) +* Fix dtype error in MHA layer/change dtype checking mechanism for manual cast ([#14791](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14791)) +* catch load style.csv error ([#14814](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14814)) +* fix error when editing extra networks card +* fix extra networks metadata failing to work 
properly when you create the .json file with metadata for the first time. +* util.walk_files extensions case insensitive ([#14879](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14879)) +* if extensions page not loaded, prevent apply ([#14873](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14873)) +* call the right function for token counter in img2img +* Fix the bugs that search/reload will disappear when using other ExtraNetworks extensions ([#14939](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14939)) +* Gracefully handle mtime read exception from cache ([#14933](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14933)) +* Only trigger interrupt on `Esc` when interrupt button visible ([#14932](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14932)) +* Disable prompt token counters option actually disables token counting rather than just hiding results. + +### Other: +* Assign id for "extra_options". Replace numeric field with slider. ([#14270](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14270)) +* change state dict comparison to ref compare ([#14216](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14216)) +* Bump torch-rocm to 5.6/5.7 ([#14293](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14293)) +* Base output path off data path ([#14446](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14446)) +* reorder training preprocessing modules in extras tab ([#14367](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14367)) +* Remove `cleanup_models` code ([#14472](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14472)) +* only rewrite ui-config when there is change ([#14352](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14352)) +* Fix lint issue from 501993eb ([#14495](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14495)) +* Update README.md ([#14548](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14548)) +* hires button, fix seeds () +* Logging: set formatter correctly for fallback logger too ([#14618](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14618)) +* Read generation info from infotexts rather than json for internal needs (save, extract seed from generated pic) ([#14645](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14645)) +* improve get_crop_region ([#14709](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14709)) +* Bump safetensors' version to 0.4.2 ([#14782](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14782)) +* add tooltip create_submit_box ([#14803](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14803)) +* extensions tab table row hover highlight ([#14885](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14885)) +* Always add timestamp to displayed image ([#14890](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14890)) +* Added core.filemode=false so doesn't track changes in file permission… ([#14930](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14930)) +* Normalize command-line argument paths ([#14934](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14934)) +* Use original App Title in progress bar ([#14916](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14916)) + ## 1.7.0 ### Features: From 4573195894fffeae08a94c015a94772c1a54a58d Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 17 Feb 2024 11:40:53 +0300 Subject: 
[PATCH 065/135] prevent escape button causing an interrupt when no generation has been made yet --- script.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script.js b/script.js index 25cf0973a..f069b1ef0 100644 --- a/script.js +++ b/script.js @@ -167,7 +167,7 @@ document.addEventListener('keydown', function(e) { const lightboxModal = document.querySelector('#lightboxModal'); if (!globalPopup || globalPopup.style.display === 'none') { if (document.activeElement === lightboxModal) return; - if (interruptButton.style.display !== 'none') { + if (interruptButton.style.display === 'block') { interruptButton.click(); e.preventDefault(); } From 4652fc5ac36df57add76aa2b012a2d7da57963dd Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 17 Feb 2024 11:40:53 +0300 Subject: [PATCH 066/135] prevent escape button causing an interrupt when no generation has been made yet --- script.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/script.js b/script.js index 25cf0973a..f069b1ef0 100644 --- a/script.js +++ b/script.js @@ -167,7 +167,7 @@ document.addEventListener('keydown', function(e) { const lightboxModal = document.querySelector('#lightboxModal'); if (!globalPopup || globalPopup.style.display === 'none') { if (document.activeElement === lightboxModal) return; - if (interruptButton.style.display !== 'none') { + if (interruptButton.style.display === 'block') { interruptButton.click(); e.preventDefault(); } From 4ff1fabc86db927c45642704fda3472d399f3e19 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 17 Feb 2024 13:21:08 +0300 Subject: [PATCH 067/135] Update comment for Pad prompt/negative prompt v0 to add a warning about truncation, make it override the v1 implementation --- modules/sd_samplers_cfg_denoiser.py | 6 +++--- modules/shared_options.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index 941dff4b3..a73d3b036 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -220,10 +220,10 @@ class CFGDenoiser(torch.nn.Module): self.padded_cond_uncond = False self.padded_cond_uncond_v0 = False - if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: - tensor, uncond = self.pad_cond_uncond(tensor, uncond) - elif shared.opts.pad_cond_uncond_v0 and tensor.shape[1] != uncond.shape[1]: + if shared.opts.pad_cond_uncond_v0 and tensor.shape[1] != uncond.shape[1]: tensor, uncond = self.pad_cond_uncond_v0(tensor, uncond) + elif shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: + tensor, uncond = self.pad_cond_uncond(tensor, uncond) if tensor.shape[1] == uncond.shape[1] or skip_uncond: if is_edit_model: diff --git a/modules/shared_options.py b/modules/shared_options.py index e1d11c8e0..25b47aa19 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -211,7 +211,7 @@ options_templates.update(options_section(('optimizations', "Optimizations", "sd" "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio hr').info("only applies if non-zero and overrides above"), "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt", 
infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"), - "pad_cond_uncond_v0": OptionInfo(False, "Pad prompt/negative prompt (v0)", infotext='Pad conds v0').info("alternative implementation for the above; used prior to 1.6.0 for DDIM sampler; ignored if the above is set; changes seeds"), + "pad_cond_uncond_v0": OptionInfo(False, "Pad prompt/negative prompt (v0)", infotext='Pad conds v0').info("alternative implementation for the above; used prior to 1.6.0 for DDIM sampler; overrides the above if set; WARNING: truncates negative prompt if it's too long; changes seeds"), "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"), "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"), "fp8_storage": OptionInfo("Disable", "FP8 weight", gr.Radio, {"choices": ["Disable", "Enable for SDXL", "Enable"]}).info("Use FP8 to store Linear/Conv layers' weight. Require pytorch>=2.1.0."), From 3345218439ab0e74e2b6ea6e9d6291885a6e8fb5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 17 Feb 2024 13:21:08 +0300 Subject: [PATCH 068/135] Update comment for Pad prompt/negative prompt v0 to add a warning about truncation, make it override the v1 implementation --- modules/sd_samplers_cfg_denoiser.py | 6 +++--- modules/shared_options.py | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index 941dff4b3..a73d3b036 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -220,10 +220,10 @@ class CFGDenoiser(torch.nn.Module): self.padded_cond_uncond = False self.padded_cond_uncond_v0 = False - if shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: - tensor, uncond = self.pad_cond_uncond(tensor, uncond) - elif shared.opts.pad_cond_uncond_v0 and tensor.shape[1] != uncond.shape[1]: + if shared.opts.pad_cond_uncond_v0 and tensor.shape[1] != uncond.shape[1]: tensor, uncond = self.pad_cond_uncond_v0(tensor, uncond) + elif shared.opts.pad_cond_uncond and tensor.shape[1] != uncond.shape[1]: + tensor, uncond = self.pad_cond_uncond(tensor, uncond) if tensor.shape[1] == uncond.shape[1] or skip_uncond: if is_edit_model: diff --git a/modules/shared_options.py b/modules/shared_options.py index e1d11c8e0..25b47aa19 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -211,7 +211,7 @@ options_templates.update(options_section(('optimizations', "Optimizations", "sd" "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"), "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio hr').info("only applies if non-zero and overrides above"), "pad_cond_uncond": OptionInfo(False, "Pad prompt/negative prompt", infotext='Pad conds').info("improves performance when prompt and negative prompt have different lengths; changes seeds"), - "pad_cond_uncond_v0": OptionInfo(False, "Pad prompt/negative prompt (v0)", infotext='Pad conds 
v0').info("alternative implementation for the above; used prior to 1.6.0 for DDIM sampler; ignored if the above is set; changes seeds"), + "pad_cond_uncond_v0": OptionInfo(False, "Pad prompt/negative prompt (v0)", infotext='Pad conds v0').info("alternative implementation for the above; used prior to 1.6.0 for DDIM sampler; overrides the above if set; WARNING: truncates negative prompt if it's too long; changes seeds"), "persistent_cond_cache": OptionInfo(True, "Persistent cond cache").info("do not recalculate conds from prompts if prompts have not changed since previous calculation"), "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond comandline argument"), "fp8_storage": OptionInfo("Disable", "FP8 weight", gr.Radio, {"choices": ["Disable", "Enable for SDXL", "Enable"]}).info("Use FP8 to store Linear/Conv layers' weight. Require pytorch>=2.1.0."), From a18e54ecd756a4101e16e42fc313df259542e07b Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 18 Feb 2024 00:38:05 +0900 Subject: [PATCH 069/135] option "open image button" open the actual dir --- modules/shared_options.py | 2 ++ modules/ui_common.py | 54 +++++++++++++++++++++++++++------------ modules/ui_tempdir.py | 15 +++++++++++ 3 files changed, 54 insertions(+), 17 deletions(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 25b47aa19..7571a7d1d 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -284,6 +284,8 @@ options_templates.update(options_section(('ui_gallery', "Gallery", "ui"), { "sd_webui_modal_lightbox_icon_opacity": OptionInfo(1, "Full page image viewer: control icon unfocused opacity", gr.Slider, {"minimum": 0.0, "maximum": 1, "step": 0.01}, onchange=shared.reload_gradio_theme).info('for mouse only').needs_reload_ui(), "sd_webui_modal_lightbox_toolbar_opacity": OptionInfo(0.9, "Full page image viewer: tool bar opacity", gr.Slider, {"minimum": 0.0, "maximum": 1, "step": 0.01}, onchange=shared.reload_gradio_theme).info('for mouse only').needs_reload_ui(), "gallery_height": OptionInfo("", "Gallery height", gr.Textbox).info("can be any valid CSS value, for example 768px or 20em").needs_reload_ui(), + "button_open_image_actual_dir": OptionInfo(True, '"Open images output directory" button opens the actual directory of the image rather than the output root folder'), + "button_open_image_actual_dir_temp": OptionInfo(False, '"Open images output directory" button opens the actual directory even for temp images'), })) options_templates.update(options_section(('ui_alternatives', "UI alternatives", "ui"), { diff --git a/modules/ui_common.py b/modules/ui_common.py index 29fe7d0e9..78481c6fb 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -9,7 +9,7 @@ import sys import gradio as gr import subprocess as sp -from modules import call_queue, shared +from modules import call_queue, shared, ui_tempdir from modules.infotext_utils import image_from_url_text import modules.images from modules.ui_components import ToolButton @@ -164,29 +164,45 @@ class OutputPanel: def create_output_panel(tabname, outdir, toprow=None): res = OutputPanel() - def open_folder(f): + def open_folder(f, images=None, index=None): + if shared.cmd_opts.hide_ui_dir_config: + return + + try: + if shared.opts.button_open_image_actual_dir and 0 <= index < len(images): + image = 
images[index] + image_path = image["name"].rsplit('?', 1)[0] + image_dir = os.path.split(image_path)[0] + if shared.opts.button_open_image_actual_dir_temp or not ui_tempdir.is_gradio_temp_path(image_dir): + f = image_dir + except Exception: + pass + if not os.path.exists(f): - print(f'Folder "{f}" does not exist. After you create an image, the folder will be created.') + msg = f'Folder "{f}" does not exist. After you create an image, the folder will be created.' + print(msg) + gr.Info(msg) return elif not os.path.isdir(f): - print(f""" + msg = f""" WARNING An open_folder request was made with an argument that is not a folder. This could be an error or a malicious attempt to run code on your computer. Requested path was: {f} -""", file=sys.stderr) +""" + print(msg, file=sys.stderr) + gr.Warning(msg) return - if not shared.cmd_opts.hide_ui_dir_config: - path = os.path.normpath(f) - if platform.system() == "Windows": - os.startfile(path) - elif platform.system() == "Darwin": - sp.Popen(["open", path]) - elif "microsoft-standard-WSL2" in platform.uname().release: - sp.Popen(["wsl-open", path]) - else: - sp.Popen(["xdg-open", path]) + path = os.path.normpath(f) + if platform.system() == "Windows": + os.startfile(path) + elif platform.system() == "Darwin": + sp.Popen(["open", path]) + elif "microsoft-standard-WSL2" in platform.uname().release: + sp.Popen(["wsl-open", path]) + else: + sp.Popen(["xdg-open", path]) with gr.Column(elem_id=f"{tabname}_results"): if toprow: @@ -213,8 +229,12 @@ Requested path was: {f} res.button_upscale = ToolButton('✨', elem_id=f'{tabname}_upscale', tooltip="Create an upscaled version of the current image using hires fix settings.") open_folder_button.click( - fn=lambda: open_folder(shared.opts.outdir_samples or outdir), - inputs=[], + fn=lambda images, index: open_folder(shared.opts.outdir_samples or outdir, images, index), + _js="(y, w) => [y, selected_gallery_index()]", + inputs=[ + res.gallery, + open_folder_button, # placeholder for index + ], outputs=[], ) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 91f40ea42..621ed1eca 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -81,3 +81,18 @@ def cleanup_tmpdr(): filename = os.path.join(root, name) os.remove(filename) + + +def is_gradio_temp_path(path): + """ + Check if the path is a temp dir used by gradio + """ + path = Path(path) + if shared.opts.temp_dir and path.is_relative_to(shared.opts.temp_dir): + return True + if gradio_temp_dir := os.environ.get("GRADIO_TEMP_DIR"): + if path.is_relative_to(gradio_temp_dir): + return True + if path.is_relative_to(Path(tempfile.gettempdir()) / "gradio"): + return True + return False From 71072f56204c300fa294e15eb7d07592edacda16 Mon Sep 17 00:00:00 2001 From: w-e-w <40751091+w-e-w@users.noreply.github.com> Date: Sun, 18 Feb 2024 02:47:44 +0900 Subject: [PATCH 070/135] re-work open image button settings --- modules/shared_options.py | 3 +-- modules/ui_common.py | 8 +++----- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 7571a7d1d..bb3752ba6 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -284,8 +284,7 @@ options_templates.update(options_section(('ui_gallery', "Gallery", "ui"), { "sd_webui_modal_lightbox_icon_opacity": OptionInfo(1, "Full page image viewer: control icon unfocused opacity", gr.Slider, {"minimum": 0.0, "maximum": 1, "step": 0.01}, onchange=shared.reload_gradio_theme).info('for mouse only').needs_reload_ui(), 
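A short usage note on the is_gradio_temp_path() helper added to modules/ui_tempdir.py above: the containment test relies on pathlib's Path.is_relative_to (available since Python 3.9), so it behaves as in the hypothetical example below; the concrete paths are invented for illustration.

import tempfile
from pathlib import Path

gradio_tmp = Path(tempfile.gettempdir()) / "gradio"

# a gallery preview served from gradio's temp dir: the folder button falls back to the
# configured outputs directory instead of opening the temp dir
print((gradio_tmp / "tmpabc123" / "00001.png").parent.is_relative_to(gradio_tmp))      # True

# an image already saved under the outputs tree: opening its own subdirectory is fine
print(Path("outputs/txt2img-images/2024-02-18").resolve().is_relative_to(gradio_tmp))  # False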
"sd_webui_modal_lightbox_toolbar_opacity": OptionInfo(0.9, "Full page image viewer: tool bar opacity", gr.Slider, {"minimum": 0.0, "maximum": 1, "step": 0.01}, onchange=shared.reload_gradio_theme).info('for mouse only').needs_reload_ui(), "gallery_height": OptionInfo("", "Gallery height", gr.Textbox).info("can be any valid CSS value, for example 768px or 20em").needs_reload_ui(), - "button_open_image_actual_dir": OptionInfo(True, '"Open images output directory" button opens the actual directory of the image rather than the output root folder'), - "button_open_image_actual_dir_temp": OptionInfo(False, '"Open images output directory" button opens the actual directory even for temp images'), + "open_dir_button_choice": OptionInfo("Subdirectory", "What directory the [📂] button opens", gr.Radio, {"choices": ["Output Root", "Subdirectory", "Subdirectory (even temp dir)"]}), })) options_templates.update(options_section(('ui_alternatives', "UI alternatives", "ui"), { diff --git a/modules/ui_common.py b/modules/ui_common.py index 78481c6fb..cf1b8b32c 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -169,11 +169,9 @@ def create_output_panel(tabname, outdir, toprow=None): return try: - if shared.opts.button_open_image_actual_dir and 0 <= index < len(images): - image = images[index] - image_path = image["name"].rsplit('?', 1)[0] - image_dir = os.path.split(image_path)[0] - if shared.opts.button_open_image_actual_dir_temp or not ui_tempdir.is_gradio_temp_path(image_dir): + if 'Sub' in shared.opts.open_dir_button_choice: + image_dir = os.path.split(images[index]["name"].rsplit('?', 1)[0])[0] + if 'temp' in shared.opts.open_dir_button_choice or not ui_tempdir.is_gradio_temp_path(image_dir): f = image_dir except Exception: pass From cb52279c3e7cf145cc1b284b6a05c883e6995c9f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 17 Feb 2024 21:30:21 +0300 Subject: [PATCH 071/135] Merge pull request #14947 from AUTOMATIC1111/open-button option "open image button" open the actual dir --- modules/shared_options.py | 1 + modules/ui_common.py | 52 ++++++++++++++++++++++++++------------- modules/ui_tempdir.py | 15 +++++++++++ 3 files changed, 51 insertions(+), 17 deletions(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index 25b47aa19..bb3752ba6 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -284,6 +284,7 @@ options_templates.update(options_section(('ui_gallery', "Gallery", "ui"), { "sd_webui_modal_lightbox_icon_opacity": OptionInfo(1, "Full page image viewer: control icon unfocused opacity", gr.Slider, {"minimum": 0.0, "maximum": 1, "step": 0.01}, onchange=shared.reload_gradio_theme).info('for mouse only').needs_reload_ui(), "sd_webui_modal_lightbox_toolbar_opacity": OptionInfo(0.9, "Full page image viewer: tool bar opacity", gr.Slider, {"minimum": 0.0, "maximum": 1, "step": 0.01}, onchange=shared.reload_gradio_theme).info('for mouse only').needs_reload_ui(), "gallery_height": OptionInfo("", "Gallery height", gr.Textbox).info("can be any valid CSS value, for example 768px or 20em").needs_reload_ui(), + "open_dir_button_choice": OptionInfo("Subdirectory", "What directory the [📂] button opens", gr.Radio, {"choices": ["Output Root", "Subdirectory", "Subdirectory (even temp dir)"]}), })) options_templates.update(options_section(('ui_alternatives', "UI alternatives", "ui"), { diff --git a/modules/ui_common.py b/modules/ui_common.py index 29fe7d0e9..cf1b8b32c 100644 --- a/modules/ui_common.py +++ b/modules/ui_common.py @@ -9,7 +9,7 
@@ import sys import gradio as gr import subprocess as sp -from modules import call_queue, shared +from modules import call_queue, shared, ui_tempdir from modules.infotext_utils import image_from_url_text import modules.images from modules.ui_components import ToolButton @@ -164,29 +164,43 @@ class OutputPanel: def create_output_panel(tabname, outdir, toprow=None): res = OutputPanel() - def open_folder(f): + def open_folder(f, images=None, index=None): + if shared.cmd_opts.hide_ui_dir_config: + return + + try: + if 'Sub' in shared.opts.open_dir_button_choice: + image_dir = os.path.split(images[index]["name"].rsplit('?', 1)[0])[0] + if 'temp' in shared.opts.open_dir_button_choice or not ui_tempdir.is_gradio_temp_path(image_dir): + f = image_dir + except Exception: + pass + if not os.path.exists(f): - print(f'Folder "{f}" does not exist. After you create an image, the folder will be created.') + msg = f'Folder "{f}" does not exist. After you create an image, the folder will be created.' + print(msg) + gr.Info(msg) return elif not os.path.isdir(f): - print(f""" + msg = f""" WARNING An open_folder request was made with an argument that is not a folder. This could be an error or a malicious attempt to run code on your computer. Requested path was: {f} -""", file=sys.stderr) +""" + print(msg, file=sys.stderr) + gr.Warning(msg) return - if not shared.cmd_opts.hide_ui_dir_config: - path = os.path.normpath(f) - if platform.system() == "Windows": - os.startfile(path) - elif platform.system() == "Darwin": - sp.Popen(["open", path]) - elif "microsoft-standard-WSL2" in platform.uname().release: - sp.Popen(["wsl-open", path]) - else: - sp.Popen(["xdg-open", path]) + path = os.path.normpath(f) + if platform.system() == "Windows": + os.startfile(path) + elif platform.system() == "Darwin": + sp.Popen(["open", path]) + elif "microsoft-standard-WSL2" in platform.uname().release: + sp.Popen(["wsl-open", path]) + else: + sp.Popen(["xdg-open", path]) with gr.Column(elem_id=f"{tabname}_results"): if toprow: @@ -213,8 +227,12 @@ Requested path was: {f} res.button_upscale = ToolButton('✨', elem_id=f'{tabname}_upscale', tooltip="Create an upscaled version of the current image using hires fix settings.") open_folder_button.click( - fn=lambda: open_folder(shared.opts.outdir_samples or outdir), - inputs=[], + fn=lambda images, index: open_folder(shared.opts.outdir_samples or outdir, images, index), + _js="(y, w) => [y, selected_gallery_index()]", + inputs=[ + res.gallery, + open_folder_button, # placeholder for index + ], outputs=[], ) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 91f40ea42..621ed1eca 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -81,3 +81,18 @@ def cleanup_tmpdr(): filename = os.path.join(root, name) os.remove(filename) + + +def is_gradio_temp_path(path): + """ + Check if the path is a temp dir used by gradio + """ + path = Path(path) + if shared.opts.temp_dir and path.is_relative_to(shared.opts.temp_dir): + return True + if gradio_temp_dir := os.environ.get("GRADIO_TEMP_DIR"): + if path.is_relative_to(gradio_temp_dir): + return True + if path.is_relative_to(Path(tempfile.gettempdir()) / "gradio"): + return True + return False From c7808825b12cc5591012b50d93f27e4bce99ec5c Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 17 Feb 2024 21:38:19 +0300 Subject: [PATCH 072/135] update changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9fd366fa7..cfebc11c7 100644 --- 
a/CHANGELOG.md +++ b/CHANGELOG.md @@ -39,8 +39,10 @@ * Button for refresh extensions list ([#14857](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14857)) * Add an option to disable normalization after calculating emphasis. ([#14874](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14874)) * When counting tokens, also include enabled styles (can be disabled in settings to revert to previous behavior) +* Configuration for the [📂] button for image gallery ([#14947](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14947)) ### Extensions and API: +* Removed packages from requirements: basicsr, gfpgan, realesrgan; as well as their dependencies: absl-py, addict, beautifulsoup4, future, gdown, grpcio, importlib-metadata, lmdb, lpips, Markdown, platformdirs, PySocks, soupsieve, tb-nightly, tensorboard-data-server, tomli, Werkzeug, yapf, zipp, soupsieve * Enable task ids for API ([#14314](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14314)) * add override_settings support for infotext API * rename generation_parameters_copypaste module to infotext_utils From 5a8dd0c549c0221cd3ee1c53816aa52cf7b3b0ae Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Sun, 18 Feb 2024 14:58:41 +0800 Subject: [PATCH 073/135] Fix rescale --- extensions-builtin/Lora/network_oft.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index ed221d8fe..f5e657b82 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -40,7 +40,9 @@ class NetworkModuleOFT(network.NetworkModule): self.is_boft = False if weights.w["oft_diag"].dim() == 4: self.is_boft = True - self.rescale = weight.w.get('rescale', None) + self.rescale = weights.w.get('rescale', None) + if self.rescale is not None: + self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1)) is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear] is_conv = type(self.sd_module) in [torch.nn.Conv2d] From 4eb949625c8cc04ba579fc5486cc10acd541596b Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Mon, 19 Feb 2024 14:43:07 +0800 Subject: [PATCH 074/135] prevent undefined variable --- extensions-builtin/Lora/network_oft.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index f5e657b82..d658ad109 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -22,6 +22,8 @@ class NetworkModuleOFT(network.NetworkModule): self.org_module: list[torch.Module] = [self.sd_module] self.scale = 1.0 + self.is_kohya = False + self.is_boft = False # kohya-ss if "oft_blocks" in weights.w.keys(): @@ -31,13 +33,11 @@ class NetworkModuleOFT(network.NetworkModule): self.dim = self.oft_blocks.shape[0] # lora dim # LyCORIS OFT elif "oft_diag" in weights.w.keys(): - self.is_kohya = False self.oft_blocks = weights.w["oft_diag"] # self.alpha is unused self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size) # LyCORIS BOFT - self.is_boft = False if weights.w["oft_diag"].dim() == 4: self.is_boft = True self.rescale = weights.w.get('rescale', None) From 92ab0ef7d65ededa758f81e52cf4f48f72d13564 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 19 Feb 2024 
10:05:30 +0300 Subject: [PATCH 075/135] Merge pull request #14871 from v0xie/boft Support inference with LyCORIS BOFT networks --- extensions-builtin/Lora/network_oft.py | 58 +++++++++++++++++++++----- 1 file changed, 48 insertions(+), 10 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index d1c46a4b2..d658ad109 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -22,6 +22,8 @@ class NetworkModuleOFT(network.NetworkModule): self.org_module: list[torch.Module] = [self.sd_module] self.scale = 1.0 + self.is_kohya = False + self.is_boft = False # kohya-ss if "oft_blocks" in weights.w.keys(): @@ -29,13 +31,19 @@ class NetworkModuleOFT(network.NetworkModule): self.oft_blocks = weights.w["oft_blocks"] # (num_blocks, block_size, block_size) self.alpha = weights.w["alpha"] # alpha is constraint self.dim = self.oft_blocks.shape[0] # lora dim - # LyCORIS + # LyCORIS OFT elif "oft_diag" in weights.w.keys(): - self.is_kohya = False self.oft_blocks = weights.w["oft_diag"] # self.alpha is unused self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size) + # LyCORIS BOFT + if weights.w["oft_diag"].dim() == 4: + self.is_boft = True + self.rescale = weights.w.get('rescale', None) + if self.rescale is not None: + self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1)) + is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear] is_conv = type(self.sd_module) in [torch.nn.Conv2d] is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] # unsupported @@ -51,6 +59,13 @@ class NetworkModuleOFT(network.NetworkModule): self.constraint = self.alpha * self.out_dim self.num_blocks = self.dim self.block_size = self.out_dim // self.dim + elif self.is_boft: + self.constraint = None + self.boft_m = weights.w["oft_diag"].shape[0] + self.block_num = weights.w["oft_diag"].shape[1] + self.block_size = weights.w["oft_diag"].shape[2] + self.boft_b = self.block_size + #self.block_size, self.block_num = butterfly_factor(self.out_dim, self.dim) else: self.constraint = None self.block_size, self.num_blocks = factorization(self.out_dim, self.dim) @@ -68,14 +83,37 @@ class NetworkModuleOFT(network.NetworkModule): R = oft_blocks.to(orig_weight.device) - # This errors out for MultiheadAttention, might need to be handled up-stream - merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size) - merged_weight = torch.einsum( - 'k n m, k n ... -> k m ...', - R, - merged_weight - ) - merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...') + if not self.is_boft: + # This errors out for MultiheadAttention, might need to be handled up-stream + merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size) + merged_weight = torch.einsum( + 'k n m, k n ... -> k m ...', + R, + merged_weight + ) + merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...') + else: + # TODO: determine correct value for scale + scale = 1.0 + m = self.boft_m + b = self.boft_b + r_b = b // 2 + inp = orig_weight + for i in range(m): + bi = R[i] # b_num, b_size, b_size + if i == 0: + # Apply multiplier/scale and rescale into first weight + bi = bi * scale + (1 - scale) * eye + inp = rearrange(inp, "(c g k) ... -> (c k g) ...", g=2, k=2**i * r_b) + inp = rearrange(inp, "(d b) ... -> d b ...", b=b) + inp = torch.einsum("b i j, b j ... 
-> b i ...", bi, inp) + inp = rearrange(inp, "d b ... -> (d b) ...") + inp = rearrange(inp, "(c k g) ... -> (c g k) ...", g=2, k=2**i * r_b) + merged_weight = inp + + # Rescale mechanism + if self.rescale is not None: + merged_weight = self.rescale.to(merged_weight) * merged_weight updown = merged_weight.to(orig_weight.device) - orig_weight.to(merged_weight.dtype) output_shape = orig_weight.shape From 532c34082903ca03a17a847d5120647ef418190d Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 19 Feb 2024 10:07:57 +0300 Subject: [PATCH 076/135] update changelog --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index cfebc11c7..c7bfc1c98 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -34,12 +34,13 @@ * Add checkpoint info to csv log file when saving images ([#14663](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14663)) * Make more columns resizable ([#14740](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14740), [#14884](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14884)) * Add an option to not overlay original image for inpainting for #14727 -* Add Pad conds v0 option +* Add Pad conds v0 option to support same generation with DDIM as before 1.6.0 * Add "Interrupting..." placeholder. * Button for refresh extensions list ([#14857](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14857)) * Add an option to disable normalization after calculating emphasis. ([#14874](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14874)) * When counting tokens, also include enabled styles (can be disabled in settings to revert to previous behavior) * Configuration for the [📂] button for image gallery ([#14947](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14947)) +* Support inference with LyCORIS BOFT networks ([#14871](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14871)) ### Extensions and API: * Removed packages from requirements: basicsr, gfpgan, realesrgan; as well as their dependencies: absl-py, addict, beautifulsoup4, future, gdown, grpcio, importlib-metadata, lmdb, lpips, Markdown, platformdirs, PySocks, soupsieve, tb-nightly, tensorboard-data-server, tomli, Werkzeug, yapf, zipp, soupsieve From 33c8fe1221cdc53b9f00b7041b6e06cc9b0e037c Mon Sep 17 00:00:00 2001 From: Andray Date: Mon, 19 Feb 2024 16:57:49 +0400 Subject: [PATCH 077/135] avoid doble upscaling in inpaint --- modules/processing.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index f4aa165de..d208a922d 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -74,16 +74,18 @@ def uncrop(image, dest_size, paste_loc): def apply_overlay(image, paste_loc, overlay): if overlay is None: - return image + return image, image.copy() if paste_loc is not None: image = uncrop(image, (overlay.width, overlay.height), paste_loc) + original_denoised_image = image.copy() + image = image.convert('RGBA') image.alpha_composite(overlay) image = image.convert('RGB') - return image + return image, original_denoised_image def create_binary_mask(image, round=True): if image.mode == 'RGBA' and image.getextrema()[-1] != (255, 255): @@ -1021,7 +1023,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if p.color_corrections is not None and i < len(p.color_corrections): if save_samples and opts.save_images_before_color_correction: - image_without_cc = apply_overlay(image, 
p.paste_to, overlay_image) + image_without_cc, _ = apply_overlay(image, p.paste_to, overlay_image) images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-color-correction") image = apply_color_correction(p.color_corrections[i], image) @@ -1029,12 +1031,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: # that is being composited over the original image, # we need to keep the original image around # and use it in the composite step. - original_denoised_image = image.copy() - - if p.paste_to is not None: - original_denoised_image = uncrop(original_denoised_image, (overlay_image.width, overlay_image.height), p.paste_to) - - image = apply_overlay(image, p.paste_to, overlay_image) + image, original_denoised_image = apply_overlay(image, p.paste_to, overlay_image) if p.scripts is not None: pp = scripts.PostprocessImageArgs(image) From 140d58b51213c0a025da475a40ef0b8efbc33587 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 19 Feb 2024 18:06:05 +0300 Subject: [PATCH 078/135] Merge pull request #14966 from light-and-ray/avoid_doble_upscaling_in_inpaint [bug] avoid doble upscaling in inpaint --- modules/processing.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index f4aa165de..d208a922d 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -74,16 +74,18 @@ def uncrop(image, dest_size, paste_loc): def apply_overlay(image, paste_loc, overlay): if overlay is None: - return image + return image, image.copy() if paste_loc is not None: image = uncrop(image, (overlay.width, overlay.height), paste_loc) + original_denoised_image = image.copy() + image = image.convert('RGBA') image.alpha_composite(overlay) image = image.convert('RGB') - return image + return image, original_denoised_image def create_binary_mask(image, round=True): if image.mode == 'RGBA' and image.getextrema()[-1] != (255, 255): @@ -1021,7 +1023,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if p.color_corrections is not None and i < len(p.color_corrections): if save_samples and opts.save_images_before_color_correction: - image_without_cc = apply_overlay(image, p.paste_to, overlay_image) + image_without_cc, _ = apply_overlay(image, p.paste_to, overlay_image) images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-color-correction") image = apply_color_correction(p.color_corrections[i], image) @@ -1029,12 +1031,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: # that is being composited over the original image, # we need to keep the original image around # and use it in the composite step. 
- original_denoised_image = image.copy() - - if p.paste_to is not None: - original_denoised_image = uncrop(original_denoised_image, (overlay_image.width, overlay_image.height), p.paste_to) - - image = apply_overlay(image, p.paste_to, overlay_image) + image, original_denoised_image = apply_overlay(image, p.paste_to, overlay_image) if p.scripts is not None: pp = scripts.PostprocessImageArgs(image) From a5436a3ac0d0048a36f0652bde56ec2bc9aeb2ca Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Tue, 20 Feb 2024 17:20:14 +0800 Subject: [PATCH 079/135] Update network_oft.py --- extensions-builtin/Lora/network_oft.py | 40 ++++++++++++-------------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index d658ad109..5b899bd63 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -22,24 +22,24 @@ class NetworkModuleOFT(network.NetworkModule): self.org_module: list[torch.Module] = [self.sd_module] self.scale = 1.0 - self.is_kohya = False + self.is_R = False self.is_boft = False - # kohya-ss + # kohya-ss/New LyCORIS OFT/BOFT if "oft_blocks" in weights.w.keys(): - self.is_kohya = True self.oft_blocks = weights.w["oft_blocks"] # (num_blocks, block_size, block_size) - self.alpha = weights.w["alpha"] # alpha is constraint + self.alpha = weights.w.get("alpha", self.alpha) # alpha is constraint self.dim = self.oft_blocks.shape[0] # lora dim - # LyCORIS OFT + # Old LyCORIS OFT elif "oft_diag" in weights.w.keys(): + self.is_R = True self.oft_blocks = weights.w["oft_diag"] # self.alpha is unused self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size) - # LyCORIS BOFT - if weights.w["oft_diag"].dim() == 4: - self.is_boft = True + # LyCORIS BOFT + if self.oft_blocks.dim() == 4: + self.is_boft = True self.rescale = weights.w.get('rescale', None) if self.rescale is not None: self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1)) @@ -55,26 +55,24 @@ class NetworkModuleOFT(network.NetworkModule): elif is_other_linear: self.out_dim = self.sd_module.embed_dim - if self.is_kohya: - self.constraint = self.alpha * self.out_dim - self.num_blocks = self.dim - self.block_size = self.out_dim // self.dim + self.num_blocks = self.dim + self.block_size = self.out_dim // self.dim + self.constraint = (1 if self.alpha is None else self.alpha) * self.out_dim + if self.is_R: + self.constraint = None + self.block_size = self.dim + self.num_blocks = self.out_dim // self.dim elif self.is_boft: - self.constraint = None - self.boft_m = weights.w["oft_diag"].shape[0] - self.block_num = weights.w["oft_diag"].shape[1] - self.block_size = weights.w["oft_diag"].shape[2] + self.boft_m = self.oft_blocks.shape[0] + self.num_blocks = self.oft_blocks.shape[1] + self.block_size = self.oft_blocks.shape[2] self.boft_b = self.block_size - #self.block_size, self.block_num = butterfly_factor(self.out_dim, self.dim) - else: - self.constraint = None - self.block_size, self.num_blocks = factorization(self.out_dim, self.dim) def calc_updown(self, orig_weight): oft_blocks = self.oft_blocks.to(orig_weight.device) eye = torch.eye(self.block_size, device=oft_blocks.device) - if self.is_kohya: + if not self.is_R: block_Q = oft_blocks - oft_blocks.transpose(1, 2) # ensure skew-symmetric orthogonal matrix norm_Q = torch.norm(block_Q.flatten()) new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device)) From 
591470d86d565559d79d14a66ff14ecea2bd7706 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Tue, 20 Feb 2024 17:21:34 +0800 Subject: [PATCH 080/135] linting --- extensions-builtin/Lora/network_oft.py | 1 - 1 file changed, 1 deletion(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index 5b899bd63..f14c183ae 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -1,6 +1,5 @@ import torch import network -from lyco_helpers import factorization from einops import rearrange From f4869f8de3ed76735ea331fe5463abc6190bd4cf Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Tue, 20 Feb 2024 16:18:13 -0500 Subject: [PATCH 081/135] Add compatibility option for refiner switching --- modules/shared_options.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index bb3752ba6..e17eed512 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -227,7 +227,8 @@ options_templates.update(options_section(('compatibility', "Compatibility", "sd" "dont_fix_second_order_samplers_schedule": OptionInfo(False, "Do not fix prompt schedule for second order samplers."), "hires_fix_use_firstpass_conds": OptionInfo(False, "For hires fix, calculate conds of second pass using extra networks of first pass."), "use_old_scheduling": OptionInfo(False, "Use old prompt editing timelines.", infotext="Old prompt editing timelines").info("For [red:green:N]; old: If N < 1, it's a fraction of steps (and hires fix uses range from 0 to 1), if N >= 1, it's an absolute number of steps; new: If N has a decimal point in it, it's a fraction of steps (and hires fix uses range from 1 to 2), othewrwise it's an absolute number of steps"), - "use_downcasted_alpha_bar": OptionInfo(False, "Downcast model alphas_cumprod to fp16 before sampling. For reproducing old seeds.", infotext="Downcast alphas_cumprod") + "use_downcasted_alpha_bar": OptionInfo(False, "Downcast model alphas_cumprod to fp16 before sampling. For reproducing old seeds.", infotext="Downcast alphas_cumprod"), + "refiner_switch_by_sample_steps": OptionInfo(False, "Switch to refiner by sampling steps instead of model timesteps. 
Old behavior for refiner.", infotext="Refiner switch by sampling steps") })) options_templates.update(options_section(('interrogate', "Interrogate"), { From 09d2e5881120c4a51888633947062b40726c6fef Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Tue, 20 Feb 2024 16:22:40 -0500 Subject: [PATCH 082/135] Pass sigma to apply_refiner --- modules/sd_samplers_cfg_denoiser.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_cfg_denoiser.py b/modules/sd_samplers_cfg_denoiser.py index a73d3b036..93581c9ac 100644 --- a/modules/sd_samplers_cfg_denoiser.py +++ b/modules/sd_samplers_cfg_denoiser.py @@ -152,7 +152,7 @@ class CFGDenoiser(torch.nn.Module): if state.interrupted or state.skipped: raise sd_samplers_common.InterruptedException - if sd_samplers_common.apply_refiner(self): + if sd_samplers_common.apply_refiner(self, sigma): cond = self.sampler.sampler_extra_args['cond'] uncond = self.sampler.sampler_extra_args['uncond'] From 25eeeaa65f819bb40df427141b82b46d3fcf59e9 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Tue, 20 Feb 2024 16:37:29 -0500 Subject: [PATCH 083/135] Allow refiner to be triggered by model timestep instead of sampling --- modules/sd_samplers_common.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 6bd38e12a..8052b021a 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -156,7 +156,16 @@ replace_torchsde_browinan() def apply_refiner(cfg_denoiser): - completed_ratio = cfg_denoiser.step / cfg_denoiser.total_steps + if opts.refiner_switch_by_sample_steps: + completed_ratio = cfg_denoiser.step / cfg_denoiser.total_steps + else: + # torch.max(sigma) only to handle rare case where we might have different sigmas in the same batch + try: + timestep = torch.argmin(torch.abs(cfg_denoiser.inner_model.sigmas - torch.max(sigma))) + except AttributeError: # for samplers that dont use sigmas (DDIM) sigma is actually the timestep + timestep = torch.max(sigma).to(dtype=int) + completed_ratio = (999 - timestep) / 1000 + refiner_switch_at = cfg_denoiser.p.refiner_switch_at refiner_checkpoint_info = cfg_denoiser.p.refiner_checkpoint_info From bf348032bc07d48ec0b4fbb5be1c4648ee8bd49b Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Tue, 20 Feb 2024 16:59:28 -0500 Subject: [PATCH 084/135] fix missing arg --- modules/sd_samplers_common.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 8052b021a..045b9e2fe 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -155,7 +155,7 @@ def replace_torchsde_browinan(): replace_torchsde_browinan() -def apply_refiner(cfg_denoiser): +def apply_refiner(cfg_denoiser, sigma): if opts.refiner_switch_by_sample_steps: completed_ratio = cfg_denoiser.step / cfg_denoiser.total_steps else: From 9c1ece89784e36a86b19f371e3b6e60bb630394e Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Tue, 20 Feb 2024 19:23:21 -0500 Subject: [PATCH 085/135] Protect alphas_cumprod during refiner switchover --- modules/sd_samplers_common.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 6bd38e12a..c9578ffe6 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -181,8 
+181,12 @@ def apply_refiner(cfg_denoiser): cfg_denoiser.p.extra_generation_params['Refiner'] = refiner_checkpoint_info.short_title cfg_denoiser.p.extra_generation_params['Refiner switch at'] = refiner_switch_at + alphas_cumprod_original = cfg_denoiser.p.sd_model.alphas_cumprod_original + alphas_cumprod = cfg_denoiser.p.sd_model.alphas_cumprod with sd_models.SkipWritingToConfig(): sd_models.reload_model_weights(info=refiner_checkpoint_info) + cfg_denoiser.p.sd_model.alphas_cumprod_original = alphas_cumprod_original + cfg_denoiser.p.sd_model.alphas_cumprod = alphas_cumprod devices.torch_gc() cfg_denoiser.p.setup_conds() From 64179c32213f986d1378b2f414be6ef86af1a82f Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Wed, 21 Feb 2024 22:50:43 +0800 Subject: [PATCH 086/135] Update network_oft.py --- extensions-builtin/Lora/network_oft.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index f14c183ae..ce931c620 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -72,7 +72,7 @@ class NetworkModuleOFT(network.NetworkModule): eye = torch.eye(self.block_size, device=oft_blocks.device) if not self.is_R: - block_Q = oft_blocks - oft_blocks.transpose(1, 2) # ensure skew-symmetric orthogonal matrix + block_Q = oft_blocks - oft_blocks.transpose(-1, -2) # ensure skew-symmetric orthogonal matrix norm_Q = torch.norm(block_Q.flatten()) new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device)) block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) From c4afdb7895a5a5224915b3c6f27f8e800e18ef41 Mon Sep 17 00:00:00 2001 From: Kohaku-Blueleaf <59680068+KohakuBlueleaf@users.noreply.github.com> Date: Thu, 22 Feb 2024 00:43:32 +0800 Subject: [PATCH 087/135] For no constraint --- extensions-builtin/Lora/network_oft.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index ce931c620..7821a8a7d 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -27,7 +27,7 @@ class NetworkModuleOFT(network.NetworkModule): # kohya-ss/New LyCORIS OFT/BOFT if "oft_blocks" in weights.w.keys(): self.oft_blocks = weights.w["oft_blocks"] # (num_blocks, block_size, block_size) - self.alpha = weights.w.get("alpha", self.alpha) # alpha is constraint + self.alpha = weights.w.get("alpha", None) # alpha is constraint self.dim = self.oft_blocks.shape[0] # lora dim # Old LyCORIS OFT elif "oft_diag" in weights.w.keys(): @@ -56,7 +56,7 @@ class NetworkModuleOFT(network.NetworkModule): self.num_blocks = self.dim self.block_size = self.out_dim // self.dim - self.constraint = (1 if self.alpha is None else self.alpha) * self.out_dim + self.constraint = (0 if self.alpha is None else self.alpha) * self.out_dim if self.is_R: self.constraint = None self.block_size = self.dim @@ -73,9 +73,10 @@ class NetworkModuleOFT(network.NetworkModule): if not self.is_R: block_Q = oft_blocks - oft_blocks.transpose(-1, -2) # ensure skew-symmetric orthogonal matrix - norm_Q = torch.norm(block_Q.flatten()) - new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device)) - block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) + if self.constraint != 0: + norm_Q = torch.norm(block_Q.flatten()) + new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device)) + block_Q = 
block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) oft_blocks = torch.matmul(eye + block_Q, (eye - block_Q).float().inverse()) R = oft_blocks.to(orig_weight.device) From f537e5a519d080fd2b16d94d91e7fed8dd3fd680 Mon Sep 17 00:00:00 2001 From: dtlnor Date: Thu, 22 Feb 2024 12:26:57 +0900 Subject: [PATCH 088/135] fix #14591 - using translated content to do categories mapping --- javascript/settings.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/settings.js b/javascript/settings.js index e6009290a..b2d981c21 100644 --- a/javascript/settings.js +++ b/javascript/settings.js @@ -55,8 +55,8 @@ onOptionsChanged(function() { }); opts._categories.forEach(function(x) { - var section = x[0]; - var category = x[1]; + var section = localization[x[0]] ?? x[0]; + var category = localization[x[1]] ?? x[1]; var span = document.createElement('SPAN'); span.textContent = category; From 1da05297ea1850c6df5ef1f3d6a487d4bb4c50dd Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 22 Feb 2024 10:27:38 +0300 Subject: [PATCH 089/135] possible fix for reload button not appearing in some cases for extra networks. --- modules/ui_extra_networks.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index c03b9f081..6874a0244 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -472,7 +472,7 @@ class ExtraNetworksPage: return f"
    {res}
" - def create_card_view_html(self, tabname: str) -> str: + def create_card_view_html(self, tabname: str, *, none_message) -> str: """Generates HTML for the network Card View section for a tab. This HTML goes into the `extra-networks-pane.html`
with @@ -480,6 +480,7 @@ class ExtraNetworksPage: Args: tabname: The name of the active tab. + none_message: HTML text to show when there are no cards. Returns: HTML formatted string. @@ -490,24 +491,28 @@ class ExtraNetworksPage: if res == "": dirs = "".join([f"
<li>{x}</li>
  • " for x in self.allowed_directories_for_previews()]) - res = shared.html("extra-networks-no-cards.html").format(dirs=dirs) + res = none_message or shared.html("extra-networks-no-cards.html").format(dirs=dirs) return res - def create_html(self, tabname): + def create_html(self, tabname, *, empty=False): """Generates an HTML string for the current pane. The generated HTML uses `extra-networks-pane.html` as a template. Args: tabname: The name of the active tab. + empty: create an empty HTML page with no items Returns: HTML formatted string. """ self.lister.reset() self.metadata = {} - self.items = {x["name"]: x for x in self.list_items()} + + items_list = [] if empty else self.list_items() + self.items = {x["name"]: x for x in items_list} + # Populate the instance metadata for each item. for item in self.items.values(): metadata = item.get("metadata") @@ -536,7 +541,7 @@ class ExtraNetworksPage: "tree_view_btn_extra_class": tree_view_btn_extra_class, "tree_view_div_extra_class": tree_view_div_extra_class, "tree_html": self.create_tree_view_html(tabname), - "items_html": self.create_card_view_html(tabname), + "items_html": self.create_card_view_html(tabname, none_message="Loading..." if empty else None), } ) @@ -655,7 +660,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): pass elem_id = f"{tabname}_{page.extra_networks_tabname}_cards_html" - page_elem = gr.HTML('Loading...', elem_id=elem_id) + page_elem = gr.HTML(page.create_html(tabname, empty=True), elem_id=elem_id) ui.pages.append(page_elem) editor = page.create_user_metadata_editor(ui, tabname) editor.create_ui() From 052fbde3ac41cce7b97d6bd7aa67fb8922866575 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 22 Feb 2024 10:27:38 +0300 Subject: [PATCH 090/135] possible fix for reload button not appearing in some cases for extra networks. --- modules/ui_extra_networks.py | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index c03b9f081..6874a0244 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -472,7 +472,7 @@ class ExtraNetworksPage: return f"
      {res}
    " - def create_card_view_html(self, tabname: str) -> str: + def create_card_view_html(self, tabname: str, *, none_message) -> str: """Generates HTML for the network Card View section for a tab. This HTML goes into the `extra-networks-pane.html`
    with @@ -480,6 +480,7 @@ class ExtraNetworksPage: Args: tabname: The name of the active tab. + none_message: HTML text to show when there are no cards. Returns: HTML formatted string. @@ -490,24 +491,28 @@ class ExtraNetworksPage: if res == "": dirs = "".join([f"
<li>{x}</li>
  • " for x in self.allowed_directories_for_previews()]) - res = shared.html("extra-networks-no-cards.html").format(dirs=dirs) + res = none_message or shared.html("extra-networks-no-cards.html").format(dirs=dirs) return res - def create_html(self, tabname): + def create_html(self, tabname, *, empty=False): """Generates an HTML string for the current pane. The generated HTML uses `extra-networks-pane.html` as a template. Args: tabname: The name of the active tab. + empty: create an empty HTML page with no items Returns: HTML formatted string. """ self.lister.reset() self.metadata = {} - self.items = {x["name"]: x for x in self.list_items()} + + items_list = [] if empty else self.list_items() + self.items = {x["name"]: x for x in items_list} + # Populate the instance metadata for each item. for item in self.items.values(): metadata = item.get("metadata") @@ -536,7 +541,7 @@ class ExtraNetworksPage: "tree_view_btn_extra_class": tree_view_btn_extra_class, "tree_view_div_extra_class": tree_view_div_extra_class, "tree_html": self.create_tree_view_html(tabname), - "items_html": self.create_card_view_html(tabname), + "items_html": self.create_card_view_html(tabname, none_message="Loading..." if empty else None), } ) @@ -655,7 +660,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): pass elem_id = f"{tabname}_{page.extra_networks_tabname}_cards_html" - page_elem = gr.HTML('Loading...', elem_id=elem_id) + page_elem = gr.HTML(page.create_html(tabname, empty=True), elem_id=elem_id) ui.pages.append(page_elem) editor = page.create_user_metadata_editor(ui, tabname) editor.create_ui() From 85abbbb8fa8f983222e7fffec1e686c06cf4deae Mon Sep 17 00:00:00 2001 From: Andray Date: Thu, 22 Feb 2024 17:04:56 +0400 Subject: [PATCH 091/135] support resizable columns for touch (tablets) --- javascript/resizeHandle.js | 86 +++++++++++++++++++++++++------------- 1 file changed, 56 insertions(+), 30 deletions(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 8c5c51692..13f2b3719 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -65,21 +65,31 @@ resizeHandle.classList.add('resize-handle'); parent.insertBefore(resizeHandle, rightCol); - resizeHandle.addEventListener('mousedown', (evt) => { - if (evt.button !== 0) return; + ['mousedown', 'touchstart'].forEach((eventType) => { + resizeHandle.addEventListener(eventType, (evt) => { + if (eventType.startsWith('mouse')){ + if (evt.button !== 0) return; + } else { + if (evt.changedTouches.length !== 1) return; + } - evt.preventDefault(); - evt.stopPropagation(); + evt.preventDefault(); + evt.stopPropagation(); - document.body.classList.add('resizing'); + document.body.classList.add('resizing'); - R.tracking = true; - R.parent = parent; - R.parentWidth = parent.offsetWidth; - R.handle = resizeHandle; - R.leftCol = leftCol; - R.leftColStartWidth = leftCol.offsetWidth; - R.screenX = evt.screenX; + R.tracking = true; + R.parent = parent; + R.parentWidth = parent.offsetWidth; + R.handle = resizeHandle; + R.leftCol = leftCol; + R.leftColStartWidth = leftCol.offsetWidth; + if (eventType.startsWith('mouse')){ + R.screenX = evt.screenX; + } else { + R.screenX = evt.changedTouches[0].screenX; + } + }); }); resizeHandle.addEventListener('dblclick', (evt) => { @@ -92,30 +102,46 @@ afterResize(parent); } - window.addEventListener('mousemove', (evt) => { - if (evt.button !== 0) return; + ['mousemove', 'touchmove'].forEach((eventType) => { + window.addEventListener(eventType, (evt) => { + if (eventType.startsWith('mouse')){ + 
if (evt.button !== 0) return; + } else { + if (evt.changedTouches.length !== 1) return; + } - if (R.tracking) { - evt.preventDefault(); - evt.stopPropagation(); - - const delta = R.screenX - evt.screenX; - const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH); - setLeftColGridTemplate(R.parent, leftColWidth); - } + if (R.tracking) { + evt.preventDefault(); + evt.stopPropagation(); + + if (eventType.startsWith('mouse')){ + var delta = R.screenX - evt.screenX; + } else { + var delta = R.screenX - evt.changedTouches[0].screenX; + } + const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH); + setLeftColGridTemplate(R.parent, leftColWidth); + } + }); }); - window.addEventListener('mouseup', (evt) => { - if (evt.button !== 0) return; + ['mouseup', 'touchend'].forEach((eventType) => { + window.addEventListener(eventType, (evt) => { + if (eventType.startsWith('mouse')){ + if (evt.button !== 0) return; + } else { + if (evt.changedTouches.length !== 1) return; + } - if (R.tracking) { - evt.preventDefault(); - evt.stopPropagation(); + if (R.tracking) { + evt.preventDefault(); + evt.stopPropagation(); - R.tracking = false; + R.tracking = false; - document.body.classList.remove('resizing'); - } + document.body.classList.remove('resizing'); + } + }); }); From ab1e0fa9bff196b4fd6f4eef560218833e6bb387 Mon Sep 17 00:00:00 2001 From: Andray Date: Thu, 22 Feb 2024 17:16:16 +0400 Subject: [PATCH 092/135] fix lint and console warning --- javascript/resizeHandle.js | 19 +++++++++++-------- 1 file changed, 11 insertions(+), 8 deletions(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 13f2b3719..038f4cb06 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -67,7 +67,7 @@ ['mousedown', 'touchstart'].forEach((eventType) => { resizeHandle.addEventListener(eventType, (evt) => { - if (eventType.startsWith('mouse')){ + if (eventType.startsWith('mouse')) { if (evt.button !== 0) return; } else { if (evt.changedTouches.length !== 1) return; @@ -84,7 +84,7 @@ R.handle = resizeHandle; R.leftCol = leftCol; R.leftColStartWidth = leftCol.offsetWidth; - if (eventType.startsWith('mouse')){ + if (eventType.startsWith('mouse')) { R.screenX = evt.screenX; } else { R.screenX = evt.changedTouches[0].screenX; @@ -104,20 +104,23 @@ ['mousemove', 'touchmove'].forEach((eventType) => { window.addEventListener(eventType, (evt) => { - if (eventType.startsWith('mouse')){ + if (eventType.startsWith('mouse')) { if (evt.button !== 0) return; } else { if (evt.changedTouches.length !== 1) return; } if (R.tracking) { - evt.preventDefault(); + if (eventType.startsWith('mouse')) { + evt.preventDefault(); + } evt.stopPropagation(); - if (eventType.startsWith('mouse')){ - var delta = R.screenX - evt.screenX; + let delta = 0; + if (eventType.startsWith('mouse')) { + delta = R.screenX - evt.screenX; } else { - var delta = R.screenX - evt.changedTouches[0].screenX; + delta = R.screenX - evt.changedTouches[0].screenX; } const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH); setLeftColGridTemplate(R.parent, leftColWidth); @@ -127,7 +130,7 @@ ['mouseup', 'touchend'].forEach((eventType) => { window.addEventListener(eventType, (evt) => { - if (eventType.startsWith('mouse')){ + if (eventType.startsWith('mouse')) { if (evt.button !== 0) return; } else { if 
(evt.changedTouches.length !== 1) return; From 58985e6b372de408150fcd2dbcd6c6d5a17a3f58 Mon Sep 17 00:00:00 2001 From: Andray Date: Thu, 22 Feb 2024 17:22:00 +0400 Subject: [PATCH 093/135] fix lint 2 --- javascript/resizeHandle.js | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 038f4cb06..f22aa51de 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -115,7 +115,7 @@ evt.preventDefault(); } evt.stopPropagation(); - + let delta = 0; if (eventType.startsWith('mouse')) { delta = R.screenX - evt.screenX; From 3f18a09c8638cfd69848a9f39d1841848b57d036 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 22 Feb 2024 21:27:10 +0300 Subject: [PATCH 094/135] make extra network card description plaintext by default, with an option to re-enable HTML as it was --- modules/shared_options.py | 1 + modules/ui_extra_networks.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index bb3752ba6..64f8f1967 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -254,6 +254,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks", "s "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), + "extra_networks_card_description_is_html": OptionInfo(False, "Treat card description as HTML"), "extra_networks_card_order_field": OptionInfo("Path", "Default order field for Extra Networks cards", gr.Dropdown, {"choices": ['Path', 'Name', 'Date Created', 'Date Modified']}).needs_reload_ui(), "extra_networks_card_order": OptionInfo("Ascending", "Default order for Extra Networks cards", gr.Dropdown, {"choices": ['Ascending', 'Descending']}).needs_reload_ui(), "extra_networks_tree_view_default_enabled": OptionInfo(False, "Enables the Extra Networks directory tree view by default").needs_reload_ui(), diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 6874a0244..34c46ed40 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -289,12 +289,16 @@ class ExtraNetworksPage: } ) + description = (item.get("description", "") or "" if shared.opts.extra_networks_card_show_desc else "") + if not shared.opts.extra_networks_card_description_is_html: + description = html.escape(description) + # Some items here might not be used depending on HTML template used. 
args = { "background_image": background_image, "card_clicked": onclick, "copy_path_button": btn_copy_path, - "description": (item.get("description", "") or "" if shared.opts.extra_networks_card_show_desc else ""), + "description": description, "edit_button": btn_edit_item, "local_preview": quote_js(item["local_preview"]), "metadata_button": btn_metadata, From 726aaea0fe87b40e983be73aa3c2fcd29aaf9588 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 22 Feb 2024 21:27:10 +0300 Subject: [PATCH 095/135] make extra network card description plaintext by default, with an option to re-enable HTML as it was --- modules/shared_options.py | 1 + modules/ui_extra_networks.py | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/modules/shared_options.py b/modules/shared_options.py index bb3752ba6..64f8f1967 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -254,6 +254,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks", "s "extra_networks_card_height": OptionInfo(0, "Card height for Extra Networks").info("in pixels"), "extra_networks_card_text_scale": OptionInfo(1.0, "Card text scale", gr.Slider, {"minimum": 0.0, "maximum": 2.0, "step": 0.01}).info("1 = original size"), "extra_networks_card_show_desc": OptionInfo(True, "Show description on card"), + "extra_networks_card_description_is_html": OptionInfo(False, "Treat card description as HTML"), "extra_networks_card_order_field": OptionInfo("Path", "Default order field for Extra Networks cards", gr.Dropdown, {"choices": ['Path', 'Name', 'Date Created', 'Date Modified']}).needs_reload_ui(), "extra_networks_card_order": OptionInfo("Ascending", "Default order for Extra Networks cards", gr.Dropdown, {"choices": ['Ascending', 'Descending']}).needs_reload_ui(), "extra_networks_tree_view_default_enabled": OptionInfo(False, "Enables the Extra Networks directory tree view by default").needs_reload_ui(), diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 6874a0244..34c46ed40 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -289,12 +289,16 @@ class ExtraNetworksPage: } ) + description = (item.get("description", "") or "" if shared.opts.extra_networks_card_show_desc else "") + if not shared.opts.extra_networks_card_description_is_html: + description = html.escape(description) + # Some items here might not be used depending on HTML template used. 
args = { "background_image": background_image, "card_clicked": onclick, "copy_path_button": btn_copy_path, - "description": (item.get("description", "") or "" if shared.opts.extra_networks_card_show_desc else ""), + "description": description, "edit_button": btn_edit_item, "local_preview": quote_js(item["local_preview"]), "metadata_button": btn_metadata, From b53989b195485430cc8083fad7508e201a00123a Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 22 Feb 2024 21:29:10 +0300 Subject: [PATCH 096/135] update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index c7bfc1c98..84d9c485e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -165,6 +165,7 @@ * add FP32 fallback support on sd_vae_approx ([#14046](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14046)) * support XYZ scripts / split hires path from unet ([#14126](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14126)) * allow use of mutiple styles csv files ([#14125](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14125)) +* make extra network card description plaintext by default, with an option (Treat card description as HTML) to re-enable HTML as it was (originally by [#13241](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/13241)) ### Extensions and API: * update gradio to 3.41.2 From 6bc35be10a025fdea649d0a948f076760581728f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 22 Feb 2024 21:29:58 +0300 Subject: [PATCH 097/135] update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 84d9c485e..a8eaca962 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -99,6 +99,7 @@ * Gracefully handle mtime read exception from cache ([#14933](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14933)) * Only trigger interrupt on `Esc` when interrupt button visible ([#14932](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14932)) * Disable prompt token counters option actually disables token counting rather than just hiding results. +* avoid doble upscaling in inpaint ([#14966](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14966)) ### Other: * Assign id for "extra_options". Replace numeric field with slider. 
([#14270](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14270)) From dfab42c949428c759f8ce3dcd3e492f3b6063e3a Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 22 Feb 2024 22:59:26 +0300 Subject: [PATCH 098/135] Merge pull request #15002 from light-and-ray/support_resizable_columns_for_touch_(tablets) support resizable columns for touch (tablets) --- javascript/resizeHandle.js | 87 +++++++++++++++++++++++++------------- 1 file changed, 58 insertions(+), 29 deletions(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 8c5c51692..f22aa51de 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -65,21 +65,31 @@ resizeHandle.classList.add('resize-handle'); parent.insertBefore(resizeHandle, rightCol); - resizeHandle.addEventListener('mousedown', (evt) => { - if (evt.button !== 0) return; + ['mousedown', 'touchstart'].forEach((eventType) => { + resizeHandle.addEventListener(eventType, (evt) => { + if (eventType.startsWith('mouse')) { + if (evt.button !== 0) return; + } else { + if (evt.changedTouches.length !== 1) return; + } - evt.preventDefault(); - evt.stopPropagation(); + evt.preventDefault(); + evt.stopPropagation(); - document.body.classList.add('resizing'); + document.body.classList.add('resizing'); - R.tracking = true; - R.parent = parent; - R.parentWidth = parent.offsetWidth; - R.handle = resizeHandle; - R.leftCol = leftCol; - R.leftColStartWidth = leftCol.offsetWidth; - R.screenX = evt.screenX; + R.tracking = true; + R.parent = parent; + R.parentWidth = parent.offsetWidth; + R.handle = resizeHandle; + R.leftCol = leftCol; + R.leftColStartWidth = leftCol.offsetWidth; + if (eventType.startsWith('mouse')) { + R.screenX = evt.screenX; + } else { + R.screenX = evt.changedTouches[0].screenX; + } + }); }); resizeHandle.addEventListener('dblclick', (evt) => { @@ -92,30 +102,49 @@ afterResize(parent); } - window.addEventListener('mousemove', (evt) => { - if (evt.button !== 0) return; + ['mousemove', 'touchmove'].forEach((eventType) => { + window.addEventListener(eventType, (evt) => { + if (eventType.startsWith('mouse')) { + if (evt.button !== 0) return; + } else { + if (evt.changedTouches.length !== 1) return; + } - if (R.tracking) { - evt.preventDefault(); - evt.stopPropagation(); + if (R.tracking) { + if (eventType.startsWith('mouse')) { + evt.preventDefault(); + } + evt.stopPropagation(); - const delta = R.screenX - evt.screenX; - const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH); - setLeftColGridTemplate(R.parent, leftColWidth); - } + let delta = 0; + if (eventType.startsWith('mouse')) { + delta = R.screenX - evt.screenX; + } else { + delta = R.screenX - evt.changedTouches[0].screenX; + } + const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH); + setLeftColGridTemplate(R.parent, leftColWidth); + } + }); }); - window.addEventListener('mouseup', (evt) => { - if (evt.button !== 0) return; + ['mouseup', 'touchend'].forEach((eventType) => { + window.addEventListener(eventType, (evt) => { + if (eventType.startsWith('mouse')) { + if (evt.button !== 0) return; + } else { + if (evt.changedTouches.length !== 1) return; + } - if (R.tracking) { - evt.preventDefault(); - evt.stopPropagation(); + if (R.tracking) { + evt.preventDefault(); + evt.stopPropagation(); - R.tracking = false; + R.tracking = false; - document.body.classList.remove('resizing'); - } 
+ document.body.classList.remove('resizing'); + } + }); }); From 3069716510c8ae9a95b2d04061c3f86f67d1089c Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Thu, 22 Feb 2024 23:00:14 +0300 Subject: [PATCH 099/135] update changelog --- CHANGELOG.md | 1 + 1 file changed, 1 insertion(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index a8eaca962..9d04dd527 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -41,6 +41,7 @@ * When counting tokens, also include enabled styles (can be disabled in settings to revert to previous behavior) * Configuration for the [📂] button for image gallery ([#14947](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14947)) * Support inference with LyCORIS BOFT networks ([#14871](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14871)) +* support resizable columns for touch (tablets) ([#15002](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15002)) ### Extensions and API: * Removed packages from requirements: basicsr, gfpgan, realesrgan; as well as their dependencies: absl-py, addict, beautifulsoup4, future, gdown, grpcio, importlib-metadata, lmdb, lpips, Markdown, platformdirs, PySocks, soupsieve, tb-nightly, tensorboard-data-server, tomli, Werkzeug, yapf, zipp, soupsieve From 9211febbfc9ce45bdd2dc33e73939d67924c3f1e Mon Sep 17 00:00:00 2001 From: Andray Date: Fri, 23 Feb 2024 02:20:42 +0400 Subject: [PATCH 100/135] ResizeHandleRow - allow overriden column scale parametr --- javascript/resizeHandle.js | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index f22aa51de..cd3e68c6c 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -1,6 +1,5 @@ (function() { const GRADIO_MIN_WIDTH = 320; - const GRID_TEMPLATE_COLUMNS = '1fr 16px 1fr'; const PAD = 16; const DEBOUNCE_TIME = 100; @@ -37,7 +36,7 @@ } function afterResize(parent) { - if (displayResizeHandle(parent) && parent.style.gridTemplateColumns != GRID_TEMPLATE_COLUMNS) { + if (displayResizeHandle(parent) && parent.style.gridTemplateColumns != parent.style.originalGridTemplateColumns) { const oldParentWidth = R.parentWidth; const newParentWidth = parent.offsetWidth; const widthL = parseInt(parent.style.gridTemplateColumns.split(' ')[0]); @@ -59,7 +58,9 @@ parent.style.display = 'grid'; parent.style.gap = '0'; - parent.style.gridTemplateColumns = GRID_TEMPLATE_COLUMNS; + const gridTemplateColumns = `${parent.children[0].style.flexGrow}fr ${PAD}px ${parent.children[1].style.flexGrow}fr`; + parent.style.gridTemplateColumns = gridTemplateColumns; + parent.style.originalGridTemplateColumns = gridTemplateColumns; const resizeHandle = document.createElement('div'); resizeHandle.classList.add('resize-handle'); @@ -96,7 +97,7 @@ evt.preventDefault(); evt.stopPropagation(); - parent.style.gridTemplateColumns = GRID_TEMPLATE_COLUMNS; + parent.style.gridTemplateColumns = parent.style.originalGridTemplateColumns; }); afterResize(parent); From ed594d7ba69cf065222348f5aabc0374525d8ad5 Mon Sep 17 00:00:00 2001 From: DB Eriospermum Date: Fri, 23 Feb 2024 13:37:37 +0800 Subject: [PATCH 101/135] fix: the `split_threshold` parameter does not work when running Split oversized images --- scripts/postprocessing_split_oversized.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/postprocessing_split_oversized.py b/scripts/postprocessing_split_oversized.py index c4a03160f..133e199b8 100644 --- a/scripts/postprocessing_split_oversized.py +++ 
b/scripts/postprocessing_split_oversized.py @@ -61,7 +61,7 @@ class ScriptPostprocessingSplitOversized(scripts_postprocessing.ScriptPostproces ratio = (pp.image.height * width) / (pp.image.width * height) inverse_xy = True - if ratio >= 1.0 and ratio > split_threshold: + if ratio >= 1.0 or ratio > split_threshold: return result, *others = split_pic(pp.image, inverse_xy, width, height, overlap_ratio) From bab918f049dd42f53eebc241ad27607ca63cc57b Mon Sep 17 00:00:00 2001 From: Andray Date: Fri, 23 Feb 2024 18:34:24 +0400 Subject: [PATCH 102/135] fix resize-handle for vertical layout --- javascript/resizeHandle.js | 2 ++ 1 file changed, 2 insertions(+) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index f22aa51de..a3164b4ff 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -23,12 +23,14 @@ function displayResizeHandle(parent) { if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) { parent.style.display = 'flex'; + parent.querySelector('.resize-handle').style.display = "none"; if (R.handle != null) { R.handle.style.opacity = '0'; } return false; } else { parent.style.display = 'grid'; + parent.querySelector('.resize-handle').style.display = 'block'; if (R.handle != null) { R.handle.style.opacity = '100'; } From 3a99824638027ff84cf6c4af3421741cc091e617 Mon Sep 17 00:00:00 2001 From: Andray Date: Fri, 23 Feb 2024 20:26:56 +0400 Subject: [PATCH 103/135] register_tmp_file also with mtime --- modules/ui_tempdir.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 621ed1eca..ecd6bdec3 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -35,7 +35,9 @@ def save_pil_to_file(self, pil_image, dir=None, format="png"): already_saved_as = getattr(pil_image, 'already_saved_as', None) if already_saved_as and os.path.isfile(already_saved_as): register_tmp_file(shared.demo, already_saved_as) - return f'{already_saved_as}?{os.path.getmtime(already_saved_as)}' + filename_with_mtime = f'{already_saved_as}?{os.path.getmtime(already_saved_as)}' + register_tmp_file(shared.demo, filename_with_mtime) + return filename_with_mtime if shared.opts.temp_dir != "": dir = shared.opts.temp_dir From 648f6a8e0cdf5881cbec9697792e6294c54422d4 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Sun, 25 Feb 2024 23:28:36 -0500 Subject: [PATCH 104/135] dont need to preserve alphas_cumprod_original --- modules/sd_samplers_common.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index c9578ffe6..7ab1bf65a 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -181,11 +181,9 @@ def apply_refiner(cfg_denoiser): cfg_denoiser.p.extra_generation_params['Refiner'] = refiner_checkpoint_info.short_title cfg_denoiser.p.extra_generation_params['Refiner switch at'] = refiner_switch_at - alphas_cumprod_original = cfg_denoiser.p.sd_model.alphas_cumprod_original alphas_cumprod = cfg_denoiser.p.sd_model.alphas_cumprod with sd_models.SkipWritingToConfig(): sd_models.reload_model_weights(info=refiner_checkpoint_info) - cfg_denoiser.p.sd_model.alphas_cumprod_original = alphas_cumprod_original cfg_denoiser.p.sd_model.alphas_cumprod = alphas_cumprod devices.torch_gc() From a10c8df8761c01801bac60d7977ae7e997ab51b0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 26 Feb 2024 07:12:12 +0300 Subject: [PATCH 105/135] Merge pull request #14973 from 
AUTOMATIC1111/Fix-new-oft-boft Fix the OFT/BOFT bugs when using new LyCORIS implementation --- extensions-builtin/Lora/network_oft.py | 50 +++++++++++++------------- 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/extensions-builtin/Lora/network_oft.py b/extensions-builtin/Lora/network_oft.py index d658ad109..7821a8a7d 100644 --- a/extensions-builtin/Lora/network_oft.py +++ b/extensions-builtin/Lora/network_oft.py @@ -1,6 +1,5 @@ import torch import network -from lyco_helpers import factorization from einops import rearrange @@ -22,24 +21,24 @@ class NetworkModuleOFT(network.NetworkModule): self.org_module: list[torch.Module] = [self.sd_module] self.scale = 1.0 - self.is_kohya = False + self.is_R = False self.is_boft = False - # kohya-ss + # kohya-ss/New LyCORIS OFT/BOFT if "oft_blocks" in weights.w.keys(): - self.is_kohya = True self.oft_blocks = weights.w["oft_blocks"] # (num_blocks, block_size, block_size) - self.alpha = weights.w["alpha"] # alpha is constraint + self.alpha = weights.w.get("alpha", None) # alpha is constraint self.dim = self.oft_blocks.shape[0] # lora dim - # LyCORIS OFT + # Old LyCORIS OFT elif "oft_diag" in weights.w.keys(): + self.is_R = True self.oft_blocks = weights.w["oft_diag"] # self.alpha is unused self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size) - # LyCORIS BOFT - if weights.w["oft_diag"].dim() == 4: - self.is_boft = True + # LyCORIS BOFT + if self.oft_blocks.dim() == 4: + self.is_boft = True self.rescale = weights.w.get('rescale', None) if self.rescale is not None: self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1)) @@ -55,30 +54,29 @@ class NetworkModuleOFT(network.NetworkModule): elif is_other_linear: self.out_dim = self.sd_module.embed_dim - if self.is_kohya: - self.constraint = self.alpha * self.out_dim - self.num_blocks = self.dim - self.block_size = self.out_dim // self.dim + self.num_blocks = self.dim + self.block_size = self.out_dim // self.dim + self.constraint = (0 if self.alpha is None else self.alpha) * self.out_dim + if self.is_R: + self.constraint = None + self.block_size = self.dim + self.num_blocks = self.out_dim // self.dim elif self.is_boft: - self.constraint = None - self.boft_m = weights.w["oft_diag"].shape[0] - self.block_num = weights.w["oft_diag"].shape[1] - self.block_size = weights.w["oft_diag"].shape[2] + self.boft_m = self.oft_blocks.shape[0] + self.num_blocks = self.oft_blocks.shape[1] + self.block_size = self.oft_blocks.shape[2] self.boft_b = self.block_size - #self.block_size, self.block_num = butterfly_factor(self.out_dim, self.dim) - else: - self.constraint = None - self.block_size, self.num_blocks = factorization(self.out_dim, self.dim) def calc_updown(self, orig_weight): oft_blocks = self.oft_blocks.to(orig_weight.device) eye = torch.eye(self.block_size, device=oft_blocks.device) - if self.is_kohya: - block_Q = oft_blocks - oft_blocks.transpose(1, 2) # ensure skew-symmetric orthogonal matrix - norm_Q = torch.norm(block_Q.flatten()) - new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device)) - block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) + if not self.is_R: + block_Q = oft_blocks - oft_blocks.transpose(-1, -2) # ensure skew-symmetric orthogonal matrix + if self.constraint != 0: + norm_Q = torch.norm(block_Q.flatten()) + new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device)) + block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8)) oft_blocks = torch.matmul(eye + block_Q, (eye - 
block_Q).float().inverse()) R = oft_blocks.to(orig_weight.device) From 78e421e4ea833e163367f26232656256ddc0bbc8 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 26 Feb 2024 07:12:31 +0300 Subject: [PATCH 106/135] Merge pull request #14995 from dtlnor/14591-bug-the-categories-layout-is-different-when-localization-is-on Fix #14591 using translated content to do categories mapping --- javascript/settings.js | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/settings.js b/javascript/settings.js index e6009290a..b2d981c21 100644 --- a/javascript/settings.js +++ b/javascript/settings.js @@ -55,8 +55,8 @@ onOptionsChanged(function() { }); opts._categories.forEach(function(x) { - var section = x[0]; - var category = x[1]; + var section = localization[x[0]] ?? x[0]; + var category = localization[x[1]] ?? x[1]; var span = document.createElement('SPAN'); span.textContent = category; From e651ca8adb00b14704aa564dbeb0bef5f8698f0a Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 26 Feb 2024 07:16:24 +0300 Subject: [PATCH 107/135] Merge pull request #15004 from light-and-ray/ResizeHandleRow_-_allow_overriden_column_scale_parametr ResizeHandleRow - allow overriden column scale parametr --- javascript/resizeHandle.js | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index f22aa51de..cd3e68c6c 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -1,6 +1,5 @@ (function() { const GRADIO_MIN_WIDTH = 320; - const GRID_TEMPLATE_COLUMNS = '1fr 16px 1fr'; const PAD = 16; const DEBOUNCE_TIME = 100; @@ -37,7 +36,7 @@ } function afterResize(parent) { - if (displayResizeHandle(parent) && parent.style.gridTemplateColumns != GRID_TEMPLATE_COLUMNS) { + if (displayResizeHandle(parent) && parent.style.gridTemplateColumns != parent.style.originalGridTemplateColumns) { const oldParentWidth = R.parentWidth; const newParentWidth = parent.offsetWidth; const widthL = parseInt(parent.style.gridTemplateColumns.split(' ')[0]); @@ -59,7 +58,9 @@ parent.style.display = 'grid'; parent.style.gap = '0'; - parent.style.gridTemplateColumns = GRID_TEMPLATE_COLUMNS; + const gridTemplateColumns = `${parent.children[0].style.flexGrow}fr ${PAD}px ${parent.children[1].style.flexGrow}fr`; + parent.style.gridTemplateColumns = gridTemplateColumns; + parent.style.originalGridTemplateColumns = gridTemplateColumns; const resizeHandle = document.createElement('div'); resizeHandle.classList.add('resize-handle'); @@ -96,7 +97,7 @@ evt.preventDefault(); evt.stopPropagation(); - parent.style.gridTemplateColumns = GRID_TEMPLATE_COLUMNS; + parent.style.gridTemplateColumns = parent.style.originalGridTemplateColumns; }); afterResize(parent); From f97e3548e54cfc914c195d6d4d48792e4d021261 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 26 Feb 2024 07:16:42 +0300 Subject: [PATCH 108/135] Merge pull request #15006 from imnodb/master fix: the `split_threshold` parameter does not work when running Split oversized images --- scripts/postprocessing_split_oversized.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/postprocessing_split_oversized.py b/scripts/postprocessing_split_oversized.py index c4a03160f..133e199b8 100644 --- a/scripts/postprocessing_split_oversized.py +++ b/scripts/postprocessing_split_oversized.py @@ -61,7 +61,7 @@ class ScriptPostprocessingSplitOversized(scripts_postprocessing.ScriptPostproces ratio = 
(pp.image.height * width) / (pp.image.width * height) inverse_xy = True - if ratio >= 1.0 and ratio > split_threshold: + if ratio >= 1.0 or ratio > split_threshold: return result, *others = split_pic(pp.image, inverse_xy, width, height, overlap_ratio) From eaf5e0289a2e71d39322c6258a1e2d7dc1fd2e34 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 26 Feb 2024 07:37:26 +0300 Subject: [PATCH 109/135] update cahngelog --- CHANGELOG.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9d04dd527..8fa3c47c5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,7 +40,7 @@ * Add an option to disable normalization after calculating emphasis. ([#14874](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14874)) * When counting tokens, also include enabled styles (can be disabled in settings to revert to previous behavior) * Configuration for the [📂] button for image gallery ([#14947](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14947)) -* Support inference with LyCORIS BOFT networks ([#14871](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14871)) +* Support inference with LyCORIS BOFT networks ([#14871](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14871), [#14973](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14973)) * support resizable columns for touch (tablets) ([#15002](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15002)) ### Extensions and API: @@ -59,6 +59,7 @@ * modules/api/api.py: add api endpoint to refresh embeddings list ([#14715](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14715)) * set_named_arg ([#14773](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14773)) * add before_token_counter callback and use it for prompt comments +* ResizeHandleRow - allow overriden column scale parameter ([#15004](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15004)) ### Performance * Massive performance improvement for extra networks directories with a huge number of files in them in an attempt to tackle #14507 ([#14528](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14528)) @@ -101,6 +102,8 @@ * Only trigger interrupt on `Esc` when interrupt button visible ([#14932](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14932)) * Disable prompt token counters option actually disables token counting rather than just hiding results. * avoid doble upscaling in inpaint ([#14966](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14966)) +* Fix #14591 using translated content to do categories mapping ([#14995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14995)) +* fix: the `split_threshold` parameter does not work when running Split oversized images ([#15006](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15006)) ### Other: * Assign id for "extra_options". Replace numeric field with slider. 
([#14270](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14270)) From 6e6cc2922d39fff4029d47c316c22a1c152680ce Mon Sep 17 00:00:00 2001 From: Andray Date: Mon, 26 Feb 2024 13:37:29 +0400 Subject: [PATCH 110/135] fix resize handle --- javascript/resizeHandle.js | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index a3164b4ff..ce67ca672 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -23,17 +23,11 @@ function displayResizeHandle(parent) { if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) { parent.style.display = 'flex'; - parent.querySelector('.resize-handle').style.display = "none"; - if (R.handle != null) { - R.handle.style.opacity = '0'; - } + parent.resizeHandle.style.display = "none"; return false; } else { parent.style.display = 'grid'; - parent.querySelector('.resize-handle').style.display = 'block'; - if (R.handle != null) { - R.handle.style.opacity = '100'; - } + parent.resizeHandle.style.display = "block"; return true; } } @@ -66,6 +60,7 @@ const resizeHandle = document.createElement('div'); resizeHandle.classList.add('resize-handle'); parent.insertBefore(resizeHandle, rightCol); + parent.resizeHandle = resizeHandle; ['mousedown', 'touchstart'].forEach((eventType) => { resizeHandle.addEventListener(eventType, (evt) => { @@ -83,7 +78,6 @@ R.tracking = true; R.parent = parent; R.parentWidth = parent.offsetWidth; - R.handle = resizeHandle; R.leftCol = leftCol; R.leftColStartWidth = leftCol.offsetWidth; if (eventType.startsWith('mouse')) { From dd4b0b95d5a59fa96759e5eb3937c9d268ebc2b9 Mon Sep 17 00:00:00 2001 From: Andray Date: Mon, 26 Feb 2024 16:30:15 +0400 Subject: [PATCH 111/135] cmd args: allow unix filenames and filenames max length --- modules/cmd_args.py | 4 +++- modules/images.py | 7 +++++-- 2 files changed, 8 insertions(+), 3 deletions(-) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 312dabffc..be7a59873 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -120,4 +120,6 @@ parser.add_argument('--api-server-stop', action='store_true', help='enable serve parser.add_argument('--timeout-keep-alive', type=int, default=30, help='set timeout_keep_alive for uvicorn') parser.add_argument("--disable-all-extensions", action='store_true', help="prevent all extensions from running regardless of any other settings", default=False) parser.add_argument("--disable-extra-extensions", action='store_true', help="prevent all extensions except built-in from running regardless of any other settings", default=False) -parser.add_argument("--skip-load-model-at-start", action='store_true', help="if load a model at web start, only take effect when --nowebui", ) +parser.add_argument("--skip-load-model-at-start", action='store_true', help="if load a model at web start, only take effect when --nowebui") +parser.add_argument("--unix-filenames-sanitization", action='store_true', help="allow any symbols except '/' in filenames. May conflict with your browser and file system") +parser.add_argument("--filenames-max-length", type=int, default=128, help='maximal length of filenames of saved images. 
If you override it, it can conflict with your file system') diff --git a/modules/images.py b/modules/images.py index b6f2358c3..e7d111723 100644 --- a/modules/images.py +++ b/modules/images.py @@ -321,13 +321,16 @@ def resize_image(resize_mode, im, width, height, upscaler_name=None): return res -invalid_filename_chars = '#<>:"/\\|?*\n\r\t' +if not shared.cmd_opts.unix_filenames_sanitization: + invalid_filename_chars = '#<>:"/\\|?*\n\r\t' +else: + invalid_filename_chars = '/' invalid_filename_prefix = ' ' invalid_filename_postfix = ' .' re_nonletters = re.compile(r'[\s' + string.punctuation + ']+') re_pattern = re.compile(r"(.*?)(?:\[([^\[\]]+)\]|$)") re_pattern_arg = re.compile(r"(.*)<([^>]*)>$") -max_filename_part_length = 128 +max_filename_part_length = shared.cmd_opts.filenames_max_length NOTHING_AND_SKIP_PREVIOUS_TEXT = object() From 3a618e3d24394aef0f8682ded713ef1b6c265553 Mon Sep 17 00:00:00 2001 From: catboxanon <122327233+catboxanon@users.noreply.github.com> Date: Mon, 26 Feb 2024 12:44:57 -0500 Subject: [PATCH 112/135] Fix normalized filepath, resolve -> absolute https://github.com/lllyasviel/stable-diffusion-webui-forge/issues/313 https://github.com/AUTOMATIC1111/stable-diffusion-webui/discussions/14942#discussioncomment-8550050 --- modules/paths_internal.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/paths_internal.py b/modules/paths_internal.py index 2ed1392a4..6058b0cde 100644 --- a/modules/paths_internal.py +++ b/modules/paths_internal.py @@ -7,7 +7,7 @@ import shlex from pathlib import Path -normalized_filepath = lambda filepath: str(Path(filepath).resolve()) +normalized_filepath = lambda filepath: str(Path(filepath).absolute()) commandline_args = os.environ.get('COMMANDLINE_ARGS', "") sys.argv += shlex.split(commandline_args) From e2cd92ea230801ecc5fc7ed90e14ab55c946fb4a Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Mon, 26 Feb 2024 23:43:27 -0500 Subject: [PATCH 113/135] move refiner fix to sd_models.py --- modules/sd_models.py | 32 ++++++++++++++++++++++++++++++++ 1 file changed, 32 insertions(+) diff --git a/modules/sd_models.py b/modules/sd_models.py index 2c0457715..fbd53adba 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -15,6 +15,7 @@ from ldm.util import instantiate_from_config from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack, patches from modules.timer import Timer +from modules.shared import opts import tomesd import numpy as np @@ -549,6 +550,36 @@ def repair_config(sd_config): karlo_path = os.path.join(paths.models_path, 'karlo') sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path) +def apply_alpha_schedule_override(sd_model, p=None): + def rescale_zero_terminal_snr_abar(alphas_cumprod): + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= (alphas_bar_sqrt_T) + + # Scale so the first timestep is back to the old value. 
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas_bar[-1] = 4.8973451890853435e-08 + return alphas_bar + + if hasattr(sd_model, 'alphas_cumprod') and hasattr(sd_model, 'alphas_cumprod_original'): + sd_model.alphas_cumprod = sd_model.alphas_cumprod_original.to(shared.device) + + if opts.use_downcasted_alpha_bar: + if p is not None: + p.extra_generation_params['Downcast alphas_cumprod'] = opts.use_downcasted_alpha_bar + sd_model.alphas_cumprod = sd_model.alphas_cumprod.half().to(shared.device) + if opts.sd_noise_schedule == "Zero Terminal SNR": + if p is not None: + p.extra_generation_params['Noise Schedule'] = opts.sd_noise_schedule + sd_model.alphas_cumprod = rescale_zero_terminal_snr_abar(sd_model.alphas_cumprod).to(shared.device) sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight' sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight' @@ -812,6 +843,7 @@ def reload_model_weights(sd_model=None, info=None, forced_reload=False): sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer) if not forced_reload and sd_model is not None and sd_model.sd_checkpoint_info.filename == checkpoint_info.filename: + apply_alpha_schedule_override(sd_model) return sd_model if sd_model is not None: From 94f23d00a76e7988f4b73ced1fa2922801e893fb Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Mon, 26 Feb 2024 23:44:58 -0500 Subject: [PATCH 114/135] move alphas cumprod override out of processing --- modules/processing.py | 28 +--------------------------- 1 file changed, 1 insertion(+), 27 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index d208a922d..411c7c3f4 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -915,33 +915,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if p.n_iter > 1: shared.state.job = f"Batch {n+1} out of {p.n_iter}" - def rescale_zero_terminal_snr_abar(alphas_cumprod): - alphas_bar_sqrt = alphas_cumprod.sqrt() - - # Store old values. - alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() - alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() - - # Shift so the last timestep is zero. - alphas_bar_sqrt -= (alphas_bar_sqrt_T) - - # Scale so the first timestep is back to the old value. 
- alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) - - # Convert alphas_bar_sqrt to betas - alphas_bar = alphas_bar_sqrt**2 # Revert sqrt - alphas_bar[-1] = 4.8973451890853435e-08 - return alphas_bar - - if hasattr(p.sd_model, 'alphas_cumprod') and hasattr(p.sd_model, 'alphas_cumprod_original'): - p.sd_model.alphas_cumprod = p.sd_model.alphas_cumprod_original.to(shared.device) - - if opts.use_downcasted_alpha_bar: - p.extra_generation_params['Downcast alphas_cumprod'] = opts.use_downcasted_alpha_bar - p.sd_model.alphas_cumprod = p.sd_model.alphas_cumprod.half().to(shared.device) - if opts.sd_noise_schedule == "Zero Terminal SNR": - p.extra_generation_params['Noise Schedule'] = opts.sd_noise_schedule - p.sd_model.alphas_cumprod = rescale_zero_terminal_snr_abar(p.sd_model.alphas_cumprod).to(shared.device) + sd_models.apply_alpha_schedule_override(p.sd_model, p) with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast(): samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) From 4dae91a1fe960ad9a9774f8f5407ef67c1a109f9 Mon Sep 17 00:00:00 2001 From: drhead <1313496+drhead@users.noreply.github.com> Date: Mon, 26 Feb 2024 23:46:10 -0500 Subject: [PATCH 115/135] remove alphas cumprod fix from samplers_common --- modules/sd_samplers_common.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 7ab1bf65a..6bd38e12a 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -181,10 +181,8 @@ def apply_refiner(cfg_denoiser): cfg_denoiser.p.extra_generation_params['Refiner'] = refiner_checkpoint_info.short_title cfg_denoiser.p.extra_generation_params['Refiner switch at'] = refiner_switch_at - alphas_cumprod = cfg_denoiser.p.sd_model.alphas_cumprod with sd_models.SkipWritingToConfig(): sd_models.reload_model_weights(info=refiner_checkpoint_info) - cfg_denoiser.p.sd_model.alphas_cumprod = alphas_cumprod devices.torch_gc() cfg_denoiser.p.setup_conds() From 3ba575216a8e7df307562ba8bc68a8717798daef Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 27 Feb 2024 15:10:51 +0400 Subject: [PATCH 116/135] dat cmd flag --- modules/cmd_args.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 312dabffc..213cba98c 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -53,6 +53,7 @@ parser.add_argument("--gfpgan-models-path", type=normalized_filepath, help="Path parser.add_argument("--esrgan-models-path", type=normalized_filepath, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN')) parser.add_argument("--bsrgan-models-path", type=normalized_filepath, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN')) parser.add_argument("--realesrgan-models-path", type=normalized_filepath, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN')) +parser.add_argument("--dat-models-path", type=normalized_filepath, help="Path to directory with DAT model file(s).", default=os.path.join(models_path, 'DAT')) parser.add_argument("--clip-models-path", type=normalized_filepath, help="Path to directory with CLIP model file(s).", default=None) parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers") parser.add_argument("--force-enable-xformers", 
action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work") From 44bce3c74ee745b9776d965e02ae006e6b4fe3fb Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 27 Feb 2024 18:31:36 +0400 Subject: [PATCH 117/135] resize handle for extra networks --- html/extra-networks-pane.html | 6 +++--- javascript/extraNetworks.js | 19 ++++++++++++++++++- javascript/resizeHandle.js | 22 +++++++++++++++++++--- modules/shared_options.py | 1 + modules/ui_extra_networks.py | 9 ++++++++- style.css | 3 ++- 6 files changed, 51 insertions(+), 9 deletions(-) diff --git a/html/extra-networks-pane.html b/html/extra-networks-pane.html index 0c763f710..f54344aaa 100644 --- a/html/extra-networks-pane.html +++ b/html/extra-networks-pane.html @@ -44,11 +44,11 @@
    -
    -
    +
    +
    {tree_html}
    -
    +
    {items_html}
    diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index d5855fe96..ff808d7aa 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -447,7 +447,24 @@ function extraNetworksControlTreeViewOnClick(event, tabname, extra_networks_tabn * @param tabname The name of the active tab in the sd webui. Ex: txt2img, img2img, etc. * @param extra_networks_tabname The id of the active extraNetworks tab. Ex: lora, checkpoints, etc. */ - gradioApp().getElementById(tabname + "_" + extra_networks_tabname + "_tree").classList.toggle("hidden"); + const tree = gradioApp().getElementById(tabname + "_" + extra_networks_tabname + "_tree"); + const parent = tree.parentElement; + let resizeHandle = parent.querySelector('.resize-handle'); + tree.classList.toggle("hidden"); + + if (tree.classList.contains("hidden")){ + tree.style.display = 'none'; + resizeHandle.style.display = 'none'; + parent.style.display = 'flex'; + } else { + tree.style.display = 'block'; + if (!resizeHandle) { + setupResizeHandle(parent); + resizeHandle = parent.querySelector('.resize-handle'); + } + resizeHandle.style.display = 'block'; + parent.style.display = 'grid'; + } event.currentTarget.classList.toggle("extra-network-control--enabled"); } diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 6560372cc..5fb5dd4f3 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -39,7 +39,7 @@ const ratio = newParentWidth / oldParentWidth; - const newWidthL = Math.max(Math.floor(ratio * widthL), GRADIO_MIN_WIDTH); + const newWidthL = Math.max(Math.floor(ratio * widthL), parent.minLeftColWidth); setLeftColGridTemplate(parent, newWidthL); R.parentWidth = newParentWidth; @@ -54,7 +54,15 @@ parent.style.display = 'grid'; parent.style.gap = '0'; - const gridTemplateColumns = `${parent.children[0].style.flexGrow}fr ${PAD}px ${parent.children[1].style.flexGrow}fr`; + let leftColTemplate = ""; + if (parent.children[0].style.flexGrow) { + leftColTemplate = `${parent.children[0].style.flexGrow}fr`; + parent.minLeftColWidth = GRADIO_MIN_WIDTH; + } else { + leftColTemplate = parent.children[0].style.flexBasis; + parent.minLeftColWidth = parent.children[0].style.flexBasis.slice(0, -2); + } + const gridTemplateColumns = `${leftColTemplate} ${PAD}px ${parent.children[1].style.flexGrow}fr`; parent.style.gridTemplateColumns = gridTemplateColumns; parent.style.originalGridTemplateColumns = gridTemplateColumns; @@ -119,7 +127,7 @@ } else { delta = R.screenX - evt.changedTouches[0].screenX; } - const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), GRADIO_MIN_WIDTH); + const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), R.parent.minLeftColWidth); setLeftColGridTemplate(R.parent, leftColWidth); } }); @@ -165,3 +173,11 @@ onUiLoaded(function() { } } }); + +function setupExtraNetworksResizeHandle() { + for (var elem of document.body.querySelectorAll('.resize-handle-row')) { + if (!elem.querySelector('.resize-handle') && !elem.children[0].classList.contains("hidden")) { + setupResizeHandle(elem); + } + } +} \ No newline at end of file diff --git a/modules/shared_options.py b/modules/shared_options.py index 64f8f1967..285c54158 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -258,6 +258,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks", "s "extra_networks_card_order_field": OptionInfo("Path", "Default order 
field for Extra Networks cards", gr.Dropdown, {"choices": ['Path', 'Name', 'Date Created', 'Date Modified']}).needs_reload_ui(), "extra_networks_card_order": OptionInfo("Ascending", "Default order for Extra Networks cards", gr.Dropdown, {"choices": ['Ascending', 'Descending']}).needs_reload_ui(), "extra_networks_tree_view_default_enabled": OptionInfo(False, "Enables the Extra Networks directory tree view by default").needs_reload_ui(), + "extra_networks_tree_view_min_width": OptionInfo(180, "Minimal width for the Extra Networks directory tree view", gr.Number).needs_reload_ui(), "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 34c46ed40..09705a98c 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -531,9 +531,13 @@ class ExtraNetworksPage: data_sortkey = f"{data_sortmode}-{data_sortdir}-{len(self.items)}" tree_view_btn_extra_class = "" tree_view_div_extra_class = "hidden" + tree_view_div_default_display = "none" + extra_network_pane_content_default_display = "flex" if shared.opts.extra_networks_tree_view_default_enabled: tree_view_btn_extra_class = "extra-network-control--enabled" tree_view_div_extra_class = "" + tree_view_div_default_display = "block" + extra_network_pane_content_default_display = "grid" return self.pane_tpl.format( **{ @@ -546,6 +550,9 @@ class ExtraNetworksPage: "tree_view_div_extra_class": tree_view_div_extra_class, "tree_html": self.create_tree_view_html(tabname), "items_html": self.create_card_view_html(tabname, none_message="Loading..." 
if empty else None), + "extra_networks_tree_view_min_width": shared.opts.extra_networks_tree_view_min_width, + "tree_view_div_default_display": tree_view_div_default_display, + "extra_network_pane_content_default_display": extra_network_pane_content_default_display, } ) @@ -703,7 +710,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): create_html() return ui.pages_contents - interface.load(fn=pages_html, inputs=[], outputs=ui.pages) + interface.load(fn=pages_html, inputs=[], outputs=ui.pages).then(fn=lambda: None, _js='setupExtraNetworksResizeHandle') return ui diff --git a/style.css b/style.css index 8ce78ff0c..004038f89 100644 --- a/style.css +++ b/style.css @@ -1615,9 +1615,10 @@ body.resizing .resize-handle { display: inline-flex; visibility: hidden; color: var(--button-secondary-text-color); - + width: 0; } .extra-network-tree .tree-list-content:hover .button-row { visibility: visible; + width: auto; } From de7604fa77180ac8d51da4f8a59c5a27bbe25cdc Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 27 Feb 2024 18:38:38 +0400 Subject: [PATCH 118/135] lint --- javascript/extraNetworks.js | 2 +- javascript/resizeHandle.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index ff808d7aa..4e30261b8 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -452,7 +452,7 @@ function extraNetworksControlTreeViewOnClick(event, tabname, extra_networks_tabn let resizeHandle = parent.querySelector('.resize-handle'); tree.classList.toggle("hidden"); - if (tree.classList.contains("hidden")){ + if (tree.classList.contains("hidden")) { tree.style.display = 'none'; resizeHandle.style.display = 'none'; parent.style.display = 'flex'; diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 5fb5dd4f3..cf2c778bb 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -180,4 +180,4 @@ function setupExtraNetworksResizeHandle() { setupResizeHandle(elem); } } -} \ No newline at end of file +} From b4c44e659ba3931d4bee0a0061c674e594cc639f Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 27 Feb 2024 23:17:52 +0400 Subject: [PATCH 119/135] fix on reload with changed show all loras setting --- javascript/extraNetworks.js | 6 ++++-- javascript/resizeHandle.js | 15 ++++++--------- modules/ui_extra_networks.py | 4 ++-- 3 files changed, 12 insertions(+), 13 deletions(-) diff --git a/javascript/extraNetworks.js b/javascript/extraNetworks.js index 4e30261b8..1610698bf 100644 --- a/javascript/extraNetworks.js +++ b/javascript/extraNetworks.js @@ -454,16 +454,18 @@ function extraNetworksControlTreeViewOnClick(event, tabname, extra_networks_tabn if (tree.classList.contains("hidden")) { tree.style.display = 'none'; - resizeHandle.style.display = 'none'; parent.style.display = 'flex'; + if (resizeHandle) { + resizeHandle.style.display = 'none'; + } } else { tree.style.display = 'block'; + parent.style.display = 'grid'; if (!resizeHandle) { setupResizeHandle(parent); resizeHandle = parent.querySelector('.resize-handle'); } resizeHandle.style.display = 'block'; - parent.style.display = 'grid'; } event.currentTarget.classList.toggle("extra-network-control--enabled"); } diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index cf2c778bb..94ae4aaa2 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -166,18 +166,15 @@ setupResizeHandle = setup; })(); -onUiLoaded(function() { - for (var elem of 
gradioApp().querySelectorAll('.resize-handle-row')) { - if (!elem.querySelector('.resize-handle')) { - setupResizeHandle(elem); - } - } -}); -function setupExtraNetworksResizeHandle() { - for (var elem of document.body.querySelectorAll('.resize-handle-row')) { +function setupAllResizeHandles() { + for (var elem of gradioApp().querySelectorAll('.resize-handle-row')) { if (!elem.querySelector('.resize-handle') && !elem.children[0].classList.contains("hidden")) { setupResizeHandle(elem); } } } + + +onUiLoaded(setupAllResizeHandles); + diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 09705a98c..9d8f8b28b 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -700,7 +700,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): return ui.pages_contents button_refresh = gr.Button("Refresh", elem_id=f"{tabname}_{page.extra_networks_tabname}_extra_refresh_internal", visible=False) - button_refresh.click(fn=refresh, inputs=[], outputs=ui.pages).then(fn=lambda: None, _js="function(){ " + f"applyExtraNetworkFilter('{tabname}_{page.extra_networks_tabname}');" + " }") + button_refresh.click(fn=refresh, inputs=[], outputs=ui.pages).then(fn=lambda: None, _js="function(){ " + f"applyExtraNetworkFilter('{tabname}_{page.extra_networks_tabname}');" + " }").then(fn=lambda: None, _js='setupAllResizeHandles') def create_html(): ui.pages_contents = [pg.create_html(ui.tabname) for pg in ui.stored_extra_pages] @@ -710,7 +710,7 @@ def create_ui(interface: gr.Blocks, unrelated_tabs, tabname): create_html() return ui.pages_contents - interface.load(fn=pages_html, inputs=[], outputs=ui.pages).then(fn=lambda: None, _js='setupExtraNetworksResizeHandle') + interface.load(fn=pages_html, inputs=[], outputs=ui.pages).then(fn=lambda: None, _js='setupAllResizeHandles') return ui From 51cc1ff2c9d47e66221c7abfb244e4d058c8b279 Mon Sep 17 00:00:00 2001 From: Andray Date: Tue, 27 Feb 2024 23:31:47 +0400 Subject: [PATCH 120/135] fix for mobile and allow collapse right column --- javascript/resizeHandle.js | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 94ae4aaa2..4fe9cbdff 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -20,6 +20,9 @@ } function displayResizeHandle(parent) { + if (!parent.needHideOnMoblie) { + return true; + } if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) { parent.style.display = 'flex'; parent.resizeHandle.style.display = "none"; @@ -58,9 +61,13 @@ if (parent.children[0].style.flexGrow) { leftColTemplate = `${parent.children[0].style.flexGrow}fr`; parent.minLeftColWidth = GRADIO_MIN_WIDTH; + parent.minRightColWidth = GRADIO_MIN_WIDTH; + parent.needHideOnMoblie = true; } else { leftColTemplate = parent.children[0].style.flexBasis; parent.minLeftColWidth = parent.children[0].style.flexBasis.slice(0, -2); + parent.minRightColWidth = 0; + parent.needHideOnMoblie = false; } const gridTemplateColumns = `${leftColTemplate} ${PAD}px ${parent.children[1].style.flexGrow}fr`; parent.style.gridTemplateColumns = gridTemplateColumns; @@ -127,7 +134,7 @@ } else { delta = R.screenX - evt.changedTouches[0].screenX; } - const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - GRADIO_MIN_WIDTH - PAD), R.parent.minLeftColWidth); + const leftColWidth = Math.max(Math.min(R.leftColStartWidth - delta, R.parent.offsetWidth - R.parent.minRightColWidth - PAD), R.parent.minLeftColWidth); setLeftColGridTemplate(R.parent, 
leftColWidth); } }); From bce09eb9871e08fda07b8d6ff78d4d19307574db Mon Sep 17 00:00:00 2001 From: Dalton Date: Thu, 29 Feb 2024 01:04:46 -0500 Subject: [PATCH 121/135] Add a direct link to the binary release --- modules/launch_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/launch_utils.py b/modules/launch_utils.py index 29506f249..d1a086ad6 100644 --- a/modules/launch_utils.py +++ b/modules/launch_utils.py @@ -56,7 +56,7 @@ and delete current Python and "venv" folder in WebUI's directory. You can download 3.10 Python from here: https://www.python.org/downloads/release/python-3106/ -{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases" if is_windows else ""} +{"Alternatively, use a binary release of WebUI: https://github.com/AUTOMATIC1111/stable-diffusion-webui/releases/tag/v1.0.0-pre" if is_windows else ""} Use --skip-python-version-check to suppress this warning. """) From bb99f5271241565bfd98d2a1fdba59350a5aeb39 Mon Sep 17 00:00:00 2001 From: Andray Date: Thu, 29 Feb 2024 15:40:15 +0400 Subject: [PATCH 122/135] resizeHandle handle double tap --- javascript/resizeHandle.js | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 6560372cc..c4e9de581 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -2,6 +2,7 @@ const GRADIO_MIN_WIDTH = 320; const PAD = 16; const DEBOUNCE_TIME = 100; + const DOUBLE_TAP_DELAY = 200; //ms const R = { tracking: false, @@ -10,6 +11,7 @@ leftCol: null, leftColStartWidth: null, screenX: null, + lastTapTime: null, }; let resizeTimer; @@ -47,6 +49,14 @@ } function setup(parent) { + + function onDoubleClick(evt) { + evt.preventDefault(); + evt.stopPropagation(); + + parent.style.gridTemplateColumns = parent.style.originalGridTemplateColumns; + } + const leftCol = parent.firstElementChild; const rightCol = parent.lastElementChild; @@ -69,6 +79,14 @@ if (evt.button !== 0) return; } else { if (evt.changedTouches.length !== 1) return; + + const currentTime = new Date().getTime(); + if (R.lastTapTime && currentTime - R.lastTapTime <= DOUBLE_TAP_DELAY) { + onDoubleClick(evt); + return; + } + + R.lastTapTime = currentTime; } evt.preventDefault(); @@ -89,12 +107,7 @@ }); }); - resizeHandle.addEventListener('dblclick', (evt) => { - evt.preventDefault(); - evt.stopPropagation(); - - parent.style.gridTemplateColumns = parent.style.originalGridTemplateColumns; - }); + resizeHandle.addEventListener('dblclick', onDoubleClick); afterResize(parent); } From eb0b84c5643896385ba6dd242c6815b288618355 Mon Sep 17 00:00:00 2001 From: Andray Date: Thu, 29 Feb 2024 16:02:21 +0400 Subject: [PATCH 123/135] make minimal width 2 times smaller then default --- html/extra-networks-pane.html | 2 +- javascript/resizeHandle.js | 2 +- modules/shared_options.py | 2 +- modules/ui_extra_networks.py | 2 +- 4 files changed, 4 insertions(+), 4 deletions(-) diff --git a/html/extra-networks-pane.html b/html/extra-networks-pane.html index f54344aaa..02a871086 100644 --- a/html/extra-networks-pane.html +++ b/html/extra-networks-pane.html @@ -45,7 +45,7 @@
    -
    +
    {tree_html}
    diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 4fe9cbdff..513198f53 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -65,7 +65,7 @@ parent.needHideOnMoblie = true; } else { leftColTemplate = parent.children[0].style.flexBasis; - parent.minLeftColWidth = parent.children[0].style.flexBasis.slice(0, -2); + parent.minLeftColWidth = parent.children[0].style.flexBasis.slice(0, -2) / 2; parent.minRightColWidth = 0; parent.needHideOnMoblie = false; } diff --git a/modules/shared_options.py b/modules/shared_options.py index 285c54158..aa26588df 100644 --- a/modules/shared_options.py +++ b/modules/shared_options.py @@ -258,7 +258,7 @@ options_templates.update(options_section(('extra_networks', "Extra Networks", "s "extra_networks_card_order_field": OptionInfo("Path", "Default order field for Extra Networks cards", gr.Dropdown, {"choices": ['Path', 'Name', 'Date Created', 'Date Modified']}).needs_reload_ui(), "extra_networks_card_order": OptionInfo("Ascending", "Default order for Extra Networks cards", gr.Dropdown, {"choices": ['Ascending', 'Descending']}).needs_reload_ui(), "extra_networks_tree_view_default_enabled": OptionInfo(False, "Enables the Extra Networks directory tree view by default").needs_reload_ui(), - "extra_networks_tree_view_min_width": OptionInfo(180, "Minimal width for the Extra Networks directory tree view", gr.Number).needs_reload_ui(), + "extra_networks_tree_view_default_width": OptionInfo(180, "Default width for the Extra Networks directory tree view", gr.Number).needs_reload_ui(), "extra_networks_add_text_separator": OptionInfo(" ", "Extra networks separator").info("extra text to add before <...> when adding extra network to prompt"), "ui_extra_networks_tab_reorder": OptionInfo("", "Extra networks tab order").needs_reload_ui(), "textual_inversion_print_at_load": OptionInfo(False, "Print a list of Textual Inversion embeddings when loading model"), diff --git a/modules/ui_extra_networks.py b/modules/ui_extra_networks.py index 9d8f8b28b..ad2c23054 100644 --- a/modules/ui_extra_networks.py +++ b/modules/ui_extra_networks.py @@ -550,7 +550,7 @@ class ExtraNetworksPage: "tree_view_div_extra_class": tree_view_div_extra_class, "tree_html": self.create_tree_view_html(tabname), "items_html": self.create_card_view_html(tabname, none_message="Loading..." 
if empty else None), - "extra_networks_tree_view_min_width": shared.opts.extra_networks_tree_view_min_width, + "extra_networks_tree_view_default_width": shared.opts.extra_networks_tree_view_default_width, "tree_view_div_default_display": tree_view_div_default_display, "extra_network_pane_content_default_display": extra_network_pane_content_default_display, } From 1a51b166a04245f5e2ccdfc1300be3be79345bc3 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Mar 2024 06:53:53 +0300 Subject: [PATCH 124/135] call apply_alpha_schedule_override in load_model_weights for #14979 --- modules/sd_models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index fbd53adba..db72e120f 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -428,6 +428,8 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer devices.dtype_unet = torch.float16 timer.record("apply half()") + apply_alpha_schedule_override(model) + for module in model.modules(): if hasattr(module, 'fp16_weight'): del module.fp16_weight @@ -843,7 +845,6 @@ def reload_model_weights(sd_model=None, info=None, forced_reload=False): sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer) if not forced_reload and sd_model is not None and sd_model.sd_checkpoint_info.filename == checkpoint_info.filename: - apply_alpha_schedule_override(sd_model) return sd_model if sd_model is not None: From ee470cc6a32ae0c89ca32d71adac02b2d434f59a Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Mar 2024 06:54:11 +0300 Subject: [PATCH 125/135] style changes for #14979 --- modules/sd_models.py | 60 ++++++++++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 24 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index db72e120f..747fc39ee 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -552,36 +552,48 @@ def repair_config(sd_config): karlo_path = os.path.join(paths.models_path, 'karlo') sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path) + +def rescale_zero_terminal_snr_abar(alphas_cumprod): + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= (alphas_bar_sqrt_T) + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt ** 2 # Revert sqrt + alphas_bar[-1] = 4.8973451890853435e-08 + return alphas_bar + + def apply_alpha_schedule_override(sd_model, p=None): - def rescale_zero_terminal_snr_abar(alphas_cumprod): - alphas_bar_sqrt = alphas_cumprod.sqrt() + """ + Applies an override to the alpha schedule of the model according to settings. + - downcasts the alpha schedule to half precision + - rescales the alpha schedule to have zero terminal SNR + """ - # Store old values. - alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() - alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + if not hasattr(sd_model, 'alphas_cumprod') or not hasattr(sd_model, 'alphas_cumprod_original'): + return - # Shift so the last timestep is zero. 
- alphas_bar_sqrt -= (alphas_bar_sqrt_T) + sd_model.alphas_cumprod = sd_model.alphas_cumprod_original.to(shared.device) - # Scale so the first timestep is back to the old value. - alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + if opts.use_downcasted_alpha_bar: + if p is not None: + p.extra_generation_params['Downcast alphas_cumprod'] = opts.use_downcasted_alpha_bar + sd_model.alphas_cumprod = sd_model.alphas_cumprod.half().to(shared.device) - # Convert alphas_bar_sqrt to betas - alphas_bar = alphas_bar_sqrt**2 # Revert sqrt - alphas_bar[-1] = 4.8973451890853435e-08 - return alphas_bar + if opts.sd_noise_schedule == "Zero Terminal SNR": + if p is not None: + p.extra_generation_params['Noise Schedule'] = opts.sd_noise_schedule + sd_model.alphas_cumprod = rescale_zero_terminal_snr_abar(sd_model.alphas_cumprod).to(shared.device) - if hasattr(sd_model, 'alphas_cumprod') and hasattr(sd_model, 'alphas_cumprod_original'): - sd_model.alphas_cumprod = sd_model.alphas_cumprod_original.to(shared.device) - - if opts.use_downcasted_alpha_bar: - if p is not None: - p.extra_generation_params['Downcast alphas_cumprod'] = opts.use_downcasted_alpha_bar - sd_model.alphas_cumprod = sd_model.alphas_cumprod.half().to(shared.device) - if opts.sd_noise_schedule == "Zero Terminal SNR": - if p is not None: - p.extra_generation_params['Noise Schedule'] = opts.sd_noise_schedule - sd_model.alphas_cumprod = rescale_zero_terminal_snr_abar(sd_model.alphas_cumprod).to(shared.device) sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight' sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight' From b47756385d3fbd851ff52507cfa392807556c330 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 26 Feb 2024 12:52:34 +0300 Subject: [PATCH 126/135] Merge pull request #15010 from light-and-ray/fix_resize-handle_for_vertical_layout Fix resize-handle visability for vertical layout (mobile) --- javascript/resizeHandle.js | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index cd3e68c6c..6560372cc 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -22,15 +22,11 @@ function displayResizeHandle(parent) { if (window.innerWidth < GRADIO_MIN_WIDTH * 2 + PAD * 4) { parent.style.display = 'flex'; - if (R.handle != null) { - R.handle.style.opacity = '0'; - } + parent.resizeHandle.style.display = "none"; return false; } else { parent.style.display = 'grid'; - if (R.handle != null) { - R.handle.style.opacity = '100'; - } + parent.resizeHandle.style.display = "block"; return true; } } @@ -65,6 +61,7 @@ const resizeHandle = document.createElement('div'); resizeHandle.classList.add('resize-handle'); parent.insertBefore(resizeHandle, rightCol); + parent.resizeHandle = resizeHandle; ['mousedown', 'touchstart'].forEach((eventType) => { resizeHandle.addEventListener(eventType, (evt) => { @@ -82,7 +79,6 @@ R.tracking = true; R.parent = parent; R.parentWidth = parent.offsetWidth; - R.handle = resizeHandle; R.leftCol = leftCol; R.leftColStartWidth = leftCol.offsetWidth; if (eventType.startsWith('mouse')) { From 024a32a09bb833f467fc351a6ea67132db5299c5 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Mon, 26 Feb 2024 12:53:21 +0300 Subject: [PATCH 127/135] Merge pull request #15012 from light-and-ray/register_tmp_file-also-with-mtime register_tmp_file also for mtime --- modules/ui_tempdir.py | 4 +++- 
1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/modules/ui_tempdir.py b/modules/ui_tempdir.py index 621ed1eca..ecd6bdec3 100644 --- a/modules/ui_tempdir.py +++ b/modules/ui_tempdir.py @@ -35,7 +35,9 @@ def save_pil_to_file(self, pil_image, dir=None, format="png"): already_saved_as = getattr(pil_image, 'already_saved_as', None) if already_saved_as and os.path.isfile(already_saved_as): register_tmp_file(shared.demo, already_saved_as) - return f'{already_saved_as}?{os.path.getmtime(already_saved_as)}' + filename_with_mtime = f'{already_saved_as}?{os.path.getmtime(already_saved_as)}' + register_tmp_file(shared.demo, filename_with_mtime) + return filename_with_mtime if shared.opts.temp_dir != "": dir = shared.opts.temp_dir From 0b07a6cf26634ea758d9fd67b4d4222cf9ec1617 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Mar 2024 06:35:46 +0300 Subject: [PATCH 128/135] Merge pull request #15035 from AUTOMATIC1111/fix/normalized-filepath-absolute Use `absolute` path for normalized filepath --- modules/paths_internal.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/modules/paths_internal.py b/modules/paths_internal.py index 2ed1392a4..6058b0cde 100644 --- a/modules/paths_internal.py +++ b/modules/paths_internal.py @@ -7,7 +7,7 @@ import shlex from pathlib import Path -normalized_filepath = lambda filepath: str(Path(filepath).resolve()) +normalized_filepath = lambda filepath: str(Path(filepath).absolute()) commandline_args = os.environ.get('COMMANDLINE_ARGS', "") sys.argv += shlex.split(commandline_args) From d558cb69b0299cfbd15d693492d489a858b027f2 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Mar 2024 06:37:19 +0300 Subject: [PATCH 129/135] Merge pull request #15065 from light-and-ray/resizeHandle_handle_double_tap resizeHandle handle double tap --- javascript/resizeHandle.js | 25 +++++++++++++++++++------ 1 file changed, 19 insertions(+), 6 deletions(-) diff --git a/javascript/resizeHandle.js b/javascript/resizeHandle.js index 6560372cc..c4e9de581 100644 --- a/javascript/resizeHandle.js +++ b/javascript/resizeHandle.js @@ -2,6 +2,7 @@ const GRADIO_MIN_WIDTH = 320; const PAD = 16; const DEBOUNCE_TIME = 100; + const DOUBLE_TAP_DELAY = 200; //ms const R = { tracking: false, @@ -10,6 +11,7 @@ leftCol: null, leftColStartWidth: null, screenX: null, + lastTapTime: null, }; let resizeTimer; @@ -47,6 +49,14 @@ } function setup(parent) { + + function onDoubleClick(evt) { + evt.preventDefault(); + evt.stopPropagation(); + + parent.style.gridTemplateColumns = parent.style.originalGridTemplateColumns; + } + const leftCol = parent.firstElementChild; const rightCol = parent.lastElementChild; @@ -69,6 +79,14 @@ if (evt.button !== 0) return; } else { if (evt.changedTouches.length !== 1) return; + + const currentTime = new Date().getTime(); + if (R.lastTapTime && currentTime - R.lastTapTime <= DOUBLE_TAP_DELAY) { + onDoubleClick(evt); + return; + } + + R.lastTapTime = currentTime; } evt.preventDefault(); @@ -89,12 +107,7 @@ }); }); - resizeHandle.addEventListener('dblclick', (evt) => { - evt.preventDefault(); - evt.stopPropagation(); - - parent.style.gridTemplateColumns = parent.style.originalGridTemplateColumns; - }); + resizeHandle.addEventListener('dblclick', onDoubleClick); afterResize(parent); } From 978c7fadb35e0955fdaf17afc9a8fb27f6c10e49 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Mar 2024 06:38:50 +0300 Subject: [PATCH 130/135] Merge pull request #15039 from 
light-and-ray/dat_cmd_flag dat cmd flag --- modules/cmd_args.py | 1 + 1 file changed, 1 insertion(+) diff --git a/modules/cmd_args.py b/modules/cmd_args.py index 312dabffc..213cba98c 100644 --- a/modules/cmd_args.py +++ b/modules/cmd_args.py @@ -53,6 +53,7 @@ parser.add_argument("--gfpgan-models-path", type=normalized_filepath, help="Path parser.add_argument("--esrgan-models-path", type=normalized_filepath, help="Path to directory with ESRGAN model file(s).", default=os.path.join(models_path, 'ESRGAN')) parser.add_argument("--bsrgan-models-path", type=normalized_filepath, help="Path to directory with BSRGAN model file(s).", default=os.path.join(models_path, 'BSRGAN')) parser.add_argument("--realesrgan-models-path", type=normalized_filepath, help="Path to directory with RealESRGAN model file(s).", default=os.path.join(models_path, 'RealESRGAN')) +parser.add_argument("--dat-models-path", type=normalized_filepath, help="Path to directory with DAT model file(s).", default=os.path.join(models_path, 'DAT')) parser.add_argument("--clip-models-path", type=normalized_filepath, help="Path to directory with CLIP model file(s).", default=None) parser.add_argument("--xformers", action='store_true', help="enable xformers for cross attention layers") parser.add_argument("--force-enable-xformers", action='store_true', help="enable xformers for cross attention layers regardless of whether the checking code thinks you can run it; do not make bug reports if this fails to work") From 28bc85a20a282285710e17c4d86cf9db5e00d7db Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Mar 2024 06:40:32 +0300 Subject: [PATCH 131/135] Merge pull request #14979 from drhead/refiner_cumprod_fix Protect alphas_cumprod during refiner switchover --- modules/processing.py | 28 +--------------------------- modules/sd_models.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+), 27 deletions(-) diff --git a/modules/processing.py b/modules/processing.py index d208a922d..411c7c3f4 100644 --- a/modules/processing.py +++ b/modules/processing.py @@ -915,33 +915,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed: if p.n_iter > 1: shared.state.job = f"Batch {n+1} out of {p.n_iter}" - def rescale_zero_terminal_snr_abar(alphas_cumprod): - alphas_bar_sqrt = alphas_cumprod.sqrt() - - # Store old values. - alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() - alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() - - # Shift so the last timestep is zero. - alphas_bar_sqrt -= (alphas_bar_sqrt_T) - - # Scale so the first timestep is back to the old value. 
- alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) - - # Convert alphas_bar_sqrt to betas - alphas_bar = alphas_bar_sqrt**2 # Revert sqrt - alphas_bar[-1] = 4.8973451890853435e-08 - return alphas_bar - - if hasattr(p.sd_model, 'alphas_cumprod') and hasattr(p.sd_model, 'alphas_cumprod_original'): - p.sd_model.alphas_cumprod = p.sd_model.alphas_cumprod_original.to(shared.device) - - if opts.use_downcasted_alpha_bar: - p.extra_generation_params['Downcast alphas_cumprod'] = opts.use_downcasted_alpha_bar - p.sd_model.alphas_cumprod = p.sd_model.alphas_cumprod.half().to(shared.device) - if opts.sd_noise_schedule == "Zero Terminal SNR": - p.extra_generation_params['Noise Schedule'] = opts.sd_noise_schedule - p.sd_model.alphas_cumprod = rescale_zero_terminal_snr_abar(p.sd_model.alphas_cumprod).to(shared.device) + sd_models.apply_alpha_schedule_override(p.sd_model, p) with devices.without_autocast() if devices.unet_needs_upcast else devices.autocast(): samples_ddim = p.sample(conditioning=p.c, unconditional_conditioning=p.uc, seeds=p.seeds, subseeds=p.subseeds, subseed_strength=p.subseed_strength, prompts=p.prompts) diff --git a/modules/sd_models.py b/modules/sd_models.py index 2c0457715..fbd53adba 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -15,6 +15,7 @@ from ldm.util import instantiate_from_config from modules import paths, shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes, sd_models_config, sd_unet, sd_models_xl, cache, extra_networks, processing, lowvram, sd_hijack, patches from modules.timer import Timer +from modules.shared import opts import tomesd import numpy as np @@ -549,6 +550,36 @@ def repair_config(sd_config): karlo_path = os.path.join(paths.models_path, 'karlo') sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path) +def apply_alpha_schedule_override(sd_model, p=None): + def rescale_zero_terminal_snr_abar(alphas_cumprod): + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. + alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= (alphas_bar_sqrt_T) + + # Scale so the first timestep is back to the old value. 
+ alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt**2 # Revert sqrt + alphas_bar[-1] = 4.8973451890853435e-08 + return alphas_bar + + if hasattr(sd_model, 'alphas_cumprod') and hasattr(sd_model, 'alphas_cumprod_original'): + sd_model.alphas_cumprod = sd_model.alphas_cumprod_original.to(shared.device) + + if opts.use_downcasted_alpha_bar: + if p is not None: + p.extra_generation_params['Downcast alphas_cumprod'] = opts.use_downcasted_alpha_bar + sd_model.alphas_cumprod = sd_model.alphas_cumprod.half().to(shared.device) + if opts.sd_noise_schedule == "Zero Terminal SNR": + if p is not None: + p.extra_generation_params['Noise Schedule'] = opts.sd_noise_schedule + sd_model.alphas_cumprod = rescale_zero_terminal_snr_abar(sd_model.alphas_cumprod).to(shared.device) sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight' sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight' @@ -812,6 +843,7 @@ def reload_model_weights(sd_model=None, info=None, forced_reload=False): sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer) if not forced_reload and sd_model is not None and sd_model.sd_checkpoint_info.filename == checkpoint_info.filename: + apply_alpha_schedule_override(sd_model) return sd_model if sd_model is not None: From da67afe5f68497a04d1fd9173bbd256b73d9d251 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Mar 2024 06:53:53 +0300 Subject: [PATCH 132/135] call apply_alpha_schedule_override in load_model_weights for #14979 --- modules/sd_models.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index fbd53adba..db72e120f 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -428,6 +428,8 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer devices.dtype_unet = torch.float16 timer.record("apply half()") + apply_alpha_schedule_override(model) + for module in model.modules(): if hasattr(module, 'fp16_weight'): del module.fp16_weight @@ -843,7 +845,6 @@ def reload_model_weights(sd_model=None, info=None, forced_reload=False): sd_model = reuse_model_from_already_loaded(sd_model, checkpoint_info, timer) if not forced_reload and sd_model is not None and sd_model.sd_checkpoint_info.filename == checkpoint_info.filename: - apply_alpha_schedule_override(sd_model) return sd_model if sd_model is not None: From 141a17e9693065c33a2b1d30f04a0083bb687775 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Mar 2024 06:54:11 +0300 Subject: [PATCH 133/135] style changes for #14979 --- modules/sd_models.py | 60 ++++++++++++++++++++++++++------------------ 1 file changed, 36 insertions(+), 24 deletions(-) diff --git a/modules/sd_models.py b/modules/sd_models.py index db72e120f..747fc39ee 100644 --- a/modules/sd_models.py +++ b/modules/sd_models.py @@ -552,36 +552,48 @@ def repair_config(sd_config): karlo_path = os.path.join(paths.models_path, 'karlo') sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path) + +def rescale_zero_terminal_snr_abar(alphas_cumprod): + alphas_bar_sqrt = alphas_cumprod.sqrt() + + # Store old values. 
+ alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() + alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + + # Shift so the last timestep is zero. + alphas_bar_sqrt -= (alphas_bar_sqrt_T) + + # Scale so the first timestep is back to the old value. + alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + + # Convert alphas_bar_sqrt to betas + alphas_bar = alphas_bar_sqrt ** 2 # Revert sqrt + alphas_bar[-1] = 4.8973451890853435e-08 + return alphas_bar + + def apply_alpha_schedule_override(sd_model, p=None): - def rescale_zero_terminal_snr_abar(alphas_cumprod): - alphas_bar_sqrt = alphas_cumprod.sqrt() + """ + Applies an override to the alpha schedule of the model according to settings. + - downcasts the alpha schedule to half precision + - rescales the alpha schedule to have zero terminal SNR + """ - # Store old values. - alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone() - alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone() + if not hasattr(sd_model, 'alphas_cumprod') or not hasattr(sd_model, 'alphas_cumprod_original'): + return - # Shift so the last timestep is zero. - alphas_bar_sqrt -= (alphas_bar_sqrt_T) + sd_model.alphas_cumprod = sd_model.alphas_cumprod_original.to(shared.device) - # Scale so the first timestep is back to the old value. - alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T) + if opts.use_downcasted_alpha_bar: + if p is not None: + p.extra_generation_params['Downcast alphas_cumprod'] = opts.use_downcasted_alpha_bar + sd_model.alphas_cumprod = sd_model.alphas_cumprod.half().to(shared.device) - # Convert alphas_bar_sqrt to betas - alphas_bar = alphas_bar_sqrt**2 # Revert sqrt - alphas_bar[-1] = 4.8973451890853435e-08 - return alphas_bar + if opts.sd_noise_schedule == "Zero Terminal SNR": + if p is not None: + p.extra_generation_params['Noise Schedule'] = opts.sd_noise_schedule + sd_model.alphas_cumprod = rescale_zero_terminal_snr_abar(sd_model.alphas_cumprod).to(shared.device) - if hasattr(sd_model, 'alphas_cumprod') and hasattr(sd_model, 'alphas_cumprod_original'): - sd_model.alphas_cumprod = sd_model.alphas_cumprod_original.to(shared.device) - - if opts.use_downcasted_alpha_bar: - if p is not None: - p.extra_generation_params['Downcast alphas_cumprod'] = opts.use_downcasted_alpha_bar - sd_model.alphas_cumprod = sd_model.alphas_cumprod.half().to(shared.device) - if opts.sd_noise_schedule == "Zero Terminal SNR": - if p is not None: - p.extra_generation_params['Noise Schedule'] = opts.sd_noise_schedule - sd_model.alphas_cumprod = rescale_zero_terminal_snr_abar(sd_model.alphas_cumprod).to(shared.device) sd1_clip_weight = 'cond_stage_model.transformer.text_model.embeddings.token_embedding.weight' sd2_clip_weight = 'cond_stage_model.model.transformer.resblocks.0.attn.in_proj_weight' From 13984857890401e8605a3e53bd671e900a18d73f Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Mar 2024 07:00:08 +0300 Subject: [PATCH 134/135] update changelog --- CHANGELOG.md | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8fa3c47c5..f0c659811 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,11 +7,11 @@ * Support for SDXL-Inpaint Model ([#14390](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14390)) * Use Spandrel for upscaling and face restoration architectures ([#14425](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14425), [#14467](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14467), 
[#14473](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14473), [#14474](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14474), [#14477](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14477), [#14476](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14476), [#14484](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14484), [#14500](https://github.com/AUTOMATIC1111/stable-difusion-webui/pull/14500), [#14501](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14501), [#14504](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14504), [#14524](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14524), [#14809](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14809)) * Automatic backwards version compatibility (when loading infotexts from old images with program version specified, will add compatibility settings) -* Implement zero terminal SNR noise schedule option (**[SEED BREAKING CHANGE](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Seed-breaking-changes#180-dev-170-225-2024-01-01---zero-terminal-snr-noise-schedule-option)**, [#14145](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14145)) +* Implement zero terminal SNR noise schedule option (**[SEED BREAKING CHANGE](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Seed-breaking-changes#180-dev-170-225-2024-01-01---zero-terminal-snr-noise-schedule-option)**, [#14145](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14145), [#14979](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14979)) * Add a [✨] button to run hires fix on selected image in the gallery (with help from [#14598](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14598), [#14626](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14626), [#14728](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14728)) * [Separate assets repository](https://github.com/AUTOMATIC1111/stable-diffusion-webui-assets); serve fonts locally rather than from google's servers * Official LCM Sampler Support ([#14583](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14583)) -* Add support for DAT upscaler models ([#14690](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14690)) +* Add support for DAT upscaler models ([#14690](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14690), [#15039](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15039)) * Extra Networks Tree View ([#14588](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14588), [#14900](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14900)) * NPU Support ([#14801](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14801)) * Propmpt comments support @@ -104,6 +104,7 @@ * avoid doble upscaling in inpaint ([#14966](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14966)) * Fix #14591 using translated content to do categories mapping ([#14995](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14995)) * fix: the `split_threshold` parameter does not work when running Split oversized images ([#15006](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15006)) +* Fix resize-handle for mobile ([#15010](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15010), [#15065](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15065)) ### Other: * Assign id for "extra_options". Replace numeric field with slider. 
([#14270](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14270)) @@ -124,8 +125,9 @@ * extensions tab table row hover highlight ([#14885](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14885)) * Always add timestamp to displayed image ([#14890](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14890)) * Added core.filemode=false so doesn't track changes in file permission… ([#14930](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14930)) -* Normalize command-line argument paths ([#14934](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14934)) +* Normalize command-line argument paths ([#14934](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14934), [#15035](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15035)) * Use original App Title in progress bar ([#14916](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/14916)) +* register_tmp_file also for mtime ([#15012](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15012)) ## 1.7.0 From bb24c13ed7910e9e6255e3d7ff3d81ba40468fc0 Mon Sep 17 00:00:00 2001 From: AUTOMATIC1111 <16777216c@gmail.com> Date: Sat, 2 Mar 2024 07:39:59 +0300 Subject: [PATCH 135/135] infotext support for #14978 --- modules/infotext_utils.py | 3 +++ modules/infotext_versions.py | 3 +++ modules/sd_samplers_common.py | 8 +++++--- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/modules/infotext_utils.py b/modules/infotext_utils.py index a938aa2a7..e04a7bee9 100644 --- a/modules/infotext_utils.py +++ b/modules/infotext_utils.py @@ -359,6 +359,9 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model if "Emphasis" not in res: res["Emphasis"] = "Original" + if "Refiner switch by sampling steps" not in res: + res["Refiner switch by sampling steps"] = False + infotext_versions.backcompat(res) for key in skip_fields: diff --git a/modules/infotext_versions.py b/modules/infotext_versions.py index 23b45c3f9..b5552a312 100644 --- a/modules/infotext_versions.py +++ b/modules/infotext_versions.py @@ -5,6 +5,7 @@ import re v160 = version.parse("1.6.0") v170_tsnr = version.parse("v1.7.0-225") +v180 = version.parse("1.8.0") def parse_version(text): @@ -40,3 +41,5 @@ def backcompat(d): if ver < v170_tsnr: d["Downcast alphas_cumprod"] = True + if ver < v180 and d.get('Refiner'): + d["Refiner switch by sampling steps"] = True diff --git a/modules/sd_samplers_common.py b/modules/sd_samplers_common.py index 045b9e2fe..6df423912 100644 --- a/modules/sd_samplers_common.py +++ b/modules/sd_samplers_common.py @@ -155,14 +155,16 @@ def replace_torchsde_browinan(): replace_torchsde_browinan() -def apply_refiner(cfg_denoiser, sigma): - if opts.refiner_switch_by_sample_steps: +def apply_refiner(cfg_denoiser, sigma=None): + if opts.refiner_switch_by_sample_steps or not sigma: completed_ratio = cfg_denoiser.step / cfg_denoiser.total_steps + cfg_denoiser.p.extra_generation_params["Refiner switch by sampling steps"] = True + else: # torch.max(sigma) only to handle rare case where we might have different sigmas in the same batch try: timestep = torch.argmin(torch.abs(cfg_denoiser.inner_model.sigmas - torch.max(sigma))) - except AttributeError: # for samplers that dont use sigmas (DDIM) sigma is actually the timestep + except AttributeError: # for samplers that don't use sigmas (DDIM) sigma is actually the timestep timestep = torch.max(sigma).to(dtype=int) completed_ratio = (999 - timestep) / 1000
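
For reference, the refiner-switchover patches above ([PATCH 131/135] through [PATCH 133/135]) consolidate the alpha-schedule handling into sd_models.apply_alpha_schedule_override(): alphas_cumprod is first restored from alphas_cumprod_original, then optionally downcast to half precision (opts.use_downcasted_alpha_bar) and optionally rescaled so the terminal alpha-bar is effectively zero (opts.sd_noise_schedule == "Zero Terminal SNR"). The standalone sketch below mirrors that rescaling math outside the webui so it can be sanity-checked in isolation; the linear beta schedule and the printed values are assumptions chosen purely for illustration and are not part of any patch.

# Illustrative sketch only -- mirrors rescale_zero_terminal_snr_abar() from modules/sd_models.py
# on a toy schedule to show that the final alpha_bar is pushed to (near) zero.
# The linear beta schedule below is an assumption made for demonstration purposes.
import torch

def rescale_zero_terminal_snr_abar(alphas_cumprod: torch.Tensor) -> torch.Tensor:
    alphas_bar_sqrt = alphas_cumprod.sqrt()

    # Store old values.
    alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
    alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()

    # Shift so the last timestep is zero, then scale so the first timestep keeps its old value.
    alphas_bar_sqrt -= alphas_bar_sqrt_T
    alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)

    # Square to get back to alpha_bar; keep the last entry a hair above zero for numerical safety.
    alphas_bar = alphas_bar_sqrt ** 2
    alphas_bar[-1] = 4.8973451890853435e-08
    return alphas_bar

betas = torch.linspace(1e-4, 2e-2, 1000)            # toy schedule (assumption)
alphas_cumprod = torch.cumprod(1.0 - betas, dim=0)  # stock alpha_bar: small but non-zero at the last step
rescaled = rescale_zero_terminal_snr_abar(alphas_cumprod)
print(float(alphas_cumprod[-1]), float(rescaled[-1]))  # roughly 4e-5 before vs ~5e-8 after

In the refactored apply_alpha_schedule_override(), both transformations are opt-in: the downcast only runs when opts.use_downcasted_alpha_bar is set and the rescale only runs when opts.sd_noise_schedule is "Zero Terminal SNR", so default behaviour is unchanged.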
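
The last patch above ([PATCH 135/135], infotext support for #14978) lets apply_refiner() derive the refiner switchover point in two ways: from the sampling-step counter when opts.refiner_switch_by_sample_steps is enabled (or when no sigma is passed), or from the timestep recovered from sigma otherwise. The choice is written to infotext, and the backcompat shim defaults "Refiner switch by sampling steps" to True for pre-1.8.0 images that used a refiner. A minimal sketch of the two ratio computations, with made-up example values:

# Illustrative sketch only -- the example values are assumptions, not taken from the patches.
def completed_ratio_by_steps(step: int, total_steps: int) -> float:
    # "Refiner switch by sampling steps": progress is simply the fraction of steps completed.
    return step / total_steps

def completed_ratio_by_timestep(timestep: int) -> float:
    # Timesteps count down from 999 to 0 during sampling, so 999 maps to 0.0 and 0 maps to ~1.0.
    return (999 - timestep) / 1000

print(completed_ratio_by_steps(10, 20))     # 0.5 -- halfway through the scheduled steps
print(completed_ratio_by_timestep(499))     # 0.5 -- halfway through the noise schedule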