Merge branch 'dev' into move_upscale_postprocessing_under_input_accordion
Commit 446e49d6db
.github/workflows/on_pull_request.yaml (8 changes)

@@ -11,8 +11,8 @@ jobs:
     if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
     steps:
       - name: Checkout Code
-        uses: actions/checkout@v3
-      - uses: actions/setup-python@v4
+        uses: actions/checkout@v4
+      - uses: actions/setup-python@v5
         with:
           python-version: 3.11
       # NB: there's no cache: pip here since we're not installing anything
@@ -29,9 +29,9 @@ jobs:
     if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
     steps:
       - name: Checkout Code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
       - name: Install Node.js
-        uses: actions/setup-node@v3
+        uses: actions/setup-node@v4
         with:
           node-version: 18
       - run: npm i --ci

.github/workflows/run_tests.yaml (10 changes)

@@ -11,9 +11,9 @@ jobs:
     if: github.event_name != 'pull_request' || github.event.pull_request.head.repo.full_name != github.event.pull_request.base.repo.full_name
     steps:
       - name: Checkout Code
-        uses: actions/checkout@v3
+        uses: actions/checkout@v4
      - name: Set up Python 3.10
-        uses: actions/setup-python@v4
+        uses: actions/setup-python@v5
        with:
          python-version: 3.10.6
          cache: pip
@@ -22,7 +22,7 @@ jobs:
           launch.py
       - name: Cache models
         id: cache-models
-        uses: actions/cache@v3
+        uses: actions/cache@v4
         with:
           path: models
           key: "2023-12-30"
@@ -68,13 +68,13 @@ jobs:
           python -m coverage report -i
           python -m coverage html -i
       - name: Upload main app output
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         if: always()
         with:
           name: output
           path: output.txt
       - name: Upload coverage HTML
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         if: always()
         with:
           name: htmlcov

@@ -98,6 +98,7 @@ Make sure the required [dependencies](https://github.com/AUTOMATIC1111/stable-di
 - [NVidia](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-NVidia-GPUs) (recommended)
 - [AMD](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Install-and-Run-on-AMD-GPUs) GPUs.
 - [Intel CPUs, Intel GPUs (both integrated and discrete)](https://github.com/openvinotoolkit/stable-diffusion-webui/wiki/Installation-on-Intel-Silicon) (external wiki page)
+- [Ascend NPUs](https://github.com/wangshuai09/stable-diffusion-webui/wiki/Install-and-run-on-Ascend-NPUs) (external wiki page)

 Alternatively, use online services (like Google Colab):

@@ -173,7 +173,7 @@ class NetworkModule:
         orig_weight = orig_weight.to(updown)
         merged_scale1 = updown + orig_weight
         dora_merged = (
-            merged_scale1 / merged_scale1(dim=self.dora_mean_dim, keepdim=True) * self.dora_scale
+            merged_scale1 / merged_scale1.mean(dim=self.dora_mean_dim, keepdim=True) * self.dora_scale
         )
         final_updown = dora_merged - orig_weight
         return final_updown

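Note: the removed line called the tensor itself, `merged_scale1(dim=...)`, which raises TypeError: 'Tensor' object is not callable; the fix invokes the `.mean()` method. A minimal standalone sketch of the corrected DoRA merge step (shapes are illustrative, not the actual module's):

import torch

updown = torch.randn(4, 4)       # low-rank delta to merge
orig_weight = torch.randn(4, 4)  # original module weight
dora_scale = torch.randn(4, 1)   # learned per-row magnitude
dora_mean_dim = 1

merged_scale1 = updown + orig_weight
# normalize the merged weight by its mean magnitude, then rescale
dora_merged = merged_scale1 / merged_scale1.mean(dim=dora_mean_dim, keepdim=True) * dora_scale
final_updown = dora_merged - orig_weight  # delta to apply on top of orig_weight
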
@@ -36,13 +36,6 @@ class NetworkModuleOFT(network.NetworkModule):
         # self.alpha is unused
         self.dim = self.oft_blocks.shape[1]  # (num_blocks, block_size, block_size)

-        # LyCORIS BOFT
-        if self.oft_blocks.dim() == 4:
-            self.is_boft = True
-        self.rescale = weights.w.get('rescale', None)
-        if self.rescale is not None:
-            self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1))
-
         is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear]
         is_conv = type(self.sd_module) in [torch.nn.Conv2d]
         is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention]  # unsupported

@@ -54,6 +47,13 @@ class NetworkModuleOFT(network.NetworkModule):
         elif is_other_linear:
             self.out_dim = self.sd_module.embed_dim

+        # LyCORIS BOFT
+        if self.oft_blocks.dim() == 4:
+            self.is_boft = True
+        self.rescale = weights.w.get('rescale', None)
+        if self.rescale is not None and not is_other_linear:
+            self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1))
+
         self.num_blocks = self.dim
         self.block_size = self.out_dim // self.dim
         self.constraint = (0 if self.alpha is None else self.alpha) * self.out_dim

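Note: the BOFT block is moved below the is_linear/is_conv/is_other_linear checks so the new `not is_other_linear` guard can skip the rescale reshape for MultiheadAttention modules. A rough standalone sketch of what that reshape does for broadcasting (invented shapes):

import torch

weight = torch.randn(8, 3, 3, 3)   # e.g. a Conv2d weight: (out, in, kh, kw)
rescale = torch.randn(8)           # one scale factor per output channel
rescale = rescale.reshape(-1, *[1] * (weight.dim() - 1))  # -> (8, 1, 1, 1)
print((weight * rescale).shape)    # broadcasts over the remaining dims
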
@@ -131,19 +131,15 @@ function setupImageForLightbox(e) {
     e.style.cursor = 'pointer';
     e.style.userSelect = 'none';

-    var isFirefox = navigator.userAgent.toLowerCase().indexOf('firefox') > -1;
-
-    // For Firefox, listening on click first switches to the next image and then shows the lightbox.
-    // If you know how to fix this without switching to the mousedown event, please do.
-    // For other browsers the event is click, to make it possible to drag the picture.
-    var event = isFirefox ? 'mousedown' : 'click';
-
-    e.addEventListener(event, function(evt) {
+    e.addEventListener('mousedown', function(evt) {
         if (evt.button == 1) {
             open(evt.target.src);
             evt.preventDefault();
             return;
         }
+    }, true);
+
+    e.addEventListener('click', function(evt) {
         if (!opts.js_modal_lightbox || evt.button != 0) return;

         modalZoomSet(gradioApp().getElementById('modalImage'), opts.js_modal_lightbox_initially_zoomed);

@@ -156,6 +156,8 @@ class Extension:
     def check_updates(self):
         repo = Repo(self.path)
         for fetch in repo.remote().fetch(dry_run=True):
+            if self.branch and fetch.name != f'{repo.remote().name}/{self.branch}':
+                continue
             if fetch.flags != fetch.HEAD_UPTODATE:
                 self.can_update = True
                 self.status = "new commits"

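Note: the added filter makes check_updates() ignore fetch results for branches other than the one the extension tracks. A standalone sketch of the same GitPython pattern (the path and branch name are made up):

from git import Repo

repo = Repo(".")   # hypothetical: any local clone
branch = "main"    # hypothetical: the branch this checkout tracks
for fetch in repo.remote().fetch(dry_run=True):
    # FetchInfo.name looks like "<remote>/<branch>", e.g. "origin/main"
    if branch and fetch.name != f'{repo.remote().name}/{branch}':
        continue
    if fetch.flags != fetch.HEAD_UPTODATE:
        print(f"new commits on {fetch.name}")
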
@@ -265,17 +265,6 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
         else:
             prompt += ("" if prompt == "" else "\n") + line

-    if shared.opts.infotext_styles != "Ignore":
-        found_styles, prompt, negative_prompt = shared.prompt_styles.extract_styles_from_prompt(prompt, negative_prompt)
-
-        if shared.opts.infotext_styles == "Apply":
-            res["Styles array"] = found_styles
-        elif shared.opts.infotext_styles == "Apply if any" and found_styles:
-            res["Styles array"] = found_styles
-
-    res["Prompt"] = prompt
-    res["Negative prompt"] = negative_prompt
-
     for k, v in re_param.findall(lastline):
         try:
             if v[0] == '"' and v[-1] == '"':

@@ -290,6 +279,26 @@ Steps: 20, Sampler: Euler a, CFG scale: 7, Seed: 965400086, Size: 512x512, Model
         except Exception:
             print(f"Error parsing \"{k}: {v}\"")

+    # Extract styles from prompt
+    if shared.opts.infotext_styles != "Ignore":
+        found_styles, prompt_no_styles, negative_prompt_no_styles = shared.prompt_styles.extract_styles_from_prompt(prompt, negative_prompt)
+
+        same_hr_styles = True
+        if ("Hires prompt" in res or "Hires negative prompt" in res) and (infotext_ver > infotext_versions.v180_hr_styles if (infotext_ver := infotext_versions.parse_version(res.get("Version"))) else True):
+            hr_prompt, hr_negative_prompt = res.get("Hires prompt", prompt), res.get("Hires negative prompt", negative_prompt)
+            hr_found_styles, hr_prompt_no_styles, hr_negative_prompt_no_styles = shared.prompt_styles.extract_styles_from_prompt(hr_prompt, hr_negative_prompt)
+            if same_hr_styles := found_styles == hr_found_styles:
+                res["Hires prompt"] = '' if hr_prompt_no_styles == prompt_no_styles else hr_prompt_no_styles
+                res['Hires negative prompt'] = '' if hr_negative_prompt_no_styles == negative_prompt_no_styles else hr_negative_prompt_no_styles
+
+        if same_hr_styles:
+            prompt, negative_prompt = prompt_no_styles, negative_prompt_no_styles
+            if (shared.opts.infotext_styles == "Apply if any" and found_styles) or shared.opts.infotext_styles == "Apply":
+                res['Styles array'] = found_styles
+
+    res["Prompt"] = prompt
+    res["Negative prompt"] = negative_prompt
+
     # Missing CLIP skip means it was set to 1 (the default)
     if "Clip skip" not in res:
         res["Clip skip"] = "1"

@@ -6,6 +6,7 @@ import re
 v160 = version.parse("1.6.0")
 v170_tsnr = version.parse("v1.7.0-225")
 v180 = version.parse("1.8.0")
+v180_hr_styles = version.parse("1.8.0-136")  # todo: change to the actual version number after merge


 def parse_version(text):

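Note: v180_hr_styles is the cutoff the parsing hunk above compares against: styles are only stripped from "Hires prompt" when the infotext's recorded Version is newer than this build, since older builds never embedded styles there. A simplified sketch of that gate, assuming packaging-style versions as this module uses (the real parse_version also tolerates malformed strings):

from packaging import version

v180_hr_styles = version.parse("1.8.0-136")

def written_after_hr_styles_change(res: dict) -> bool:
    ver_text = res.get("Version")
    infotext_ver = version.parse(ver_text) if ver_text else None
    # unknown or missing version: assume the new behaviour, as the hunk does
    return infotext_ver is None or infotext_ver > v180_hr_styles

print(written_after_hr_styles_change({"Version": "1.8.0"}))  # False
print(written_after_hr_styles_change({}))                    # True
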
@@ -702,7 +702,7 @@ def program_version():
     return res


-def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None):
+def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None, all_hr_prompts=None, all_hr_negative_prompts=None):
     if index is None:
         index = position_in_batch + iteration * p.batch_size

@@ -745,11 +745,18 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
         "RNG": opts.randn_source if opts.randn_source != "GPU" else None,
         "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond,
         "Tiling": "True" if p.tiling else None,
+        "Hires prompt": None,  # This is set later, insert here to keep order
+        "Hires negative prompt": None,  # This is set later, insert here to keep order
         **p.extra_generation_params,
         "Version": program_version() if opts.add_version_to_infotext else None,
         "User": p.user if opts.add_user_name_to_info else None,
     }

+    if all_hr_prompts := all_hr_prompts or getattr(p, 'all_hr_prompts', None):
+        generation_params['Hires prompt'] = all_hr_prompts[index] if all_hr_prompts[index] != all_prompts[index] else None
+    if all_hr_negative_prompts := all_hr_negative_prompts or getattr(p, 'all_hr_negative_prompts', None):
+        generation_params['Hires negative prompt'] = all_hr_negative_prompts[index] if all_hr_negative_prompts[index] != all_negative_prompts[index] else None
+
     generation_params_text = ", ".join([k if k == v else f'{k}: {infotext_utils.quote(v)}' for k, v in generation_params.items() if v is not None])

     prompt_text = p.main_prompt if use_main_prompt else all_prompts[index]

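Note: the two None placeholders rely on dict insertion order: the keys are registered early so the hires prompts land at a fixed position in the infotext, and None values are dropped when the string is assembled. A toy illustration (simplified join, not the actual quoting logic):

params = {
    "Tiling": None,        # unset options stay None
    "Hires prompt": None,  # placeholder, filled in later
    "Version": "1.8.0",
}
params["Hires prompt"] = "a castle, highly detailed"
# None values are filtered out; insertion order decides the output order
text = ", ".join(f"{k}: {v}" for k, v in params.items() if v is not None)
print(text)  # Hires prompt: a castle, highly detailed, Version: 1.8.0
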
@@ -1194,12 +1201,6 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         if self.hr_sampler_name is not None and self.hr_sampler_name != self.sampler_name:
             self.extra_generation_params["Hires sampler"] = self.hr_sampler_name

-        if tuple(self.hr_prompt) != tuple(self.prompt):
-            self.extra_generation_params["Hires prompt"] = self.hr_prompt
-
-        if tuple(self.hr_negative_prompt) != tuple(self.negative_prompt):
-            self.extra_generation_params["Hires negative prompt"] = self.hr_negative_prompt
-
         self.latent_scale_mode = shared.latent_upscale_modes.get(self.hr_upscaler, None) if self.hr_upscaler is not None else shared.latent_upscale_modes.get(shared.latent_upscale_default_mode, "nearest")
         if self.enable_hr and self.latent_scale_mode is None:
             if not any(x.name == self.hr_upscaler for x in shared.sd_upscalers):

@@ -26,6 +26,13 @@ class ScriptStripComments(scripts.Script):
         p.main_prompt = strip_comments(p.main_prompt)
         p.main_negative_prompt = strip_comments(p.main_negative_prompt)

+        if getattr(p, 'enable_hr', False):
+            p.all_hr_prompts = [strip_comments(x) for x in p.all_hr_prompts]
+            p.all_hr_negative_prompts = [strip_comments(x) for x in p.all_hr_negative_prompts]
+
+            p.hr_prompt = strip_comments(p.hr_prompt)
+            p.hr_negative_prompt = strip_comments(p.hr_negative_prompt)
+

 def before_token_counter(params: script_callbacks.BeforeTokenCounterParams):
     if not shared.opts.enable_prompt_comments:

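Note: with hires fix enabled, the hr prompt lists must be stripped as well, or comments would survive into the second pass. strip_comments itself is defined elsewhere in the module; a hypothetical stand-in that drops full-line '#' comments could look like this (the real helper may differ):

import re

def strip_comments(text):
    # hypothetical: remove lines that start with '#'
    return re.sub('(^|\n)#[^\n]*', '', text)

print(strip_comments("a castle\n# this line is a comment\nsunset"))
# -> "a castle\nsunset"
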
@@ -101,6 +101,7 @@ options_templates.update(options_section(('upscaling', "Upscaling", "postprocess
     "DAT_tile": OptionInfo(192, "Tile size for DAT upscalers.", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}).info("0 = no tiling"),
     "DAT_tile_overlap": OptionInfo(8, "Tile overlap for DAT upscalers.", gr.Slider, {"minimum": 0, "maximum": 48, "step": 1}).info("Low values = visible seam"),
     "upscaler_for_img2img": OptionInfo(None, "Upscaler for img2img", gr.Dropdown, lambda: {"choices": [x.name for x in shared.sd_upscalers]}),
+    "set_scale_by_when_changing_upscaler": OptionInfo(False, "Automatically set the Scale by factor based on the name of the selected Upscaler."),
 }))

 options_templates.update(options_section(('face-restoration', "Face restoration", "postprocessing"), {

@@ -1,3 +1,4 @@
+from __future__ import annotations
 from pathlib import Path
 from modules import errors
 import csv

@@ -1,3 +1,5 @@
+import re
+
 from PIL import Image
 import numpy as np

@@ -39,10 +41,22 @@ class ScriptPostprocessingUpscale(scripts_postprocessing.ScriptPostprocessing):
             upscaling_res_switch_btn = ToolButton(value=switch_values_symbol, elem_id="upscaling_res_switch_btn", tooltip="Switch width/height")
             upscaling_crop = gr.Checkbox(label='Crop to fit', value=True, elem_id="extras_upscaling_crop")

+        def on_selected_upscale_method(upscale_method):
+            if not shared.opts.set_scale_by_when_changing_upscaler:
+                return gr.update()
+
+            match = re.search(r'(\d)[xX]|[xX](\d)', upscale_method)
+            if not match:
+                return gr.update()
+
+            return gr.update(value=int(match.group(1) or match.group(2)))
+
         upscaling_res_switch_btn.click(lambda w, h: (h, w), inputs=[upscaling_resize_w, upscaling_resize_h], outputs=[upscaling_resize_w, upscaling_resize_h], show_progress=False)
         tab_scale_by.select(fn=lambda: 0, inputs=[], outputs=[selected_tab])
         tab_scale_to.select(fn=lambda: 1, inputs=[], outputs=[selected_tab])

+        extras_upscaler_1.change(on_selected_upscale_method, inputs=[extras_upscaler_1], outputs=[upscaling_resize], show_progress="hidden")
+
         return {
             "upscale_enabled": upscale_enabled,
             "upscale_mode": selected_tab,

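Note: the regex accepts a single digit either directly before or directly after an x, so the factor is found whichever way the upscaler name spells it. A quick check of the pattern (illustrative names):

import re

for name in ["R-ESRGAN 4x+", "x2 Upscaler", "ScuNET"]:
    match = re.search(r'(\d)[xX]|[xX](\d)', name)
    factor = int(match.group(1) or match.group(2)) if match else None
    print(name, "->", factor)  # 4, 2, None
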
webui.sh (16 changes)

@@ -130,12 +130,18 @@ case "$gpu_info" in
         if [[ -z "${TORCH_COMMAND}" ]]
         then
             pyv="$(${python_cmd} -c 'import sys; print(".".join(map(str, sys.version_info[0:2])))')"
-            if [[ $(bc <<< "$pyv <= 3.10") -eq 1 ]]
+            # Using an old nightly compiled against rocm 5.2 for Navi1, see https://github.com/pytorch/pytorch/issues/106728#issuecomment-1749511711
+            if [[ $pyv == "3.8" ]]
             then
-                # Navi users will still use torch 1.13 because 2.0 does not seem to work.
-                export TORCH_COMMAND="pip install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.6"
+                export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp38-cp38-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp38-cp38-linux_x86_64.whl"
+            elif [[ $pyv == "3.9" ]]
+            then
+                export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp39-cp39-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp39-cp39-linux_x86_64.whl"
+            elif [[ $pyv == "3.10" ]]
+            then
+                export TORCH_COMMAND="pip install https://download.pytorch.org/whl/nightly/rocm5.2/torch-2.0.0.dev20230209%2Brocm5.2-cp310-cp310-linux_x86_64.whl https://download.pytorch.org/whl/nightly/rocm5.2/torchvision-0.15.0.dev20230209%2Brocm5.2-cp310-cp310-linux_x86_64.whl"
             else
-                printf "\e[1m\e[31mERROR: RX 5000 series GPUs must be using at max python 3.10, aborting...\e[0m"
+                printf "\e[1m\e[31mERROR: RX 5000 series GPUs python version must be between 3.8 and 3.10, aborting...\e[0m"
                 exit 1
             fi
         fi

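Note: the exact string matches also sidestep a flaw in the old test: bc reads "3.10" as the number 3.1, so "3.9" would have failed the old "$pyv <= 3.10" check. The one-liner webui.sh runs to obtain pyv is plain Python:

import sys
print(".".join(map(str, sys.version_info[0:2])))  # e.g. "3.10"
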
@@ -143,7 +149,7 @@ case "$gpu_info" in
     *"Navi 2"*) export HSA_OVERRIDE_GFX_VERSION=10.3.0
         ;;
     *"Navi 3"*) [[ -z "${TORCH_COMMAND}" ]] && \
-        export TORCH_COMMAND="pip install --pre torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.7"
+        export TORCH_COMMAND="pip install torch torchvision --index-url https://download.pytorch.org/whl/nightly/rocm5.7"
         ;;
     *"Renoir"*) export HSA_OVERRIDE_GFX_VERSION=9.0.0
         printf "\n%s\n" "${delimiter}"