Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git, synced 2025-02-07 06:02:53 +08:00

Merge branch 'dev' into patch-2

Commit: 0edc04d126
@@ -1,3 +1,8 @@
+## 1.9.4
+
+### Bug Fixes:
+* pin setuptools version to fix the startup error ([#15882](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15882))
+
 ## 1.9.3
 
 ### Bug Fixes:
@@ -260,6 +260,16 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=None):
     loaded_networks.clear()
 
+    unavailable_networks = []
+    for name in names:
+        if name.lower() in forbidden_network_aliases and available_networks.get(name) is None:
+            unavailable_networks.append(name)
+        elif available_network_aliases.get(name) is None:
+            unavailable_networks.append(name)
+
+    if unavailable_networks:
+        update_available_networks_by_names(unavailable_networks)
+
     networks_on_disk = [available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None) for name in names]
     if any(x is None for x in networks_on_disk):
         list_available_networks()
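This hunk makes `load_networks` refresh the on-disk index only for names missing from the in-memory lookups before resolving them, rather than falling back to a full rescan. A minimal standalone sketch of that refresh-on-miss pattern, with toy data and a hypothetical `rescan` callback (not webui's actual API):

```python
# Refresh-on-miss: rescan only the names absent from the cache,
# then resolve every requested name against the updated cache.
def resolve(names, cache, rescan):
    missing = [name for name in names if name not in cache]
    if missing:
        rescan(missing)  # targeted refresh instead of a full rebuild
    return [cache.get(name) for name in names]

disk = {"lora_a": "a.safetensors", "lora_b": "b.safetensors"}  # toy "filesystem"
cache = {"lora_a": "a.safetensors"}                            # stale in-memory index

def rescan(names):
    cache.update({n: disk[n] for n in names if n in disk})

print(resolve(["lora_a", "lora_b"], cache, rescan))  # ['a.safetensors', 'b.safetensors']
```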
@@ -566,22 +576,16 @@ def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
     return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs)
 
 
-def list_available_networks():
-    available_networks.clear()
-    available_network_aliases.clear()
-    forbidden_network_aliases.clear()
-    available_network_hash_lookup.clear()
-    forbidden_network_aliases.update({"none": 1, "Addams": 1})
-
-    os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
-
+def process_network_files(names: list[str] | None = None):
     candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
     candidates += list(shared.walk_files(shared.cmd_opts.lyco_dir_backcompat, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
     for filename in candidates:
         if os.path.isdir(filename):
             continue
 
         name = os.path.splitext(os.path.basename(filename))[0]
+        # if names is provided, only load networks with names in the list
+        if names and name not in names:
+            continue
         try:
             entry = network.NetworkOnDisk(name, filename)
         except OSError: # should catch FileNotFoundError and PermissionError etc.
@@ -597,6 +601,22 @@ def list_available_networks():
         available_network_aliases[entry.alias] = entry
 
 
+def update_available_networks_by_names(names: list[str]):
+    process_network_files(names)
+
+
+def list_available_networks():
+    available_networks.clear()
+    available_network_aliases.clear()
+    forbidden_network_aliases.clear()
+    available_network_hash_lookup.clear()
+    forbidden_network_aliases.update({"none": 1, "Addams": 1})
+
+    os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
+
+    process_network_files()
+
+
 re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")
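The refactor leaves two entry points over one scanning loop: `list_available_networks` for a full rebuild and `update_available_networks_by_names` for a targeted refresh. A self-contained sketch of the optional-names filter that makes this work (toy filenames, not webui's loader):

```python
import os

def process_files(filenames, names=None):
    """Process every file, or only those whose name stem appears in `names`."""
    processed = []
    for filename in filenames:
        name = os.path.splitext(os.path.basename(filename))[0]
        # if names is provided, only load entries named in the list
        if names and name not in names:
            continue
        processed.append(name)
    return processed

files = ["loras/a.safetensors", "loras/b.safetensors", "loras/c.pt"]
print(process_files(files))         # ['a', 'b', 'c']  (full scan)
print(process_files(files, ["b"]))  # ['b']            (targeted refresh)
```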
@@ -337,8 +337,8 @@ onOptionsChanged(function() {
 let txt2img_textarea, img2img_textarea = undefined;
 
 function restart_reload() {
+    document.body.style.backgroundColor = "var(--background-fill-primary)";
     document.body.innerHTML = '<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading...</h1>';
 
     var requestPing = function() {
         requestGet("./internal/ping", {}, function(data) {
             location.reload();
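`restart_reload` now sets the page background from the theme variable before swapping in the "Reloading..." banner, then polls `./internal/ping` until the restarted server answers and reloads. The same poll-until-ready idea sketched in Python; the host, port, and interval are assumptions, not values from webui:

```python
import time
import urllib.request

def wait_until_ready(url="http://127.0.0.1:7860/internal/ping", interval=0.5):
    """Poll `url` until the server responds; the caller can then reload the UI."""
    while True:
        try:
            with urllib.request.urlopen(url, timeout=2):
                return  # server answered: safe to reload
        except OSError:  # connection refused, timeout, etc. while restarting
            time.sleep(interval)
```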
@@ -653,7 +653,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
     # WebP and JPG formats have maximum dimension limits of 16383 and 65535 respectively. switch to PNG which has a much higher limit
     if (image.height > 65535 or image.width > 65535) and extension.lower() in ("jpg", "jpeg") or (image.height > 16383 or image.width > 16383) and extension.lower() == "webp":
         print('Image dimensions too large; saving as PNG')
-        extension = ".png"
+        extension = "png"
 
     if save_to_dirs is None:
         save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
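The one-character change matters because `save_image` later joins the base name and extension with its own dot (roughly `f"{name}.{extension}"`), so storing `".png"` produced a doubled dot. A two-line demonstration with an illustrative basename:

```python
base = "00001-12345"  # illustrative basename, not a real webui output
for extension in (".png", "png"):
    print(f"{base}.{extension}")
# 00001-12345..png  <- old value: doubled dot in the saved filename
# 00001-12345.png   <- fixed value
```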
@@ -569,7 +569,7 @@ class Processed:
         self.all_negative_prompts = all_negative_prompts or p.all_negative_prompts or [self.negative_prompt]
         self.all_seeds = all_seeds or p.all_seeds or [self.seed]
         self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed]
-        self.infotexts = infotexts or [info]
+        self.infotexts = infotexts or [info] * len(images_list)
         self.version = program_version()
 
     def js(self):
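Previously the fallback produced a single-element list even when several images were generated, so indexing infotexts by image position could run out of range; multiplying by `len(images_list)` pads one entry per image. A toy illustration:

```python
images_list = ["img0", "img1", "img2"]  # stand-ins for generated images
info = "Prompt: ..., Steps: 20"         # illustrative infotext

infotexts_old = [info]                     # len 1: infotexts_old[2] raises IndexError
infotexts_new = [info] * len(images_list)  # len 3: one entry per image
print(infotexts_new[2])                    # Prompt: ..., Steps: 20
```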
@@ -486,7 +486,8 @@ def xformers_attention_forward(self, x, context=None, mask=None, **kwargs):
     k_in = self.to_k(context_k)
     v_in = self.to_v(context_v)
 
-    q, k, v = (rearrange(t, 'b n (h d) -> b n h d', h=h) for t in (q_in, k_in, v_in))
+    q, k, v = (t.reshape(t.shape[0], t.shape[1], h, -1) for t in (q_in, k_in, v_in))
+
     del q_in, k_in, v_in
 
     dtype = q.dtype
@@ -497,7 +498,8 @@ def xformers_attention_forward(self, x, context=None, mask=None, **kwargs):
 
     out = out.to(dtype)
 
-    out = rearrange(out, 'b n h d -> b n (h d)', h=h)
+    b, n, h, d = out.shape
+    out = out.reshape(b, n, h * d)
     return self.to_out(out)
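These two hunks drop einops from the attention hot path. The swap is safe because `(h d)` in `rearrange` splits and merges the last axis exactly as a contiguous `reshape` does. A quick equivalence check, assuming torch and einops are installed:

```python
import torch
from einops import rearrange

h = 8
t = torch.randn(2, 77, h * 64)  # (batch, tokens, heads * head_dim)

# split: 'b n (h d) -> b n h d' vs. reshape
split_einops = rearrange(t, 'b n (h d) -> b n h d', h=h)
split_reshape = t.reshape(t.shape[0], t.shape[1], h, -1)
assert torch.equal(split_einops, split_reshape)

# merge: 'b n h d -> b n (h d)' vs. reshape
out = torch.randn(2, 77, h, 64)
merged_einops = rearrange(out, 'b n h d -> b n (h d)')
b, n, heads, head_dim = out.shape
merged_reshape = out.reshape(b, n, heads * head_dim)
assert torch.equal(merged_einops, merged_reshape)
print("rearrange and reshape agree")
```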
@@ -4,6 +4,9 @@ import torch
 
 import k_diffusion
 
+import numpy as np
+
+from modules import shared
 
 @dataclasses.dataclass
 class Scheduler:
@@ -30,6 +33,33 @@ def sgm_uniform(n, sigma_min, sigma_max, inner_model, device):
     sigs += [0.0]
     return torch.FloatTensor(sigs).to(device)
 
+
+def get_align_your_steps_sigmas(n, sigma_min, sigma_max, device='cpu'):
+    # https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html
+    def loglinear_interp(t_steps, num_steps):
+        """
+        Performs log-linear interpolation of a given array of decreasing numbers.
+        """
+        xs = np.linspace(0, 1, len(t_steps))
+        ys = np.log(t_steps[::-1])
+
+        new_xs = np.linspace(0, 1, num_steps)
+        new_ys = np.interp(new_xs, xs, ys)
+
+        interped_ys = np.exp(new_ys)[::-1].copy()
+        return interped_ys
+
+    if shared.sd_model.is_sdxl:
+        sigmas = [14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.029]
+    else:
+        # Default to SD 1.5 sigmas.
+        sigmas = [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029]
+
+    if n != len(sigmas):
+        sigmas = np.append(loglinear_interp(sigmas, n), [0.0])
+    else:
+        sigmas.append(0.0)
+
+    return torch.FloatTensor(sigmas).to(device)
+
+
 def kl_optimal(n, sigma_min, sigma_max, device):
     alpha_min = torch.arctan(torch.tensor(sigma_min, device=device))
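Align Your Steps ships eleven hand-tuned sigmas; for any other step count, `loglinear_interp` resamples them linearly in log space, which preserves the endpoints and the roughly geometric spacing. A standalone check of that helper (numpy only; the function body mirrors the diff above):

```python
import numpy as np

def loglinear_interp(t_steps, num_steps):
    """Log-linear interpolation of a decreasing array of sigmas."""
    xs = np.linspace(0, 1, len(t_steps))
    ys = np.log(t_steps[::-1])          # reverse so the array is in increasing order
    new_xs = np.linspace(0, 1, num_steps)
    new_ys = np.interp(new_xs, xs, ys)
    return np.exp(new_ys)[::-1].copy()  # back to decreasing order

sd15_sigmas = [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029]
resampled = loglinear_interp(sd15_sigmas, 20)
assert len(resampled) == 20
assert np.all(np.diff(resampled) < 0)            # still strictly decreasing
assert np.isclose(resampled[0], sd15_sigmas[0])  # endpoints preserved
print(resampled.round(3))
```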
@@ -47,6 +77,7 @@ schedulers = [
     Scheduler('polyexponential', 'Polyexponential', k_diffusion.sampling.get_sigmas_polyexponential, default_rho=1.0),
     Scheduler('sgm_uniform', 'SGM Uniform', sgm_uniform, need_inner_model=True, aliases=["SGMUniform"]),
     Scheduler('kl_optimal', 'KL Optimal', kl_optimal),
+    Scheduler('align_your_steps', 'Align Your Steps', get_align_your_steps_sigmas),
 ]
 
 schedulers_map = {**{x.name: x for x in schedulers}, **{x.label: x for x in schedulers}}
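Once registered, the new scheduler is resolvable by either its internal name or its UI label, because `schedulers_map` merges two comprehensions keyed on each. A toy version of that dual-key registry, using an illustrative dataclass with just the two fields involved:

```python
import dataclasses

@dataclasses.dataclass
class Scheduler:
    name: str   # internal identifier
    label: str  # label shown in the UI

schedulers = [Scheduler('align_your_steps', 'Align Your Steps')]

# One map keyed by both name and label, so either string resolves the scheduler.
schedulers_map = {**{x.name: x for x in schedulers}, **{x.label: x for x in schedulers}}
assert schedulers_map['align_your_steps'] is schedulers_map['Align Your Steps']
```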
@@ -1,3 +1,4 @@
+setuptools==69.5.1 # temp fix for compatibility with some old packages
 GitPython==3.1.32
 Pillow==9.5.0
 accelerate==0.21.0