diff --git a/CHANGELOG.md b/CHANGELOG.md
index 295d26c8c..596b1ec45 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,8 @@
+## 1.9.4
+
+### Bug Fixes:
+* pin setuptools version to fix the startup error ([#15882](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15882))
+
## 1.9.3
### Bug Fixes:
diff --git a/extensions-builtin/Lora/networks.py b/extensions-builtin/Lora/networks.py
index 42b14dc23..18809364b 100644
--- a/extensions-builtin/Lora/networks.py
+++ b/extensions-builtin/Lora/networks.py
@@ -260,6 +260,16 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No
loaded_networks.clear()
+ unavailable_networks = []
+ for name in names:
+ if name.lower() in forbidden_network_aliases and available_networks.get(name) is None:
+ unavailable_networks.append(name)
+ elif available_network_aliases.get(name) is None:
+ unavailable_networks.append(name)
+
+ if unavailable_networks:
+ update_available_networks_by_names(unavailable_networks)
+
networks_on_disk = [available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None) for name in names]
if any(x is None for x in networks_on_disk):
list_available_networks()
@@ -566,22 +576,16 @@ def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs)
-def list_available_networks():
- available_networks.clear()
- available_network_aliases.clear()
- forbidden_network_aliases.clear()
- available_network_hash_lookup.clear()
- forbidden_network_aliases.update({"none": 1, "Addams": 1})
-
- os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
-
+def process_network_files(names: list[str] | None = None):
candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
candidates += list(shared.walk_files(shared.cmd_opts.lyco_dir_backcompat, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
for filename in candidates:
if os.path.isdir(filename):
continue
-
name = os.path.splitext(os.path.basename(filename))[0]
+ # if names is provided, only load networks with names in the list
+ if names and name not in names:
+ continue
try:
entry = network.NetworkOnDisk(name, filename)
except OSError: # should catch FileNotFoundError and PermissionError etc.
@@ -597,6 +601,22 @@ def list_available_networks():
available_network_aliases[entry.alias] = entry
+def update_available_networks_by_names(names: list[str]):
+ process_network_files(names)
+
+
+def list_available_networks():
+ available_networks.clear()
+ available_network_aliases.clear()
+ forbidden_network_aliases.clear()
+ available_network_hash_lookup.clear()
+ forbidden_network_aliases.update({"none": 1, "Addams": 1})
+
+ os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
+
+ process_network_files()
+
+
re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")
diff --git a/javascript/ui.js b/javascript/ui.js
index e0f5feebd..16faacebb 100644
--- a/javascript/ui.js
+++ b/javascript/ui.js
@@ -337,8 +337,8 @@ onOptionsChanged(function() {
let txt2img_textarea, img2img_textarea = undefined;
function restart_reload() {
+ document.body.style.backgroundColor = "var(--background-fill-primary)";
    document.body.innerHTML = '<h1 style="font-family:monospace; margin-top:20%; color:lightgray; text-align:center;">Reloading...</h1>';
-
var requestPing = function() {
requestGet("./internal/ping", {}, function(data) {
location.reload();
diff --git a/modules/images.py b/modules/images.py
index c0ff8a630..1be176cdf 100644
--- a/modules/images.py
+++ b/modules/images.py
@@ -653,7 +653,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
# WebP and JPG formats have maximum dimension limits of 16383 and 65535 respectively. switch to PNG which has a much higher limit
if (image.height > 65535 or image.width > 65535) and extension.lower() in ("jpg", "jpeg") or (image.height > 16383 or image.width > 16383) and extension.lower() == "webp":
print('Image dimensions too large; saving as PNG')
- extension = ".png"
+ extension = "png"
if save_to_dirs is None:
save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)
diff --git a/modules/processing.py b/modules/processing.py
index 76557dd7f..c22da4169 100644
--- a/modules/processing.py
+++ b/modules/processing.py
@@ -569,7 +569,7 @@ class Processed:
self.all_negative_prompts = all_negative_prompts or p.all_negative_prompts or [self.negative_prompt]
self.all_seeds = all_seeds or p.all_seeds or [self.seed]
self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed]
- self.infotexts = infotexts or [info]
+ self.infotexts = infotexts or [info] * len(images_list)
self.version = program_version()
def js(self):
diff --git a/modules/sd_hijack_optimizations.py b/modules/sd_hijack_optimizations.py
index 7f9e328d0..0269f1f5b 100644
--- a/modules/sd_hijack_optimizations.py
+++ b/modules/sd_hijack_optimizations.py
@@ -486,7 +486,8 @@ def xformers_attention_forward(self, x, context=None, mask=None, **kwargs):
k_in = self.to_k(context_k)
v_in = self.to_v(context_v)
- q, k, v = (rearrange(t, 'b n (h d) -> b n h d', h=h) for t in (q_in, k_in, v_in))
+ q, k, v = (t.reshape(t.shape[0], t.shape[1], h, -1) for t in (q_in, k_in, v_in))
+
del q_in, k_in, v_in
dtype = q.dtype
@@ -497,7 +498,8 @@ def xformers_attention_forward(self, x, context=None, mask=None, **kwargs):
out = out.to(dtype)
- out = rearrange(out, 'b n h d -> b n (h d)', h=h)
+ b, n, h, d = out.shape
+ out = out.reshape(b, n, h * d)
return self.to_out(out)
diff --git a/modules/sd_schedulers.py b/modules/sd_schedulers.py
index 99a6f7be2..0c09af8d0 100644
--- a/modules/sd_schedulers.py
+++ b/modules/sd_schedulers.py
@@ -4,6 +4,9 @@ import torch
import k_diffusion
+import numpy as np
+
+from modules import shared
@dataclasses.dataclass
class Scheduler:
@@ -30,6 +33,33 @@ def sgm_uniform(n, sigma_min, sigma_max, inner_model, device):
sigs += [0.0]
return torch.FloatTensor(sigs).to(device)
+def get_align_your_steps_sigmas(n, sigma_min, sigma_max, device='cpu'):
+ # https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html
+ def loglinear_interp(t_steps, num_steps):
+ """
+ Performs log-linear interpolation of a given array of decreasing numbers.
+ """
+ xs = np.linspace(0, 1, len(t_steps))
+ ys = np.log(t_steps[::-1])
+
+ new_xs = np.linspace(0, 1, num_steps)
+ new_ys = np.interp(new_xs, xs, ys)
+
+ interped_ys = np.exp(new_ys)[::-1].copy()
+ return interped_ys
+
+ if shared.sd_model.is_sdxl:
+ sigmas = [14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.029]
+ else:
+ # Default to SD 1.5 sigmas.
+ sigmas = [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029]
+
+ if n != len(sigmas):
+ sigmas = np.append(loglinear_interp(sigmas, n), [0.0])
+ else:
+ sigmas.append(0.0)
+
+ return torch.FloatTensor(sigmas).to(device)
def kl_optimal(n, sigma_min, sigma_max, device):
alpha_min = torch.arctan(torch.tensor(sigma_min, device=device))
@@ -47,6 +77,7 @@ schedulers = [
Scheduler('polyexponential', 'Polyexponential', k_diffusion.sampling.get_sigmas_polyexponential, default_rho=1.0),
Scheduler('sgm_uniform', 'SGM Uniform', sgm_uniform, need_inner_model=True, aliases=["SGMUniform"]),
Scheduler('kl_optimal', 'KL Optimal', kl_optimal),
+ Scheduler('align_your_steps', 'Align Your Steps', get_align_your_steps_sigmas),
]
schedulers_map = {**{x.name: x for x in schedulers}, **{x.label: x for x in schedulers}}
diff --git a/requirements_versions.txt b/requirements_versions.txt
index 3df74f3d6..3037a395b 100644
--- a/requirements_versions.txt
+++ b/requirements_versions.txt
@@ -1,3 +1,4 @@
+setuptools==69.5.1 # temp fix for compatibility with some old packages
GitPython==3.1.32
Pillow==9.5.0
accelerate==0.21.0