Merge branch 'dev' into bgh-handle-metadata-issues-more-cleanly

commit 3ef9f2748d
@@ -1,3 +1,8 @@
+## 1.9.4
+
+### Bug Fixes:
+* pin setuptools version to fix the startup error ([#15882](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/15882))
+
 ## 1.9.3

 ### Bug Fixes:

@@ -40,7 +40,7 @@ model:
         use_spatial_transformer: True
         transformer_depth: 1
         context_dim: 768
-        use_checkpoint: True
+        use_checkpoint: False
         legacy: False

     first_stage_config:

@@ -41,7 +41,7 @@ model:
         use_linear_in_transformer: True
         transformer_depth: 1
         context_dim: 1024
-        use_checkpoint: True
+        use_checkpoint: False
         legacy: False

     first_stage_config:

@@ -45,7 +45,7 @@ model:
         use_spatial_transformer: True
         transformer_depth: 1
         context_dim: 768
-        use_checkpoint: True
+        use_checkpoint: False
         legacy: False

     first_stage_config:

@@ -21,7 +21,7 @@ model:
       params:
         adm_in_channels: 2816
         num_classes: sequential
-        use_checkpoint: True
+        use_checkpoint: False
         in_channels: 9
         out_channels: 4
         model_channels: 320

@@ -40,7 +40,7 @@ model:
         use_spatial_transformer: True
         transformer_depth: 1
         context_dim: 768
-        use_checkpoint: True
+        use_checkpoint: False
         legacy: False

     first_stage_config:

@@ -40,7 +40,7 @@ model:
         use_spatial_transformer: True
         transformer_depth: 1
         context_dim: 768
-        use_checkpoint: True
+        use_checkpoint: False
         legacy: False

     first_stage_config:

@@ -143,6 +143,14 @@ def assign_network_names_to_compvis_modules(sd_model):
     sd_model.network_layer_mapping = network_layer_mapping


+class BundledTIHash(str):
+    def __init__(self, hash_str):
+        self.hash = hash_str
+
+    def __str__(self):
+        return self.hash if shared.opts.lora_bundled_ti_to_infotext else ''
+
+
 def load_network(name, network_on_disk):
     net = network.Network(name, network_on_disk)
     net.mtime = os.path.getmtime(network_on_disk.filename)

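A minimal, self-contained sketch of the idea behind `BundledTIHash`: subclassing `str` lets the hash take part in ordinary string handling, while the conditional `__str__` makes it vanish from infotext when the option is off. The `_Opts` stub below stands in for `shared.opts` and is not part of the webui code.

```python
class _Opts:
    lora_bundled_ti_to_infotext = True  # stand-in for the new option added further down

opts = _Opts()

class BundledTIHash(str):
    def __init__(self, hash_str):
        self.hash = hash_str

    def __str__(self):
        # empty string when the option is disabled, so the hash is omitted from infotext
        return self.hash if opts.lora_bundled_ti_to_infotext else ''

h = BundledTIHash("my-lora")
print("TI hashes: " + str(h))   # "TI hashes: my-lora" while the option is on
opts.lora_bundled_ti_to_infotext = False
print("TI hashes: " + str(h))   # "TI hashes: " once the option is off
```
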
@@ -229,6 +237,7 @@ def load_network(name, network_on_disk):
     for emb_name, data in bundle_embeddings.items():
         embedding = textual_inversion.create_embedding_from_data(data, emb_name, filename=network_on_disk.filename + "/" + emb_name)
         embedding.loaded = None
+        embedding.shorthash = BundledTIHash(name)
         embeddings[emb_name] = embedding

     net.bundle_embeddings = embeddings

@@ -260,6 +269,16 @@ def load_networks(names, te_multipliers=None, unet_multipliers=None, dyn_dims=No

     loaded_networks.clear()

+    unavailable_networks = []
+    for name in names:
+        if name.lower() in forbidden_network_aliases and available_networks.get(name) is None:
+            unavailable_networks.append(name)
+        elif available_network_aliases.get(name) is None:
+            unavailable_networks.append(name)
+
+    if unavailable_networks:
+        update_available_networks_by_names(unavailable_networks)
+
     networks_on_disk = [available_networks.get(name, None) if name.lower() in forbidden_network_aliases else available_network_aliases.get(name, None) for name in names]
     if any(x is None for x in networks_on_disk):
         list_available_networks()

@@ -378,13 +397,18 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
         self.network_weights_backup = weights_backup

     bias_backup = getattr(self, "network_bias_backup", None)
-    if bias_backup is None:
+    if bias_backup is None and wanted_names != ():
         if isinstance(self, torch.nn.MultiheadAttention) and self.out_proj.bias is not None:
             bias_backup = self.out_proj.bias.to(devices.cpu, copy=True)
         elif getattr(self, 'bias', None) is not None:
             bias_backup = self.bias.to(devices.cpu, copy=True)
         else:
             bias_backup = None
+
+        # Unlike weight which always has value, some modules don't have bias.
+        # Only report if bias is not None and current bias are not unchanged.
+        if bias_backup is not None and current_names != ():
+            raise RuntimeError("no backup bias found and current bias are not unchanged")
         self.network_bias_backup = bias_backup

     if current_names != wanted_names:

@@ -566,22 +590,16 @@ def network_MultiheadAttention_load_state_dict(self, *args, **kwargs):
     return originals.MultiheadAttention_load_state_dict(self, *args, **kwargs)


-def list_available_networks():
-    available_networks.clear()
-    available_network_aliases.clear()
-    forbidden_network_aliases.clear()
-    available_network_hash_lookup.clear()
-    forbidden_network_aliases.update({"none": 1, "Addams": 1})
-
-    os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
-
+def process_network_files(names: list[str] | None = None):
     candidates = list(shared.walk_files(shared.cmd_opts.lora_dir, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
     candidates += list(shared.walk_files(shared.cmd_opts.lyco_dir_backcompat, allowed_extensions=[".pt", ".ckpt", ".safetensors"]))
     for filename in candidates:
         if os.path.isdir(filename):
             continue

         name = os.path.splitext(os.path.basename(filename))[0]
+        # if names is provided, only load networks with names in the list
+        if names and name not in names:
+            continue
         try:
             entry = network.NetworkOnDisk(name, filename)
         except OSError:  # should catch FileNotFoundError and PermissionError etc.

@@ -597,6 +615,22 @@ def list_available_networks():
         available_network_aliases[entry.alias] = entry


+def update_available_networks_by_names(names: list[str]):
+    process_network_files(names)
+
+
+def list_available_networks():
+    available_networks.clear()
+    available_network_aliases.clear()
+    forbidden_network_aliases.clear()
+    available_network_hash_lookup.clear()
+    forbidden_network_aliases.update({"none": 1, "Addams": 1})
+
+    os.makedirs(shared.cmd_opts.lora_dir, exist_ok=True)
+
+    process_network_files()
+
+
 re_network_name = re.compile(r"(.*)\s*\([0-9a-fA-F]+\)")

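A rough sketch (not the extension's actual code) of the control flow the hunks above set up: a full rescan still clears every registry and walks all files, while `update_available_networks_by_names` re-reads only the requested names, so a Lora that is missing from the registry can be picked up lazily without a full refresh.

```python
# Stand-in registry and file list; the real code walks shared.cmd_opts.lora_dir.
available_networks = {}
FILES = ["style_a.safetensors", "style_b.safetensors"]

def process_network_files(names=None):
    for filename in FILES:
        name = filename.rsplit(".", 1)[0]
        # if names is provided, only load networks with names in the list
        if names and name not in names:
            continue
        available_networks[name] = filename

def update_available_networks_by_names(names):
    process_network_files(names)

def list_available_networks():
    available_networks.clear()
    process_network_files()

list_available_networks()                        # full rescan
update_available_networks_by_names(["style_b"])  # targeted, cheap refresh
```
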
@@ -36,6 +36,7 @@ shared.options_templates.update(shared.options_section(('extra_networks', "Extra
     "sd_lora": shared.OptionInfo("None", "Add network to prompt", gr.Dropdown, lambda: {"choices": ["None", *networks.available_networks]}, refresh=networks.list_available_networks),
     "lora_preferred_name": shared.OptionInfo("Alias from file", "When adding to prompt, refer to Lora by", gr.Radio, {"choices": ["Alias from file", "Filename"]}),
     "lora_add_hashes_to_infotext": shared.OptionInfo(True, "Add Lora hashes to infotext"),
+    "lora_bundled_ti_to_infotext": shared.OptionInfo(True, "Add Lora name as TI hashes for bundled Textual Inversion").info('"Add Textual Inversion hashes to infotext" needs to be enabled'),
     "lora_show_all": shared.OptionInfo(False, "Always show all networks on the Lora page").info("otherwise, those detected as for incompatible version of Stable Diffusion will be hidden"),
     "lora_hide_unknown_for_versions": shared.OptionInfo([], "Hide networks of unknown versions for model versions", gr.CheckboxGroup, {"choices": ["SD1", "SD2", "SDXL"]}),
     "lora_in_memory_limit": shared.OptionInfo(0, "Number of Lora networks to keep cached in memory", gr.Number, {"precision": 0}),

@@ -60,7 +60,7 @@ class ExtraNetworksPageLora(ui_extra_networks.ExtraNetworksPage):
         else:
             sd_version = lora_on_disk.sd_version

-        if shared.opts.lora_show_all or not enable_filter:
+        if shared.opts.lora_show_all or not enable_filter or not shared.sd_model:
             pass
         elif sd_version == network.SdVersion.Unknown:
             model_version = network.SdVersion.SDXL if shared.sd_model.is_sdxl else network.SdVersion.SD2 if shared.sd_model.is_sd2 else network.SdVersion.SD1

@@ -1,6 +1,5 @@
 import hypertile
 from modules import scripts, script_callbacks, shared
-from scripts.hypertile_xyz import add_axis_options


 class ScriptHypertile(scripts.Script):

@@ -93,7 +92,6 @@ def on_ui_settings():
         "hypertile_max_depth_unet": shared.OptionInfo(3, "Hypertile U-Net max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile U-Net max depth").info("larger = more neural network layers affected; minor effect on performance"),
         "hypertile_max_tile_unet": shared.OptionInfo(256, "Hypertile U-Net max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile U-Net max tile size").info("larger = worse performance"),
         "hypertile_swap_size_unet": shared.OptionInfo(3, "Hypertile U-Net swap size", gr.Slider, {"minimum": 0, "maximum": 64, "step": 1}, infotext="Hypertile U-Net swap size"),
-
         "hypertile_enable_vae": shared.OptionInfo(False, "Enable Hypertile VAE", infotext="Hypertile VAE").info("minimal change in the generated picture"),
         "hypertile_max_depth_vae": shared.OptionInfo(3, "Hypertile VAE max depth", gr.Slider, {"minimum": 0, "maximum": 3, "step": 1}, infotext="Hypertile VAE max depth"),
         "hypertile_max_tile_vae": shared.OptionInfo(128, "Hypertile VAE max tile size", gr.Slider, {"minimum": 0, "maximum": 512, "step": 16}, infotext="Hypertile VAE max tile size"),

@@ -105,5 +103,20 @@ def on_ui_settings():
         shared.opts.add_option(name, opt)


+def add_axis_options():
+    xyz_grid = [x for x in scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module
+    xyz_grid.axis_options.extend([
+        xyz_grid.AxisOption("[Hypertile] Unet First pass Enabled", str, xyz_grid.apply_override('hypertile_enable_unet', boolean=True), choices=xyz_grid.boolean_choice(reverse=True)),
+        xyz_grid.AxisOption("[Hypertile] Unet Second pass Enabled", str, xyz_grid.apply_override('hypertile_enable_unet_secondpass', boolean=True), choices=xyz_grid.boolean_choice(reverse=True)),
+        xyz_grid.AxisOption("[Hypertile] Unet Max Depth", int, xyz_grid.apply_override("hypertile_max_depth_unet"), confirm=xyz_grid.confirm_range(0, 3, '[Hypertile] Unet Max Depth'), choices=lambda: [str(x) for x in range(4)]),
+        xyz_grid.AxisOption("[Hypertile] Unet Max Tile Size", int, xyz_grid.apply_override("hypertile_max_tile_unet"), confirm=xyz_grid.confirm_range(0, 512, '[Hypertile] Unet Max Tile Size')),
+        xyz_grid.AxisOption("[Hypertile] Unet Swap Size", int, xyz_grid.apply_override("hypertile_swap_size_unet"), confirm=xyz_grid.confirm_range(0, 64, '[Hypertile] Unet Swap Size')),
+        xyz_grid.AxisOption("[Hypertile] VAE Enabled", str, xyz_grid.apply_override('hypertile_enable_vae', boolean=True), choices=xyz_grid.boolean_choice(reverse=True)),
+        xyz_grid.AxisOption("[Hypertile] VAE Max Depth", int, xyz_grid.apply_override("hypertile_max_depth_vae"), confirm=xyz_grid.confirm_range(0, 3, '[Hypertile] VAE Max Depth'), choices=lambda: [str(x) for x in range(4)]),
+        xyz_grid.AxisOption("[Hypertile] VAE Max Tile Size", int, xyz_grid.apply_override("hypertile_max_tile_vae"), confirm=xyz_grid.confirm_range(0, 512, '[Hypertile] VAE Max Tile Size')),
+        xyz_grid.AxisOption("[Hypertile] VAE Swap Size", int, xyz_grid.apply_override("hypertile_swap_size_vae"), confirm=xyz_grid.confirm_range(0, 64, '[Hypertile] VAE Swap Size')),
+    ])
+
+
 script_callbacks.on_ui_settings(on_ui_settings)
 script_callbacks.on_before_ui(add_axis_options)

@@ -1,51 +0,0 @@
-from modules import scripts
-from modules.shared import opts
-
-xyz_grid = [x for x in scripts.scripts_data if x.script_class.__module__ == "xyz_grid.py"][0].module
-
-def int_applier(value_name:str, min_range:int = -1, max_range:int = -1):
-    """
-    Returns a function that applies the given value to the given value_name in opts.data.
-    """
-    def validate(value_name:str, value:str):
-        value = int(value)
-        # validate value
-        if not min_range == -1:
-            assert value >= min_range, f"Value {value} for {value_name} must be greater than or equal to {min_range}"
-        if not max_range == -1:
-            assert value <= max_range, f"Value {value} for {value_name} must be less than or equal to {max_range}"
-    def apply_int(p, x, xs):
-        validate(value_name, x)
-        opts.data[value_name] = int(x)
-    return apply_int
-
-def bool_applier(value_name:str):
-    """
-    Returns a function that applies the given value to the given value_name in opts.data.
-    """
-    def validate(value_name:str, value:str):
-        assert value.lower() in ["true", "false"], f"Value {value} for {value_name} must be either true or false"
-    def apply_bool(p, x, xs):
-        validate(value_name, x)
-        value_boolean = x.lower() == "true"
-        opts.data[value_name] = value_boolean
-    return apply_bool
-
-def add_axis_options():
-    extra_axis_options = [
-        xyz_grid.AxisOption("[Hypertile] Unet First pass Enabled", str, bool_applier("hypertile_enable_unet"), choices=xyz_grid.boolean_choice(reverse=True)),
-        xyz_grid.AxisOption("[Hypertile] Unet Second pass Enabled", str, bool_applier("hypertile_enable_unet_secondpass"), choices=xyz_grid.boolean_choice(reverse=True)),
-        xyz_grid.AxisOption("[Hypertile] Unet Max Depth", int, int_applier("hypertile_max_depth_unet", 0, 3), choices=lambda: [str(x) for x in range(4)]),
-        xyz_grid.AxisOption("[Hypertile] Unet Max Tile Size", int, int_applier("hypertile_max_tile_unet", 0, 512)),
-        xyz_grid.AxisOption("[Hypertile] Unet Swap Size", int, int_applier("hypertile_swap_size_unet", 0, 64)),
-        xyz_grid.AxisOption("[Hypertile] VAE Enabled", str, bool_applier("hypertile_enable_vae"), choices=xyz_grid.boolean_choice(reverse=True)),
-        xyz_grid.AxisOption("[Hypertile] VAE Max Depth", int, int_applier("hypertile_max_depth_vae", 0, 3), choices=lambda: [str(x) for x in range(4)]),
-        xyz_grid.AxisOption("[Hypertile] VAE Max Tile Size", int, int_applier("hypertile_max_tile_vae", 0, 512)),
-        xyz_grid.AxisOption("[Hypertile] VAE Swap Size", int, int_applier("hypertile_swap_size_vae", 0, 64)),
-    ]
-    set_a = {opt.label for opt in xyz_grid.axis_options}
-    set_b = {opt.label for opt in extra_axis_options}
-    if set_a.intersection(set_b):
-        return
-
-    xyz_grid.axis_options.extend(extra_axis_options)

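The deleted `hypertile_xyz.py` above rolled its own `int_applier`/`bool_applier` closures; the replacement relies on the XYZ grid script's built-in `apply_override` helper instead. A rough, self-contained sketch of that applier pattern (names and internals below are illustrative, not the grid script's exact code):

```python
# Each axis option wraps a callable that writes one setting before a run.
settings = {}

def apply_override(key, boolean=False):
    def apply(p, x, xs):  # (processing object, current value, all values) signature
        settings[key] = (str(x).lower() == "true") if boolean else x
    return apply

apply_override("hypertile_max_depth_unet")(None, 2, None)
apply_override("hypertile_enable_unet", boolean=True)(None, "True", None)
print(settings)  # {'hypertile_max_depth_unet': 2, 'hypertile_enable_unet': True}
```
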
@@ -3,6 +3,7 @@ import gradio as gr
 import math
 from modules.ui_components import InputAccordion
 import modules.scripts as scripts
+from modules.torch_utils import float64


 class SoftInpaintingSettings:

@@ -79,13 +80,11 @@ def latent_blend(settings, a, b, t):

     # Calculate the magnitude of the interpolated vectors. (We will remove this magnitude.)
     # 64-bit operations are used here to allow large exponents.
-    current_magnitude = torch.norm(image_interp, p=2, dim=1, keepdim=True).to(torch.float64).add_(0.00001)
+    current_magnitude = torch.norm(image_interp, p=2, dim=1, keepdim=True).to(float64(image_interp)).add_(0.00001)

     # Interpolate the powered magnitudes, then un-power them (bring them back to a power of 1).
-    a_magnitude = torch.norm(a, p=2, dim=1, keepdim=True).to(torch.float64).pow_(
-        settings.inpaint_detail_preservation) * one_minus_t3
-    b_magnitude = torch.norm(b, p=2, dim=1, keepdim=True).to(torch.float64).pow_(
-        settings.inpaint_detail_preservation) * t3
+    a_magnitude = torch.norm(a, p=2, dim=1, keepdim=True).to(float64(a)).pow_(settings.inpaint_detail_preservation) * one_minus_t3
+    b_magnitude = torch.norm(b, p=2, dim=1, keepdim=True).to(float64(b)).pow_(settings.inpaint_detail_preservation) * t3
     desired_magnitude = a_magnitude
     desired_magnitude.add_(b_magnitude).pow_(1 / settings.inpaint_detail_preservation)
     del a_magnitude, b_magnitude, t3, one_minus_t3

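The `float64(x)` helper imported above is assumed to pick `torch.float64` normally and fall back to `torch.float32` on backends such as MPS that cannot do 64-bit float math; the sketch below only illustrates that assumption and is not the webui's exact implementation.

```python
import torch

def float64(x: torch.Tensor) -> torch.dtype:
    # assumption: mirrors modules.torch_utils.float64
    return torch.float64 if x.device.type != "mps" else torch.float32

a = torch.rand(1, 4, 8, 8)
magnitude = torch.norm(a, p=2, dim=1, keepdim=True).to(float64(a)).add_(1e-5)
print(magnitude.dtype)  # torch.float64 on CPU/CUDA, torch.float32 on MPS
```
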
@@ -8,9 +8,6 @@ var contextMenuInit = function() {
     };

     function showContextMenu(event, element, menuEntries) {
-        let posx = event.clientX + document.body.scrollLeft + document.documentElement.scrollLeft;
-        let posy = event.clientY + document.body.scrollTop + document.documentElement.scrollTop;
-
         let oldMenu = gradioApp().querySelector('#context-menu');
         if (oldMenu) {
             oldMenu.remove();

@@ -23,10 +20,8 @@ var contextMenuInit = function() {
         contextMenu.style.background = baseStyle.background;
         contextMenu.style.color = baseStyle.color;
         contextMenu.style.fontFamily = baseStyle.fontFamily;
-        contextMenu.style.top = posy + 'px';
-        contextMenu.style.left = posx + 'px';
-
-
+        contextMenu.style.top = event.pageY + 'px';
+        contextMenu.style.left = event.pageX + 'px';

         const contextMenuList = document.createElement('ul');
         contextMenuList.className = 'context-menu-items';

@@ -43,21 +38,6 @@ var contextMenuInit = function() {
         });

         gradioApp().appendChild(contextMenu);
-
-        let menuWidth = contextMenu.offsetWidth + 4;
-        let menuHeight = contextMenu.offsetHeight + 4;
-
-        let windowWidth = window.innerWidth;
-        let windowHeight = window.innerHeight;
-
-        if ((windowWidth - posx) < menuWidth) {
-            contextMenu.style.left = windowWidth - menuWidth + "px";
-        }
-
-        if ((windowHeight - posy) < menuHeight) {
-            contextMenu.style.top = windowHeight - menuHeight + "px";
-        }
-
     }

     function appendContextMenuOption(targetElementSelector, entryName, entryFunction) {

@@ -107,16 +87,23 @@ var contextMenuInit = function() {
                 oldMenu.remove();
             }
         });
-        gradioApp().addEventListener("contextmenu", function(e) {
-            let oldMenu = gradioApp().querySelector('#context-menu');
-            if (oldMenu) {
-                oldMenu.remove();
-            }
-            menuSpecs.forEach(function(v, k) {
-                if (e.composedPath()[0].matches(k)) {
-                    showContextMenu(e, e.composedPath()[0], v);
-                    e.preventDefault();
-                }
-            });
-        });
+        ['contextmenu', 'touchstart'].forEach((eventType) => {
+            gradioApp().addEventListener(eventType, function(e) {
+                let ev = e;
+                if (eventType.startsWith('touch')) {
+                    if (e.touches.length !== 2) return;
+                    ev = e.touches[0];
+                }
+                let oldMenu = gradioApp().querySelector('#context-menu');
+                if (oldMenu) {
+                    oldMenu.remove();
+                }
+                menuSpecs.forEach(function(v, k) {
+                    if (e.composedPath()[0].matches(k)) {
+                        showContextMenu(ev, e.composedPath()[0], v);
+                        e.preventDefault();
+                    }
+                });
+            });
+        });
         eventListenerApplied = true;

javascript/dragdrop.js

@@ -56,6 +56,15 @@ function eventHasFiles(e) {
     return false;
 }

+function isURL(url) {
+    try {
+        const _ = new URL(url);
+        return true;
+    } catch {
+        return false;
+    }
+}
+
 function dragDropTargetIsPrompt(target) {
     if (target?.placeholder && target?.placeholder.indexOf("Prompt") >= 0) return true;
     if (target?.parentNode?.parentNode?.className?.indexOf("prompt") > 0) return true;

@@ -77,7 +86,7 @@ window.document.addEventListener('dragover', e => {
 window.document.addEventListener('drop', async e => {
     const target = e.composedPath()[0];
     const url = e.dataTransfer.getData('text/uri-list') || e.dataTransfer.getData('text/plain');
-    if (!eventHasFiles(e) && !url) return;
+    if (!eventHasFiles(e) && !isURL(url)) return;

     if (dragDropTargetIsPrompt(target)) {
         e.stopPropagation();

@@ -51,14 +51,7 @@ function modalImageSwitch(offset) {
     var galleryButtons = all_gallery_buttons();

     if (galleryButtons.length > 1) {
-        var currentButton = selected_gallery_button();
-
-        var result = -1;
-        galleryButtons.forEach(function(v, i) {
-            if (v == currentButton) {
-                result = i;
-            }
-        });
+        var result = selected_gallery_index();

         if (result != -1) {
             var nextButton = galleryButtons[negmod((result + offset), galleryButtons.length)];

@@ -337,8 +337,8 @@ onOptionsChanged(function() {
 let txt2img_textarea, img2img_textarea = undefined;

 function restart_reload() {
+    document.body.style.backgroundColor = "var(--background-fill-primary)";
     document.body.innerHTML = '<h1 style="font-family:monospace;margin-top:20%;color:lightgray;text-align:center;">Reloading...</h1>';
-
     var requestPing = function() {
         requestGet("./internal/ping", {}, function(data) {
             location.reload();

@@ -438,15 +438,19 @@ class Api:
         self.apply_infotext(txt2imgreq, "txt2img", script_runner=script_runner, mentioned_script_args=infotext_script_args)

         selectable_scripts, selectable_script_idx = self.get_selectable_script(txt2imgreq.script_name, script_runner)
+        sampler, scheduler = sd_samplers.get_sampler_and_scheduler(txt2imgreq.sampler_name or txt2imgreq.sampler_index, txt2imgreq.scheduler)

         populate = txt2imgreq.copy(update={  # Override __init__ params
-            "sampler_name": validate_sampler_name(txt2imgreq.sampler_name or txt2imgreq.sampler_index),
+            "sampler_name": validate_sampler_name(sampler),
             "do_not_save_samples": not txt2imgreq.save_images,
             "do_not_save_grid": not txt2imgreq.save_images,
         })
         if populate.sampler_name:
             populate.sampler_index = None  # prevent a warning later on

+        if not populate.scheduler and scheduler != "Automatic":
+            populate.scheduler = scheduler
+
         args = vars(populate)
         args.pop('script_name', None)
         args.pop('script_args', None)  # will refeed them to the pipeline directly after initializing them

@@ -502,9 +506,10 @@ class Api:
         self.apply_infotext(img2imgreq, "img2img", script_runner=script_runner, mentioned_script_args=infotext_script_args)

         selectable_scripts, selectable_script_idx = self.get_selectable_script(img2imgreq.script_name, script_runner)
+        sampler, scheduler = sd_samplers.get_sampler_and_scheduler(img2imgreq.sampler_name or img2imgreq.sampler_index, img2imgreq.scheduler)

         populate = img2imgreq.copy(update={  # Override __init__ params
-            "sampler_name": validate_sampler_name(img2imgreq.sampler_name or img2imgreq.sampler_index),
+            "sampler_name": validate_sampler_name(sampler),
             "do_not_save_samples": not img2imgreq.save_images,
             "do_not_save_grid": not img2imgreq.save_images,
             "mask": mask,

@@ -512,6 +517,9 @@ class Api:
         if populate.sampler_name:
             populate.sampler_index = None  # prevent a warning later on

+        if not populate.scheduler and scheduler != "Automatic":
+            populate.scheduler = scheduler
+
         args = vars(populate)
         args.pop('include_init_images', None)  # this is meant to be done by "exclude": True in model, but it's for a reason that I cannot determine.
         args.pop('script_name', None)

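A hedged usage sketch for the API change above: with `get_sampler_and_scheduler` in the path, a combined name such as "DPM++ 2M Karras" sent as `sampler_name` should be split server-side, and the scheduler part should land in the request's `scheduler` field when the client leaves it unset. The endpoint and fields are the standard txt2img API; the exact split is an assumption about `sd_samplers.get_sampler_and_scheduler`.

```python
import requests

payload = {
    "prompt": "a photo of a cat",
    "steps": 20,
    "sampler_name": "DPM++ 2M Karras",  # combined sampler + scheduler name; scheduler left unset
}
resp = requests.post("http://127.0.0.1:7860/sdapi/v1/txt2img", json=payload, timeout=600)
resp.raise_for_status()
params = resp.json()["parameters"]  # echo of the populated request
print(params.get("sampler_name"), params.get("scheduler"))  # expected: "DPM++ 2M" "Karras" (assumption)
```
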
@@ -20,6 +20,7 @@ parser.add_argument("--dump-sysinfo", action='store_true', help="launch.py argum
 parser.add_argument("--loglevel", type=str, help="log level; one of: CRITICAL, ERROR, WARNING, INFO, DEBUG", default=None)
 parser.add_argument("--do-not-download-clip", action='store_true', help="do not download CLIP model even if it's not included in the checkpoint")
 parser.add_argument("--data-dir", type=normalized_filepath, default=os.path.dirname(os.path.dirname(os.path.realpath(__file__))), help="base path where all user data is stored")
+parser.add_argument("--models-dir", type=normalized_filepath, default=None, help="base path where models are stored; overrides --data-dir")
 parser.add_argument("--config", type=normalized_filepath, default=sd_default_config, help="path to config which constructs model",)
 parser.add_argument("--ckpt", type=normalized_filepath, default=sd_model_file, help="path to checkpoint of stable diffusion model; if specified, this checkpoint will be added to the list of checkpoints and loaded",)
 parser.add_argument("--ckpt-dir", type=normalized_filepath, default=None, help="Path to directory with stable diffusion checkpoints")

@@ -41,7 +42,7 @@ parser.add_argument("--lowvram", action='store_true', help="enable stable diffus
 parser.add_argument("--lowram", action='store_true', help="load stable diffusion checkpoint weights to VRAM instead of RAM")
 parser.add_argument("--always-batch-cond-uncond", action='store_true', help="does not do anything")
 parser.add_argument("--unload-gfpgan", action='store_true', help="does not do anything.")
-parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "autocast"], default="autocast")
+parser.add_argument("--precision", type=str, help="evaluate at this precision", choices=["full", "half", "autocast"], default="autocast")
 parser.add_argument("--upcast-sampling", action='store_true', help="upcast sampling. No effect with --no-half. Usually produces similar results to --no-half with better performance while using less memory.")
 parser.add_argument("--share", action='store_true', help="use share=True for gradio and make the UI accessible through their site")
 parser.add_argument("--ngrok", type=str, help="ngrok authtoken, alternative to gradio --share", default=None)

@@ -114,6 +114,9 @@ errors.run(enable_tf32, "Enabling TF32")

 cpu: torch.device = torch.device("cpu")
 fp8: bool = False
+# Force fp16 for all models in inference. No casting during inference.
+# This flag is controlled by "--precision half" command line arg.
+force_fp16: bool = False
 device: torch.device = None
 device_interrogate: torch.device = None
 device_gfpgan: torch.device = None

@@ -127,6 +130,8 @@ unet_needs_upcast = False


 def cond_cast_unet(input):
+    if force_fp16:
+        return input.to(torch.float16)
     return input.to(dtype_unet) if unet_needs_upcast else input


@@ -206,6 +211,11 @@ def autocast(disable=False):
     if disable:
         return contextlib.nullcontext()

+    if force_fp16:
+        # No casting during inference if force_fp16 is enabled.
+        # All tensor dtype conversion happens before inference.
+        return contextlib.nullcontext()
+
     if fp8 and device==cpu:
         return torch.autocast("cpu", dtype=torch.bfloat16, enabled=True)

@@ -269,3 +279,17 @@ def first_time_calculation():
     x = torch.zeros((1, 1, 3, 3)).to(device, dtype)
     conv2d = torch.nn.Conv2d(1, 1, (3, 3)).to(device, dtype)
     conv2d(x)
+
+
+def force_model_fp16():
+    """
+    ldm and sgm has modules.diffusionmodules.util.GroupNorm32.forward, which
+    force conversion of input to float32. If force_fp16 is enabled, we need to
+    prevent this casting.
+    """
+    assert force_fp16
+    import sgm.modules.diffusionmodules.util as sgm_util
+    import ldm.modules.diffusionmodules.util as ldm_util
+    sgm_util.GroupNorm32 = torch.nn.GroupNorm
+    ldm_util.GroupNorm32 = torch.nn.GroupNorm
+    print("ldm/sgm GroupNorm32 replaced with normal torch.nn.GroupNorm due to `--precision half`.")

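A simplified illustration of why the `GroupNorm32` patch above matters under `--precision half`: ldm/sgm's `GroupNorm32` upcasts its input to float32 inside `forward`, so even a fully fp16 model keeps paying for fp32 round-trips, while a plain `torch.nn.GroupNorm` converted to half stays in fp16. The `GroupNorm32` class below is a stand-in for illustration, not the ldm/sgm original.

```python
import torch

class GroupNorm32(torch.nn.GroupNorm):  # stand-in for ldm/sgm's GroupNorm32
    def forward(self, x):
        # hidden fp32 round-trip: this is what the patch removes
        return super().forward(x.float()).type(x.dtype)

device = "cuda" if torch.cuda.is_available() else "cpu"
dtype = torch.float16 if device == "cuda" else torch.float32
x = torch.randn(1, 8, 4, 4, device=device, dtype=dtype)

plain = torch.nn.GroupNorm(2, 8).to(device, dtype)  # what the patch swaps in
print(plain(x).dtype)    # keeps the input dtype, no extra casts

legacy = GroupNorm32(2, 8).to(device)               # fp32 weights, upcasts the input internally
print(legacy(x).dtype)   # output cast back to the input dtype, but the math ran in fp32
```
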
@@ -191,8 +191,9 @@ class Extension:

     def check_updates(self):
         repo = Repo(self.path)
+        branch_name = f'{repo.remote().name}/{self.branch}'
         for fetch in repo.remote().fetch(dry_run=True):
-            if self.branch and fetch.name != f'{repo.remote().name}/{self.branch}':
+            if self.branch and fetch.name != branch_name:
                 continue
             if fetch.flags != fetch.HEAD_UPTODATE:
                 self.can_update = True

@@ -200,7 +201,7 @@ class Extension:
                 return

         try:
-            origin = repo.rev_parse('origin')
+            origin = repo.rev_parse(branch_name)
             if repo.head.commit != origin:
                 self.can_update = True
                 self.status = "behind HEAD"

@@ -213,8 +214,10 @@ class Extension:
         self.can_update = False
         self.status = "latest"

-    def fetch_and_reset_hard(self, commit='origin'):
+    def fetch_and_reset_hard(self, commit=None):
         repo = Repo(self.path)
+        if commit is None:
+            commit = f'{repo.remote().name}/{self.branch}'
         # Fix: `error: Your local changes to the following files would be overwritten by merge`,
         # because WSL2 Docker set 755 file permissions instead of 644, this results to the error.
         repo.git.fetch(all=True)

@@ -653,7 +653,7 @@ def save_image(image, path, basename, seed=None, prompt=None, extension='png', i
     # WebP and JPG formats have maximum dimension limits of 16383 and 65535 respectively. switch to PNG which has a much higher limit
     if (image.height > 65535 or image.width > 65535) and extension.lower() in ("jpg", "jpeg") or (image.height > 16383 or image.width > 16383) and extension.lower() == "webp":
         print('Image dimensions too large; saving as PNG')
-        extension = ".png"
+        extension = "png"

     if save_to_dirs is None:
         save_to_dirs = (grid and opts.grid_save_to_dirs) or (not grid and opts.save_to_dirs and not no_prompt)

@@ -789,7 +789,10 @@ def read_info_from_image(image: Image.Image) -> tuple[str | None, dict]:
     if exif_comment:
         geninfo = exif_comment
     elif "comment" in items:  # for gif
-        geninfo = items["comment"].decode('utf8', errors="ignore")
+        if isinstance(items["comment"], bytes):
+            geninfo = items["comment"].decode('utf8', errors="ignore")
+        else:
+            geninfo = items["comment"]

     for field in IGNORED_INFO_KEYS:
         items.pop(field, None)

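The change above guards the GIF path because Pillow can surface the `comment` item either as `bytes` or as `str` depending on version and source file; a minimal sketch of the same defensive decode:

```python
def comment_to_text(comment):
    # decode only when the value is actually bytes; strings pass through unchanged
    if isinstance(comment, bytes):
        return comment.decode("utf8", errors="ignore")
    return comment

print(comment_to_text(b"parameters: test prompt"))  # bytes are decoded
print(comment_to_text("parameters: test prompt"))   # str is returned as-is
```
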
@@ -17,11 +17,14 @@ from modules.ui import plaintext_to_html
 import modules.scripts


-def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):
+def process_batch(p, input, output_dir, inpaint_mask_dir, args, to_scale=False, scale_by=1.0, use_png_info=False, png_info_props=None, png_info_dir=None):
     output_dir = output_dir.strip()
     processing.fix_seed(p)

-    batch_images = list(shared.walk_files(input_dir, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff")))
+    if isinstance(input, str):
+        batch_images = list(shared.walk_files(input, allowed_extensions=(".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff")))
+    else:
+        batch_images = [os.path.abspath(x.name) for x in input]

     is_inpaint_batch = False
     if inpaint_mask_dir:

@@ -146,7 +149,7 @@ def process_batch(p, input_dir, output_dir, inpaint_mask_dir, args, to_scale=Fal
     return batch_results


-def img2img(id_task: str, request: gr.Request, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, *args):
+def img2img(id_task: str, request: gr.Request, mode: int, prompt: str, negative_prompt: str, prompt_styles, init_img, sketch, init_img_with_mask, inpaint_color_sketch, inpaint_color_sketch_orig, init_img_inpaint, init_mask_inpaint, mask_blur: int, mask_alpha: float, inpainting_fill: int, n_iter: int, batch_size: int, cfg_scale: float, image_cfg_scale: float, denoising_strength: float, selected_scale_tab: int, height: int, width: int, scale_by: float, resize_mode: int, inpaint_full_res: bool, inpaint_full_res_padding: int, inpainting_mask_invert: int, img2img_batch_input_dir: str, img2img_batch_output_dir: str, img2img_batch_inpaint_mask_dir: str, override_settings_texts, img2img_batch_use_png_info: bool, img2img_batch_png_info_props: list, img2img_batch_png_info_dir: str, img2img_batch_source_type: str, img2img_batch_upload: list, *args):
     override_settings = create_override_settings_dict(override_settings_texts)

     is_batch = mode == 5

@@ -221,8 +224,15 @@ def img2img(id_task: str, request: gr.Request, mode: int, prompt: str, negative_

     with closing(p):
         if is_batch:
-            assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
-            processed = process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)
+            if img2img_batch_source_type == "upload":
+                assert isinstance(img2img_batch_upload, list) and img2img_batch_upload
+                output_dir = ""
+                inpaint_mask_dir = ""
+                png_info_dir = img2img_batch_png_info_dir if not shared.cmd_opts.hide_ui_dir_config else ""
+                processed = process_batch(p, img2img_batch_upload, output_dir, inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=png_info_dir)
+            else:  # "from dir"
+                assert not shared.cmd_opts.hide_ui_dir_config, "Launched with --hide-ui-dir-config, batch img2img disabled"
+                processed = process_batch(p, img2img_batch_input_dir, img2img_batch_output_dir, img2img_batch_inpaint_mask_dir, args, to_scale=selected_scale_tab == 1, scale_by=scale_by, use_png_info=img2img_batch_use_png_info, png_info_props=img2img_batch_png_info_props, png_info_dir=img2img_batch_png_info_dir)

         if processed is None:
             processed = Processed(p, [], p.seed, "")

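A rough sketch (not the webui's exact code) of the dual batch source handled above: `process_batch` now accepts either a directory path or a list of uploaded file objects, which is why the upload branch still works when `--hide-ui-dir-config` forbids directory mode.

```python
import os

def collect_batch_images(source):
    """Accept a directory path (str) or a list of uploaded file objects exposing a .name temp path."""
    if isinstance(source, str):
        exts = (".png", ".jpg", ".jpeg", ".webp", ".tif", ".tiff")
        return [os.path.join(source, f) for f in sorted(os.listdir(source)) if f.lower().endswith(exts)]
    return [os.path.abspath(x.name) for x in source]
```
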
@@ -76,7 +76,7 @@ def git_tag():
     except Exception:
         try:

-            changelog_md = os.path.join(os.path.dirname(os.path.dirname(__file__)), "CHANGELOG.md")
+            changelog_md = os.path.join(script_path, "CHANGELOG.md")
             with open(changelog_md, "r", encoding="utf-8") as file:
                 line = next((line.strip() for line in file if line.strip()), "<none>")
                 line = line.replace("## ", "")

@@ -231,7 +231,7 @@ def run_extension_installer(extension_dir):

     try:
         env = os.environ.copy()
-        env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH', '')}"
+        env['PYTHONPATH'] = f"{script_path}{os.pathsep}{env.get('PYTHONPATH', '')}"

         stdout = run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env).strip()
         if stdout:

@@ -23,6 +23,7 @@ def load_file_from_url(
     model_dir: str,
     progress: bool = True,
     file_name: str | None = None,
+    hash_prefix: str | None = None,
 ) -> str:
     """Download a file from `url` into `model_dir`, using the file present if possible.

@@ -36,11 +37,11 @@ def load_file_from_url(
     if not os.path.exists(cached_file):
         print(f'Downloading: "{url}" to {cached_file}\n')
         from torch.hub import download_url_to_file
-        download_url_to_file(url, cached_file, progress=progress)
+        download_url_to_file(url, cached_file, progress=progress, hash_prefix=hash_prefix)
     return cached_file


-def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None, ext_blacklist=None) -> list:
+def load_models(model_path: str, model_url: str = None, command_path: str = None, ext_filter=None, download_name=None, ext_blacklist=None, hash_prefix=None) -> list:
     """
     A one-and done loader to try finding the desired models in specified directories.

@@ -49,6 +50,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None
     @param model_path: The location to store/find models in.
     @param command_path: A command-line argument to search for models in first.
     @param ext_filter: An optional list of filename extensions to filter by
+    @param hash_prefix: the expected sha256 of the model_url
     @return: A list of paths containing the desired model(s)
     """
     output = []

@@ -78,7 +80,7 @@ def load_models(model_path: str, model_url: str = None, command_path: str = None

     if model_url is not None and len(output) == 0:
         if download_name is not None:
-            output.append(load_file_from_url(model_url, model_dir=places[0], file_name=download_name))
+            output.append(load_file_from_url(model_url, model_dir=places[0], file_name=download_name, hash_prefix=hash_prefix))
         else:
             output.append(model_url)

@@ -24,11 +24,12 @@ default_sd_model_file = sd_model_file
 # Parse the --data-dir flag first so we can use it as a base for our other argument default values
 parser_pre = argparse.ArgumentParser(add_help=False)
 parser_pre.add_argument("--data-dir", type=str, default=os.path.dirname(modules_path), help="base path where all user data is stored", )
+parser_pre.add_argument("--models-dir", type=str, default=None, help="base path where models are stored; overrides --data-dir", )
 cmd_opts_pre = parser_pre.parse_known_args()[0]

 data_path = cmd_opts_pre.data_dir

-models_path = os.path.join(data_path, "models")
+models_path = cmd_opts_pre.models_dir if cmd_opts_pre.models_dir else os.path.join(data_path, "models")
 extensions_dir = os.path.join(data_path, "extensions")
 extensions_builtin_dir = os.path.join(script_path, "extensions-builtin")
 config_states_dir = os.path.join(script_path, "config_states")

@@ -62,11 +62,13 @@ def run_postprocessing(extras_mode, image, image_folder, input_dir, output_dir,
         else:
             image_data = image_placeholder

+        image_data = image_data if image_data.mode in ("RGBA", "RGB") else image_data.convert("RGB")
+
         parameters, existing_pnginfo = images.read_info_from_image(image_data)
         if parameters:
             existing_pnginfo["parameters"] = parameters

-        initial_pp = scripts_postprocessing.PostprocessedImage(image_data if image_data.mode in ("RGBA", "RGB") else image_data.convert("RGB"))
+        initial_pp = scripts_postprocessing.PostprocessedImage(image_data)

         scripts.scripts_postproc.run(initial_pp, args)

@@ -115,20 +115,17 @@ def txt2img_image_conditioning(sd_model, x, width, height):
         return x.new_zeros(x.shape[0], 2*sd_model.noise_augmentor.time_embed.dim, dtype=x.dtype, device=x.device)

     else:
-        sd = sd_model.model.state_dict()
-        diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
-        if diffusion_model_input is not None:
-            if diffusion_model_input.shape[1] == 9:
-                # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
-                image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
-                image_conditioning = images_tensor_to_samples(image_conditioning,
-                                                              approximation_indexes.get(opts.sd_vae_encode_method))
+        if getattr(sd_model.model, "is_sdxl_inpaint", False):
+            # The "masked-image" in this case will just be all 0.5 since the entire image is masked.
+            image_conditioning = torch.ones(x.shape[0], 3, height, width, device=x.device) * 0.5
+            image_conditioning = images_tensor_to_samples(image_conditioning,
+                                                          approximation_indexes.get(opts.sd_vae_encode_method))

             # Add the fake full 1s mask to the first dimension.
             image_conditioning = torch.nn.functional.pad(image_conditioning, (0, 0, 0, 0, 1, 0), value=1.0)
             image_conditioning = image_conditioning.to(x.dtype)

             return image_conditioning

         # Dummy zero conditioning if we're not using inpainting or unclip models.
         # Still takes up a bit of memory, but no encoder call.
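Editor's note: the refactor above leaves the conditioning layout unchanged; only the per-call `state_dict()` probe is replaced by the precomputed `is_sdxl_inpaint` flag. A small sketch of how the fake all-ones mask channel is prepended with `torch.nn.functional.pad` (the tensor sizes are illustrative, not the real latent dimensions):

```python
import torch
import torch.nn.functional as F

latent = torch.zeros(2, 4, 64, 64)  # stand-in for the VAE-encoded all-0.5 "masked image"
cond = F.pad(latent, (0, 0, 0, 0, 1, 0), value=1.0)  # pad the channel dim by one in front
print(cond.shape)           # torch.Size([2, 5, 64, 64])
print(cond[:, 0].unique())  # tensor([1.]) - the prepended full-ones mask channel
```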
@@ -238,11 +235,6 @@ class StableDiffusionProcessing:
         self.styles = []

         self.sampler_noise_scheduler_override = None
-        self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond
-        self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn
-        self.s_tmin = self.s_tmin if self.s_tmin is not None else opts.s_tmin
-        self.s_tmax = (self.s_tmax if self.s_tmax is not None else opts.s_tmax) or float('inf')
-        self.s_noise = self.s_noise if self.s_noise is not None else opts.s_noise

         self.extra_generation_params = self.extra_generation_params or {}
         self.override_settings = self.override_settings or {}

@@ -259,6 +251,13 @@ class StableDiffusionProcessing:
         self.cached_uc = StableDiffusionProcessing.cached_uc
         self.cached_c = StableDiffusionProcessing.cached_c

+    def fill_fields_from_opts(self):
+        self.s_min_uncond = self.s_min_uncond if self.s_min_uncond is not None else opts.s_min_uncond
+        self.s_churn = self.s_churn if self.s_churn is not None else opts.s_churn
+        self.s_tmin = self.s_tmin if self.s_tmin is not None else opts.s_tmin
+        self.s_tmax = (self.s_tmax if self.s_tmax is not None else opts.s_tmax) or float('inf')
+        self.s_noise = self.s_noise if self.s_noise is not None else opts.s_noise
+
     @property
     def sd_model(self):
         return shared.sd_model
@@ -390,11 +389,8 @@ class StableDiffusionProcessing:
         if self.sampler.conditioning_key == "crossattn-adm":
             return self.unclip_image_conditioning(source_image)

-        sd = self.sampler.model_wrap.inner_model.model.state_dict()
-        diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
-        if diffusion_model_input is not None:
-            if diffusion_model_input.shape[1] == 9:
-                return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)
+        if getattr(self.sampler.model_wrap.inner_model.model, "is_sdxl_inpaint", False):
+            return self.inpainting_image_conditioning(source_image, latent_image, image_mask=image_mask)

         # Dummy zero conditioning if we're not using inpainting or depth model.
         return latent_image.new_zeros(latent_image.shape[0], 5, 1, 1)

@@ -569,7 +565,7 @@ class Processed:
         self.all_negative_prompts = all_negative_prompts or p.all_negative_prompts or [self.negative_prompt]
         self.all_seeds = all_seeds or p.all_seeds or [self.seed]
         self.all_subseeds = all_subseeds or p.all_subseeds or [self.subseed]
-        self.infotexts = infotexts or [info]
+        self.infotexts = infotexts or [info] * len(images_list)
         self.version = program_version()

     def js(self):

@@ -794,7 +790,6 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
         "Token merging ratio hr": None if not enable_hr or token_merging_ratio_hr == 0 else token_merging_ratio_hr,
         "Init image hash": getattr(p, 'init_img_hash', None),
         "RNG": opts.randn_source if opts.randn_source != "GPU" else None,
-        "NGMS": None if p.s_min_uncond == 0 else p.s_min_uncond,
         "Tiling": "True" if p.tiling else None,
         **p.extra_generation_params,
         "Version": program_version() if opts.add_version_to_infotext else None,
@@ -842,6 +837,9 @@ def process_images(p: StableDiffusionProcessing) -> Processed:

             sd_models.apply_token_merging(p.sd_model, p.get_token_merging_ratio())

+            # backwards compatibility, fix sampler and scheduler if invalid
+            sd_samplers.fix_p_invalid_sampler_and_scheduler(p)
+
             res = process_images_inner(p)

         finally:

@@ -890,6 +888,7 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
     modules.sd_hijack.model_hijack.apply_circular(p.tiling)
     modules.sd_hijack.model_hijack.clear_comments()

+    p.fill_fields_from_opts()
     p.setup_prompts()

     if isinstance(seed, list):
@@ -64,8 +64,8 @@ class RestrictedUnpickler(pickle.Unpickler):
             raise Exception(f"global '{module}/{name}' is forbidden")


-# Regular expression that accepts 'dirname/version', 'dirname/data.pkl', and 'dirname/data/<number>'
-allowed_zip_names_re = re.compile(r"^([^/]+)/((data/\d+)|byteorder|(\.data\/serialization_id)|version|(data\.pkl))$")
+# Regular expression that accepts 'dirname/version', 'dirname/byteorder', 'dirname/data.pkl', '.data/serialization_id', and 'dirname/data/<number>'
+allowed_zip_names_re = re.compile(r"^([^/]+)/((data/\d+)|version|byteorder|.data/serialization_id|(data\.pkl))$")
 data_pkl_re = re.compile(r"^([^/]+)/data\.pkl$")

 def check_zip_filenames(filename, names):
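Editor's note: a quick check of what the updated allow-list accepts, using the new pattern verbatim; the example archive member names are made up for illustration.

```python
import re

allowed_zip_names_re = re.compile(r"^([^/]+)/((data/\d+)|version|byteorder|.data/serialization_id|(data\.pkl))$")

for name in ["archive/data.pkl", "archive/version", "archive/byteorder",
             "archive/.data/serialization_id", "archive/data/0", "archive/payload.py"]:
    print(name, bool(allowed_zip_names_re.match(name)))
# only the last name is rejected
```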
@@ -486,7 +486,8 @@ def xformers_attention_forward(self, x, context=None, mask=None, **kwargs):
     k_in = self.to_k(context_k)
     v_in = self.to_v(context_v)

-    q, k, v = (rearrange(t, 'b n (h d) -> b n h d', h=h) for t in (q_in, k_in, v_in))
+    q, k, v = (t.reshape(t.shape[0], t.shape[1], h, -1) for t in (q_in, k_in, v_in))
+
     del q_in, k_in, v_in

     dtype = q.dtype

@@ -497,7 +498,8 @@ def xformers_attention_forward(self, x, context=None, mask=None, **kwargs):

     out = out.to(dtype)

-    out = rearrange(out, 'b n h d -> b n (h d)', h=h)
+    b, n, h, d = out.shape
+    out = out.reshape(b, n, h * d)
     return self.to_out(out)

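Editor's note: the two edits above swap einops `rearrange` for plain `reshape` without changing the math; splitting the head dimension out and folding it back are exact inverses. A minimal sanity check, with arbitrary tensor sizes:

```python
import torch

b, n, h, d = 2, 77, 8, 40
x = torch.randn(b, n, h * d)

split = x.reshape(x.shape[0], x.shape[1], h, -1)   # 'b n (h d) -> b n h d'
merged = split.reshape(b, n, h * d)                # 'b n h d -> b n (h d)'

assert split.shape == (b, n, h, d)
assert torch.equal(merged, x)
```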
@@ -1,5 +1,7 @@
 import torch
 from packaging import version
+from einops import repeat
+import math

 from modules import devices
 from modules.sd_hijack_utils import CondFunc

@@ -36,7 +38,7 @@ th = TorchHijackForUnet()

 # Below are monkey patches to enable upcasting a float16 UNet for float32 sampling
 def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
+    """Always make sure inputs to unet are in correct dtype."""
     if isinstance(cond, dict):
         for y in cond.keys():
             if isinstance(cond[y], list):

@@ -45,7 +47,59 @@ def apply_model(orig_func, self, x_noisy, t, cond, **kwargs):
                 cond[y] = cond[y].to(devices.dtype_unet) if isinstance(cond[y], torch.Tensor) else cond[y]

     with devices.autocast():
-        return orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs).float()
+        result = orig_func(self, x_noisy.to(devices.dtype_unet), t.to(devices.dtype_unet), cond, **kwargs)
+        if devices.unet_needs_upcast:
+            return result.float()
+        else:
+            return result
+
+
+# Monkey patch to create timestep embed tensor on device, avoiding a block.
+def timestep_embedding(_, timesteps, dim, max_period=10000, repeat_only=False):
+    """
+    Create sinusoidal timestep embeddings.
+    :param timesteps: a 1-D Tensor of N indices, one per batch element.
+                      These may be fractional.
+    :param dim: the dimension of the output.
+    :param max_period: controls the minimum frequency of the embeddings.
+    :return: an [N x dim] Tensor of positional embeddings.
+    """
+    if not repeat_only:
+        half = dim // 2
+        freqs = torch.exp(
+            -math.log(max_period) * torch.arange(start=0, end=half, dtype=torch.float32, device=timesteps.device) / half
+        )
+        args = timesteps[:, None].float() * freqs[None]
+        embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)
+        if dim % 2:
+            embedding = torch.cat([embedding, torch.zeros_like(embedding[:, :1])], dim=-1)
+    else:
+        embedding = repeat(timesteps, 'b -> b d', d=dim)
+    return embedding
+
+
+# Monkey patch to SpatialTransformer removing unnecessary contiguous calls.
+# Prevents a lot of unnecessary aten::copy_ calls
+def spatial_transformer_forward(_, self, x: torch.Tensor, context=None):
+    # note: if no context is given, cross-attention defaults to self-attention
+    if not isinstance(context, list):
+        context = [context]
+    b, c, h, w = x.shape
+    x_in = x
+    x = self.norm(x)
+    if not self.use_linear:
+        x = self.proj_in(x)
+    x = x.permute(0, 2, 3, 1).reshape(b, h * w, c)
+    if self.use_linear:
+        x = self.proj_in(x)
+    for i, block in enumerate(self.transformer_blocks):
+        x = block(x, context=context[i])
+    if self.use_linear:
+        x = self.proj_out(x)
+    x = x.view(b, h, w, c).permute(0, 3, 1, 2)
+    if not self.use_linear:
+        x = self.proj_out(x)
+    return x + x_in
+
+
 class GELUHijack(torch.nn.GELU, torch.nn.Module):
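Editor's note: the `timestep_embedding` patch above reproduces the standard sinusoidal embedding, only building `freqs` directly on `timesteps.device` to avoid the blocking transfer mentioned in the comment. A small shape check under the same formula; the timestep values and `dim` chosen here are illustrative:

```python
import math
import torch

timesteps = torch.tensor([0., 10., 999.])
dim, max_period = 320, 10000

half = dim // 2
freqs = torch.exp(-math.log(max_period) * torch.arange(half, dtype=torch.float32, device=timesteps.device) / half)
args = timesteps[:, None].float() * freqs[None]
embedding = torch.cat([torch.cos(args), torch.sin(args)], dim=-1)

assert embedding.shape == (3, dim)  # one [N x dim] row per timestep
```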
@@ -64,12 +118,15 @@ def hijack_ddpm_edit():
     if not ddpm_edit_hijack:
         CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.decode_first_stage', first_stage_sub, first_stage_cond)
         CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
-        ddpm_edit_hijack = CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.apply_model', apply_model, unet_needs_upcast)
+        ddpm_edit_hijack = CondFunc('modules.models.diffusion.ddpm_edit.LatentDiffusion.apply_model', apply_model)


 unet_needs_upcast = lambda *args, **kwargs: devices.unet_needs_upcast
 CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model, unet_needs_upcast)
+CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding)
+CondFunc('ldm.modules.attention.SpatialTransformer.forward', spatial_transformer_forward)
 CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)

 if version.parse(torch.__version__) <= version.parse("1.13.2") or torch.cuda.is_available():
     CondFunc('ldm.modules.diffusionmodules.util.GroupNorm32.forward', lambda orig_func, self, *args, **kwargs: orig_func(self.float(), *args, **kwargs), unet_needs_upcast)
     CondFunc('ldm.modules.attention.GEGLU.forward', lambda orig_func, self, x: orig_func(self.float(), x.float()).to(devices.dtype_unet), unet_needs_upcast)

@@ -81,5 +138,17 @@ CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.decode_first_stage', first_s
 CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.encode_first_stage', first_stage_sub, first_stage_cond)
 CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.get_first_stage_encoding', lambda orig_func, *args, **kwargs: orig_func(*args, **kwargs).float(), first_stage_cond)

-CondFunc('sgm.modules.diffusionmodules.wrappers.OpenAIWrapper.forward', apply_model, unet_needs_upcast)
-CondFunc('sgm.modules.diffusionmodules.openaimodel.timestep_embedding', lambda orig_func, timesteps, *args, **kwargs: orig_func(timesteps, *args, **kwargs).to(torch.float32 if timesteps.dtype == torch.int64 else devices.dtype_unet), unet_needs_upcast)
+CondFunc('ldm.models.diffusion.ddpm.LatentDiffusion.apply_model', apply_model)
+CondFunc('sgm.modules.diffusionmodules.wrappers.OpenAIWrapper.forward', apply_model)
+
+
+def timestep_embedding_cast_result(orig_func, timesteps, *args, **kwargs):
+    if devices.unet_needs_upcast and timesteps.dtype == torch.int64:
+        dtype = torch.float32
+    else:
+        dtype = devices.dtype_unet
+    return orig_func(timesteps, *args, **kwargs).to(dtype=dtype)
+
+
+CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding_cast_result)
+CondFunc('sgm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding_cast_result)
@@ -1,7 +1,11 @@
 import importlib

+
+always_true_func = lambda *args, **kwargs: True
+
+
 class CondFunc:
-    def __new__(cls, orig_func, sub_func, cond_func):
+    def __new__(cls, orig_func, sub_func, cond_func=always_true_func):
         self = super(CondFunc, cls).__new__(cls)
         if isinstance(orig_func, str):
             func_path = orig_func.split('.')

@@ -20,13 +24,13 @@ class CondFunc:
                     print(f"Warning: Failed to resolve {orig_func} for CondFunc hijack")
                     pass
         self.__init__(orig_func, sub_func, cond_func)
         return lambda *args, **kwargs: self(*args, **kwargs)

     def __init__(self, orig_func, sub_func, cond_func):
         self.__orig_func = orig_func
         self.__sub_func = sub_func
         self.__cond_func = cond_func

     def __call__(self, *args, **kwargs):
         if not self.__cond_func or self.__cond_func(self.__orig_func, *args, **kwargs):
             return self.__sub_func(self.__orig_func, *args, **kwargs)
         else:
             return self.__orig_func(*args, **kwargs)
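Editor's note: with `cond_func` now defaulting to `always_true_func`, call sites such as `CondFunc('ldm.modules.diffusionmodules.openaimodel.timestep_embedding', timestep_embedding)` above can omit the condition entirely. A toy sketch of the calling convention; it assumes the webui's `modules` package is importable, and `slow_square`/`logged_square` are made-up names:

```python
from modules.sd_hijack_utils import CondFunc

def slow_square(x):
    return x * x

def logged_square(orig_func, x):
    # sub_func always receives the original callable as its first argument
    print(f"square({x})")
    return orig_func(x)

# no cond_func given -> the substitute now runs unconditionally
square = CondFunc(slow_square, logged_square)
assert square(3) == 9
```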
@@ -149,10 +149,12 @@ def list_models():
     cmd_ckpt = shared.cmd_opts.ckpt
     if shared.cmd_opts.no_download_sd_model or cmd_ckpt != shared.sd_model_file or os.path.exists(cmd_ckpt):
         model_url = None
+        expected_sha256 = None
     else:
         model_url = f"{shared.hf_endpoint}/runwayml/stable-diffusion-v1-5/resolve/main/v1-5-pruned-emaonly.safetensors"
+        expected_sha256 = '6ce0161689b3853acaa03779ec93eafe75a02f4ced659bee03f50797806fa2fa'

-    model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"])
+    model_list = modelloader.load_models(model_path=model_path, model_url=model_url, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], download_name="v1-5-pruned-emaonly.safetensors", ext_blacklist=[".vae.ckpt", ".vae.safetensors"], hash_prefix=expected_sha256)

     if os.path.exists(cmd_ckpt):
         checkpoint_info = CheckpointInfo(cmd_ckpt)
@@ -384,6 +386,13 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
     model.is_sd2 = not model.is_sdxl and hasattr(model.cond_stage_model, 'model')
     model.is_sd1 = not model.is_sdxl and not model.is_sd2
     model.is_ssd = model.is_sdxl and 'model.diffusion_model.middle_block.1.transformer_blocks.0.attn1.to_q.weight' not in state_dict.keys()
+    # Set is_sdxl_inpaint flag.
+    diffusion_model_input = state_dict.get('diffusion_model.input_blocks.0.0.weight', None)
+    model.is_sdxl_inpaint = (
+        model.is_sdxl and
+        diffusion_model_input is not None and
+        diffusion_model_input.shape[1] == 9
+    )
     if model.is_sdxl:
         sd_models_xl.extend_sdxl(model)

@@ -407,6 +416,7 @@ def load_model_weights(model, checkpoint_info: CheckpointInfo, state_dict, timer
         model.float()
         model.alphas_cumprod_original = model.alphas_cumprod
         devices.dtype_unet = torch.float32
+        assert shared.cmd_opts.precision != "half", "Cannot use --precision half with --no-half"
         timer.record("apply float()")
     else:
         vae = model.first_stage_model
@@ -544,7 +554,7 @@ def repair_config(sd_config):
     if hasattr(sd_config.model.params, 'unet_config'):
         if shared.cmd_opts.no_half:
             sd_config.model.params.unet_config.params.use_fp16 = False
-        elif shared.cmd_opts.upcast_sampling:
+        elif shared.cmd_opts.upcast_sampling or shared.cmd_opts.precision == "half":
             sd_config.model.params.unet_config.params.use_fp16 = True

     if getattr(sd_config.model.params.first_stage_config.params.ddconfig, "attn_type", None) == "vanilla-xformers" and not shared.xformers_available:

@@ -555,6 +565,14 @@ def repair_config(sd_config):
         karlo_path = os.path.join(paths.models_path, 'karlo')
         sd_config.model.params.noise_aug_config.params.clip_stats_path = sd_config.model.params.noise_aug_config.params.clip_stats_path.replace("checkpoints/karlo_models", karlo_path)

+    # Do not use checkpoint for inference.
+    # This helps prevent extra performance overhead on checking parameters.
+    # The perf overhead is about 100ms/it on 4090 for SDXL.
+    if hasattr(sd_config.model.params, "network_config"):
+        sd_config.model.params.network_config.params.use_checkpoint = False
+    if hasattr(sd_config.model.params, "unet_config"):
+        sd_config.model.params.unet_config.params.use_checkpoint = False
+

 def rescale_zero_terminal_snr_abar(alphas_cumprod):
     alphas_bar_sqrt = alphas_cumprod.sqrt()
@@ -663,10 +681,11 @@ def get_empty_cond(sd_model):


 def send_model_to_cpu(m):
-    if m.lowvram:
-        lowvram.send_everything_to_cpu()
-    else:
-        m.to(devices.cpu)
+    if m is not None:
+        if m.lowvram:
+            lowvram.send_everything_to_cpu()
+        else:
+            m.to(devices.cpu)

     devices.torch_gc()

@@ -35,7 +35,7 @@ def is_using_v_parameterization_for_sd2(state_dict):

     with sd_disable_initialization.DisableInitialization():
         unet = ldm.modules.diffusionmodules.openaimodel.UNetModel(
-            use_checkpoint=True,
+            use_checkpoint=False,
             use_fp16=False,
             image_size=32,
             in_channels=4,
@@ -35,11 +35,10 @@ def get_learned_conditioning(self: sgm.models.diffusion.DiffusionEngine, batch:


 def apply_model(self: sgm.models.diffusion.DiffusionEngine, x, t, cond):
-    sd = self.model.state_dict()
-    diffusion_model_input = sd.get('diffusion_model.input_blocks.0.0.weight', None)
-    if diffusion_model_input is not None:
-        if diffusion_model_input.shape[1] == 9:
-            x = torch.cat([x] + cond['c_concat'], dim=1)
+    """WARNING: This function is called once per denoising iteration. DO NOT add
+    expensive function calls such as `model.state_dict`. """
+    if self.is_sdxl_inpaint:
+        x = torch.cat([x] + cond['c_concat'], dim=1)

     return self.model(x, t, cond)

@@ -1,7 +1,7 @@
 from __future__ import annotations

 import functools
+import logging
 from modules import sd_samplers_kdiffusion, sd_samplers_timesteps, sd_samplers_lcm, shared, sd_samplers_common, sd_schedulers

 # imports for functions that previously were here and are used by other modules
@@ -122,4 +122,11 @@ def get_sampler_and_scheduler(sampler_name, scheduler_name):
     return sampler.name, found_scheduler.label


+def fix_p_invalid_sampler_and_scheduler(p):
+    i_sampler_name, i_scheduler = p.sampler_name, p.scheduler
+    p.sampler_name, p.scheduler = get_sampler_and_scheduler(p.sampler_name, p.scheduler)
+    if p.sampler_name != i_sampler_name or i_scheduler != p.scheduler:
+        logging.warning(f'Sampler Scheduler autocorrection: "{i_sampler_name}" -> "{p.sampler_name}", "{i_scheduler}" -> "{p.scheduler}"')
+
+
 set_samplers()
@@ -212,9 +212,16 @@ class CFGDenoiser(torch.nn.Module):
         uncond = denoiser_params.text_uncond
         skip_uncond = False

-        # alternating uncond allows for higher thresholds without the quality loss normally expected from raising it
-        if self.step % 2 and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model:
+        if shared.opts.skip_early_cond != 0. and self.step / self.total_steps <= shared.opts.skip_early_cond:
             skip_uncond = True
+            self.p.extra_generation_params["Skip Early CFG"] = shared.opts.skip_early_cond
+        elif (self.step % 2 or shared.opts.s_min_uncond_all) and s_min_uncond > 0 and sigma[0] < s_min_uncond and not is_edit_model:
+            skip_uncond = True
+            self.p.extra_generation_params["NGMS"] = s_min_uncond
+            if shared.opts.s_min_uncond_all:
+                self.p.extra_generation_params["NGMS all steps"] = shared.opts.s_min_uncond_all

+        if skip_uncond:
             x_in = x_in[:-batch_size]
             sigma_in = sigma_in[:-batch_size]

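Editor's note: with the new option, the negative prompt is dropped while `step / total_steps <= skip_early_cond`; the NGMS path then takes over on alternating steps (or on all steps with `s_min_uncond_all`). A rough sketch of the early-skip window; step counting here is illustrative, the denoiser's own counter may be offset:

```python
total_steps, skip_early_cond = 30, 0.2
skipped = [step for step in range(total_steps)
           if skip_early_cond != 0. and step / total_steps <= skip_early_cond]
print(skipped)  # [0, 1, 2, 3, 4, 5, 6] - roughly the first 20% of sampling
```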
@@ -5,13 +5,14 @@ import numpy as np

 from modules import shared
 from modules.models.diffusion.uni_pc import uni_pc
+from modules.torch_utils import float64


 @torch.no_grad()
 def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta=0.0):
     alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
     alphas = alphas_cumprod[timesteps]
-    alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' and x.device.type != 'xpu' else torch.float32)
+    alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(float64(x))
     sqrt_one_minus_alphas = torch.sqrt(1 - alphas)
     sigmas = eta * np.sqrt((1 - alphas_prev.cpu().numpy()) / (1 - alphas.cpu()) * (1 - alphas.cpu() / alphas_prev.cpu().numpy()))

@@ -43,7 +44,7 @@ def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta=
 def plms(model, x, timesteps, extra_args=None, callback=None, disable=None):
     alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
     alphas = alphas_cumprod[timesteps]
-    alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' and x.device.type != 'xpu' else torch.float32)
+    alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(float64(x))
     sqrt_one_minus_alphas = torch.sqrt(1 - alphas)

     extra_args = {} if extra_args is None else extra_args
@@ -4,6 +4,9 @@ import torch

 import k_diffusion
+import numpy as np
+
+from modules import shared

 @dataclasses.dataclass
 class Scheduler:

@@ -30,6 +33,41 @@ def sgm_uniform(n, sigma_min, sigma_max, inner_model, device):
     sigs += [0.0]
     return torch.FloatTensor(sigs).to(device)

+def get_align_your_steps_sigmas(n, sigma_min, sigma_max, device='cpu'):
+    # https://research.nvidia.com/labs/toronto-ai/AlignYourSteps/howto.html
+    def loglinear_interp(t_steps, num_steps):
+        """
+        Performs log-linear interpolation of a given array of decreasing numbers.
+        """
+        xs = np.linspace(0, 1, len(t_steps))
+        ys = np.log(t_steps[::-1])
+
+        new_xs = np.linspace(0, 1, num_steps)
+        new_ys = np.interp(new_xs, xs, ys)
+
+        interped_ys = np.exp(new_ys)[::-1].copy()
+        return interped_ys
+
+    if shared.sd_model.is_sdxl:
+        sigmas = [14.615, 6.315, 3.771, 2.181, 1.342, 0.862, 0.555, 0.380, 0.234, 0.113, 0.029]
+    else:
+        # Default to SD 1.5 sigmas.
+        sigmas = [14.615, 6.475, 3.861, 2.697, 1.886, 1.396, 0.963, 0.652, 0.399, 0.152, 0.029]
+
+    if n != len(sigmas):
+        sigmas = np.append(loglinear_interp(sigmas, n), [0.0])
+    else:
+        sigmas.append(0.0)
+
+    return torch.FloatTensor(sigmas).to(device)
+
+def kl_optimal(n, sigma_min, sigma_max, device):
+    alpha_min = torch.arctan(torch.tensor(sigma_min, device=device))
+    alpha_max = torch.arctan(torch.tensor(sigma_max, device=device))
+    step_indices = torch.arange(n + 1, device=device)
+    sigmas = torch.tan(step_indices / n * alpha_min + (1.0 - step_indices / n) * alpha_max)
+    return sigmas
+

 schedulers = [
     Scheduler('automatic', 'Automatic', None),

@@ -38,6 +76,8 @@ schedulers = [
     Scheduler('exponential', 'Exponential', k_diffusion.sampling.get_sigmas_exponential),
     Scheduler('polyexponential', 'Polyexponential', k_diffusion.sampling.get_sigmas_polyexponential, default_rho=1.0),
     Scheduler('sgm_uniform', 'SGM Uniform', sgm_uniform, need_inner_model=True, aliases=["SGMUniform"]),
+    Scheduler('kl_optimal', 'KL Optimal', kl_optimal),
+    Scheduler('align_your_steps', 'Align Your Steps', get_align_your_steps_sigmas),
 ]

 schedulers_map = {**{x.name: x for x in schedulers}, **{x.label: x for x in schedulers}}
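Editor's note: `kl_optimal` above interpolates in angle space, sigma_i = tan(i/n * arctan(sigma_min) + (1 - i/n) * arctan(sigma_max)), so the first and last sigmas land exactly on sigma_max and sigma_min. A quick numeric check of that property; the sigma values used here are typical SD defaults picked for illustration, not mandated by the code:

```python
import torch

def kl_optimal(n, sigma_min, sigma_max, device='cpu'):
    alpha_min = torch.arctan(torch.tensor(sigma_min, device=device))
    alpha_max = torch.arctan(torch.tensor(sigma_max, device=device))
    step_indices = torch.arange(n + 1, device=device)
    return torch.tan(step_indices / n * alpha_min + (1.0 - step_indices / n) * alpha_max)

sigmas = kl_optimal(20, 0.0292, 14.6146)
print(sigmas[0].item(), sigmas[-1].item())  # ~14.6146 ... ~0.0292, decreasing in between
```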
@@ -31,6 +31,14 @@ def initialize():
     devices.dtype_vae = torch.float32 if cmd_opts.no_half or cmd_opts.no_half_vae else torch.float16
     devices.dtype_inference = torch.float32 if cmd_opts.precision == 'full' else devices.dtype

+    if cmd_opts.precision == "half":
+        msg = "--no-half and --no-half-vae conflict with --precision half"
+        assert devices.dtype == torch.float16, msg
+        assert devices.dtype_vae == torch.float16, msg
+        assert devices.dtype_inference == torch.float16, msg
+        devices.force_fp16 = True
+        devices.force_model_fp16()
+
     shared.device = devices.device
     shared.weight_load_location = None if cmd_opts.lowram else "cpu"

@@ -209,7 +209,8 @@ options_templates.update(options_section(('img2img', "img2img", "sd"), {

 options_templates.update(options_section(('optimizations', "Optimizations", "sd"), {
     "cross_attention_optimization": OptionInfo("Automatic", "Cross attention optimization", gr.Dropdown, lambda: {"choices": shared_items.cross_attention_optimizations()}),
-    "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}).link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
+    "s_min_uncond": OptionInfo(0.0, "Negative Guidance minimum sigma", gr.Slider, {"minimum": 0.0, "maximum": 15.0, "step": 0.01}, infotext='NGMS').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9177").info("skip negative prompt for some steps when the image is almost ready; 0=disable, higher=faster"),
+    "s_min_uncond_all": OptionInfo(False, "Negative Guidance minimum sigma all steps", infotext='NGMS all steps').info("By default, NGMS above skips every other step; this makes it skip all steps"),
     "token_merging_ratio": OptionInfo(0.0, "Token merging ratio", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio').link("PR", "https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/9256").info("0=disable, higher=faster"),
     "token_merging_ratio_img2img": OptionInfo(0.0, "Token merging ratio for img2img", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}).info("only applies if non-zero and overrides above"),
     "token_merging_ratio_hr": OptionInfo(0.0, "Token merging ratio for high-res pass", gr.Slider, {"minimum": 0.0, "maximum": 0.9, "step": 0.1}, infotext='Token merging ratio hr').info("only applies if non-zero and overrides above"),

@@ -380,7 +381,8 @@ options_templates.update(options_section(('sampler-params', "Sampler parameters"
     'uni_pc_skip_type': OptionInfo("time_uniform", "UniPC skip type", gr.Radio, {"choices": ["time_uniform", "time_quadratic", "logSNR"]}, infotext='UniPC skip type'),
     'uni_pc_order': OptionInfo(3, "UniPC order", gr.Slider, {"minimum": 1, "maximum": 50, "step": 1}, infotext='UniPC order').info("must be < sampling steps"),
     'uni_pc_lower_order_final': OptionInfo(True, "UniPC lower order final", infotext='UniPC lower order final'),
-    'sd_noise_schedule': OptionInfo("Default", "Noise schedule for sampling", gr.Radio, {"choices": ["Default", "Zero Terminal SNR"]}, infotext="Noise Schedule").info("for use with zero terminal SNR trained models")
+    'sd_noise_schedule': OptionInfo("Default", "Noise schedule for sampling", gr.Radio, {"choices": ["Default", "Zero Terminal SNR"]}, infotext="Noise Schedule").info("for use with zero terminal SNR trained models"),
+    'skip_early_cond': OptionInfo(0.0, "Ignore negative prompt during early sampling", gr.Slider, {"minimum": 0.0, "maximum": 1.0, "step": 0.01}, infotext="Skip Early CFG").info("disables CFG on a proportion of steps at the beginning of generation; 0=skip none; 1=skip all; can both improve sample diversity/quality and speed up sampling"),
 }))

 options_templates.update(options_section(('postprocessing', "Postprocessing", "postprocessing"), {
@@ -1,6 +1,7 @@
 from __future__ import annotations

 import torch.nn
+import torch


 def get_param(model) -> torch.nn.Parameter:

@@ -15,3 +16,11 @@ def get_param(model) -> torch.nn.Parameter:
             return param

     raise ValueError(f"No parameters found in model {model!r}")
+
+
+def float64(t: torch.Tensor):
+    """return torch.float64 if device is not mps or xpu, else return torch.float32"""
+    match t.device.type:
+        case 'mps', 'xpu':
+            return torch.float32
+    return torch.float64
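Editor's note: in the new `float64` helper, `case 'mps', 'xpu':` is a Python sequence pattern (it matches a two-element sequence), not an or-pattern, so a plain device-type string such as `'mps'` falls through to the `torch.float64` return. If matching either string is the intent, the usual spelling is an or-pattern; a sketch of that variant, offered as a suggestion rather than what the commit contains:

```python
import torch

def float64(t: torch.Tensor):
    """return torch.float64 if device is not mps or xpu, else return torch.float32"""
    match t.device.type:
        case 'mps' | 'xpu':   # or-pattern: matches either string
            return torch.float32
    return torch.float64
```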
@@ -38,9 +38,11 @@ warnings.filterwarnings("default" if opts.show_gradio_deprecation_warnings else
 # this is a fix for Windows users. Without it, javascript files will be served with text/html content-type and the browser will not show any UI
 mimetypes.init()
 mimetypes.add_type('application/javascript', '.js')
+mimetypes.add_type('application/javascript', '.mjs')

 # Likewise, add explicit content-type header for certain missing image types
 mimetypes.add_type('image/webp', '.webp')
+mimetypes.add_type('image/avif', '.avif')

 if not cmd_opts.share and not cmd_opts.listen:
     # fix gradio phoning home

@@ -566,18 +568,25 @@ def create_ui():
                     init_mask_inpaint = gr.Image(label="Mask", source="upload", interactive=True, type="pil", image_mode="RGBA", elem_id="img_inpaint_mask")

                 with gr.TabItem('Batch', id='batch', elem_id="img2img_batch_tab") as tab_batch:
-                    hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
-                    gr.HTML(
-                        "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
-                        "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
-                        f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
-                        f"{hidden}</p>"
-                    )
-                    img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir")
-                    img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir")
-                    img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir")
+                    with gr.Tabs(elem_id="img2img_batch_source"):
+                        img2img_batch_source_type = gr.Textbox(visible=False, value="upload")
+                        with gr.TabItem('Upload', id='batch_upload', elem_id="img2img_batch_upload_tab") as tab_batch_upload:
+                            img2img_batch_upload = gr.Files(label="Files", interactive=True, elem_id="img2img_batch_upload")
+                        with gr.TabItem('From directory', id='batch_from_dir', elem_id="img2img_batch_from_dir_tab") as tab_batch_from_dir:
+                            hidden = '<br>Disabled when launched with --hide-ui-dir-config.' if shared.cmd_opts.hide_ui_dir_config else ''
+                            gr.HTML(
+                                "<p style='padding-bottom: 1em;' class=\"text-gray-500\">Process images in a directory on the same machine where the server is running." +
+                                "<br>Use an empty output directory to save pictures normally instead of writing to the output directory." +
+                                f"<br>Add inpaint batch mask directory to enable inpaint batch processing."
+                                f"{hidden}</p>"
+                            )
+                            img2img_batch_input_dir = gr.Textbox(label="Input directory", **shared.hide_dirs, elem_id="img2img_batch_input_dir")
+                            img2img_batch_output_dir = gr.Textbox(label="Output directory", **shared.hide_dirs, elem_id="img2img_batch_output_dir")
+                            img2img_batch_inpaint_mask_dir = gr.Textbox(label="Inpaint batch mask directory (required for inpaint batch processing only)", **shared.hide_dirs, elem_id="img2img_batch_inpaint_mask_dir")
+                    tab_batch_upload.select(fn=lambda: "upload", inputs=[], outputs=[img2img_batch_source_type])
+                    tab_batch_from_dir.select(fn=lambda: "from dir", inputs=[], outputs=[img2img_batch_source_type])
                     with gr.Accordion("PNG info", open=False):
-                        img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", **shared.hide_dirs, elem_id="img2img_batch_use_png_info")
+                        img2img_batch_use_png_info = gr.Checkbox(label="Append png info to prompts", elem_id="img2img_batch_use_png_info")
                         img2img_batch_png_info_dir = gr.Textbox(label="PNG info directory", **shared.hide_dirs, placeholder="Leave empty to use input directory", elem_id="img2img_batch_png_info_dir")
                         img2img_batch_png_info_props = gr.CheckboxGroup(["Prompt", "Negative prompt", "Seed", "CFG scale", "Sampler", "Steps", "Model hash"], label="Parameters to take from png info", info="Prompts from png info will be appended to prompts set in ui.")

@@ -759,6 +768,8 @@ def create_ui():
                     img2img_batch_use_png_info,
                     img2img_batch_png_info_props,
                     img2img_batch_png_info_dir,
+                    img2img_batch_source_type,
+                    img2img_batch_upload,
                 ] + custom_inputs,
                 outputs=[
                     output_panel.gallery,
@@ -50,7 +50,7 @@ def reload_javascript():

     def template_response(*args, **kwargs):
         res = shared.GradioTemplateResponseOriginal(*args, **kwargs)
-        res.body = res.body.replace(b'</head>', f'{js}</head>'.encode("utf8"))
+        res.body = res.body.replace(b'</head>', f'{js}<meta name="referrer" content="no-referrer"/></head>'.encode("utf8"))
         res.body = res.body.replace(b'</body>', f'{css}</body>'.encode("utf8"))
         res.init_headers()
         return res

@@ -1,3 +1,4 @@
+setuptools==69.5.1 # temp fix for compatibility with some old packages
 GitPython==3.1.32
 Pillow==9.5.0
 accelerate==0.21.0
@@ -95,15 +95,15 @@ def confirm_checkpoints_or_none(p, xs):
             raise RuntimeError(f"Unknown checkpoint: {x}")


-def apply_clip_skip(p, x, xs):
-    opts.data["CLIP_stop_at_last_layers"] = x
+def confirm_range(min_val, max_val, axis_label):
+    """Generates a AxisOption.confirm() function that checks all values are within the specified range."""

+    def confirm_range_fun(p, xs):
+        for x in xs:
+            if not (max_val >= x >= min_val):
+                raise ValueError(f'{axis_label} value "{x}" out of range [{min_val}, {max_val}]')

-def apply_upscale_latent_space(p, x, xs):
-    if x.lower().strip() != '0':
-        opts.data["use_scale_latent_for_hires_fix"] = True
-    else:
-        opts.data["use_scale_latent_for_hires_fix"] = False
+    return confirm_range_fun


 def apply_size(p, x: str, xs) -> None:

@@ -118,21 +118,16 @@ def apply_size(p, x: str, xs) -> None:


 def find_vae(name: str):
-    if name.lower() in ['auto', 'automatic']:
-        return modules.sd_vae.unspecified
-    if name.lower() == 'none':
-        return None
-    else:
-        choices = [x for x in sorted(modules.sd_vae.vae_dict, key=lambda x: len(x)) if name.lower().strip() in x.lower()]
-        if len(choices) == 0:
-            print(f"No VAE found for {name}; using automatic")
-            return modules.sd_vae.unspecified
-        else:
-            return modules.sd_vae.vae_dict[choices[0]]
+    match name := name.lower().strip():
+        case 'auto', 'automatic':
+            return 'Automatic'
+        case 'none':
+            return 'None'
+    return next((k for k in modules.sd_vae.vae_dict if k.lower() == name), print(f'No VAE found for {name}; using Automatic') or 'Automatic')


 def apply_vae(p, x, xs):
-    modules.sd_vae.reload_vae_weights(shared.sd_model, vae_file=find_vae(x))
+    p.override_settings['sd_vae'] = find_vae(x)


 def apply_styles(p: StableDiffusionProcessingTxt2Img, x: str, _):
@@ -140,7 +135,7 @@ def apply_styles(p: StableDiffusionProcessingTxt2Img, x: str, _):


 def apply_uni_pc_order(p, x, xs):
-    opts.data["uni_pc_order"] = min(x, p.steps - 1)
+    p.override_settings['uni_pc_order'] = min(x, p.steps - 1)


 def apply_face_restore(p, opt, x):

@@ -162,12 +157,14 @@ def apply_override(field, boolean: bool = False):
         if boolean:
             x = True if x.lower() == "true" else False
         p.override_settings[field] = x

     return fun


 def boolean_choice(reverse: bool = False):
     def choice():
         return ["False", "True"] if reverse else ["True", "False"]

     return choice

@@ -212,7 +209,7 @@ def list_to_csv_string(data_list):


 def csv_string_to_list_strip(data_str):
-    return list(map(str.strip, chain.from_iterable(csv.reader(StringIO(data_str)))))
+    return list(map(str.strip, chain.from_iterable(csv.reader(StringIO(data_str), skipinitialspace=True))))


 class AxisOption:
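Editor's note: `skipinitialspace=True` makes the csv reader treat a quote that follows a comma-plus-space as the start of a quoted field, so axis values that themselves contain commas survive the split. A small before/after sketch; the sample string is made up:

```python
import csv
from io import StringIO
from itertools import chain

data = 'a, "b, c", d'

without = list(map(str.strip, chain.from_iterable(csv.reader(StringIO(data)))))
with_skip = list(map(str.strip, chain.from_iterable(csv.reader(StringIO(data), skipinitialspace=True))))

print(without)    # ['a', '"b', 'c"', 'd'] - the quoted field is broken apart
print(with_skip)  # ['a', 'b, c', 'd']
```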
@ -264,13 +261,13 @@ axis_options = [
|
|||||||
AxisOption("Schedule max sigma", float, apply_override("sigma_max")),
|
AxisOption("Schedule max sigma", float, apply_override("sigma_max")),
|
||||||
AxisOption("Schedule rho", float, apply_override("rho")),
|
AxisOption("Schedule rho", float, apply_override("rho")),
|
||||||
AxisOption("Eta", float, apply_field("eta")),
|
AxisOption("Eta", float, apply_field("eta")),
|
||||||
AxisOption("Clip skip", int, apply_clip_skip),
|
AxisOption("Clip skip", int, apply_override('CLIP_stop_at_last_layers')),
|
||||||
AxisOption("Denoising", float, apply_field("denoising_strength")),
|
AxisOption("Denoising", float, apply_field("denoising_strength")),
|
||||||
AxisOption("Initial noise multiplier", float, apply_field("initial_noise_multiplier")),
|
AxisOption("Initial noise multiplier", float, apply_field("initial_noise_multiplier")),
|
||||||
AxisOption("Extra noise", float, apply_override("img2img_extra_noise")),
|
AxisOption("Extra noise", float, apply_override("img2img_extra_noise")),
|
||||||
AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
|
AxisOptionTxt2Img("Hires upscaler", str, apply_field("hr_upscaler"), choices=lambda: [*shared.latent_upscale_modes, *[x.name for x in shared.sd_upscalers]]),
|
||||||
AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")),
|
AxisOptionImg2Img("Cond. Image Mask Weight", float, apply_field("inpainting_mask_weight")),
|
||||||
AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: ['None'] + list(sd_vae.vae_dict)),
|
AxisOption("VAE", str, apply_vae, cost=0.7, choices=lambda: ['Automatic', 'None'] + list(sd_vae.vae_dict)),
|
||||||
AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)),
|
AxisOption("Styles", str, apply_styles, choices=lambda: list(shared.prompt_styles.styles)),
|
||||||
AxisOption("UniPC Order", int, apply_uni_pc_order, cost=0.5),
|
AxisOption("UniPC Order", int, apply_uni_pc_order, cost=0.5),
|
||||||
AxisOption("Face restore", str, apply_face_restore, format_value=format_value),
|
AxisOption("Face restore", str, apply_face_restore, format_value=format_value),
|
||||||
@ -399,18 +396,12 @@ def draw_xyz_grid(p, xs, ys, zs, x_labels, y_labels, z_labels, cell, draw_legend
|
|||||||
|
|
||||||
class SharedSettingsStackHelper(object):
|
class SharedSettingsStackHelper(object):
|
||||||
def __enter__(self):
|
def __enter__(self):
|
||||||
self.CLIP_stop_at_last_layers = opts.CLIP_stop_at_last_layers
|
pass
|
||||||
self.vae = opts.sd_vae
|
|
||||||
self.uni_pc_order = opts.uni_pc_order
|
|
||||||
|
|
||||||
def __exit__(self, exc_type, exc_value, tb):
|
def __exit__(self, exc_type, exc_value, tb):
|
||||||
opts.data["sd_vae"] = self.vae
|
|
||||||
opts.data["uni_pc_order"] = self.uni_pc_order
|
|
||||||
modules.sd_models.reload_model_weights()
|
modules.sd_models.reload_model_weights()
|
||||||
modules.sd_vae.reload_vae_weights()
|
modules.sd_vae.reload_vae_weights()
|
||||||
|
|
||||||
opts.data["CLIP_stop_at_last_layers"] = self.CLIP_stop_at_last_layers
|
|
||||||
|
|
||||||
|
|
||||||
re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
|
re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")
|
||||||
re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*")
|
re_range_float = re.compile(r"\s*([+-]?\s*\d+(?:.\d*)?)\s*-\s*([+-]?\s*\d+(?:.\d*)?)(?:\s*\(([+-]\d+(?:.\d*)?)\s*\))?\s*")
|
||||||
@@ -572,7 +563,7 @@ class Script(scripts.Script):
                     mc = re_range_count.fullmatch(val)
                     if m is not None:
                         start = int(m.group(1))
-                        end = int(m.group(2))+1
+                        end = int(m.group(2)) + 1
                         step = int(m.group(3)) if m.group(3) is not None else 1
 
                         valslist_ext += list(range(start, end, step))
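The two hunks above define and apply the range syntax for integer axis values. Here is a small self-contained sketch of that expansion; expand_int_range is a hypothetical helper written for illustration (it is not part of the script), while the regex is the one defined above.

import re

re_range = re.compile(r"\s*([+-]?\s*\d+)\s*-\s*([+-]?\s*\d+)(?:\s*\(([+-]\d+)\s*\))?\s*")

def expand_int_range(val):
    m = re_range.fullmatch(val)
    if m is None:
        return [int(val)]                      # plain single value, no range syntax (assumed fallback)
    start = int(m.group(1))
    end = int(m.group(2)) + 1                  # the end of the range is inclusive
    step = int(m.group(3)) if m.group(3) is not None else 1
    return list(range(start, end, step))

print(expand_int_range("1-5 (+2)"))           # [1, 3, 5]
print(expand_int_range("10-14"))              # [10, 11, 12, 13, 14]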
@@ -725,11 +716,11 @@ class Script(scripts.Script):
             ydim = len(ys) if vary_seeds_y else 1
 
             if vary_seeds_x:
                 pc.seed += ix
             if vary_seeds_y:
                 pc.seed += iy * xdim
             if vary_seeds_z:
                 pc.seed += iz * xdim * ydim
 
             try:
                 res = process_images(pc)
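To make the arithmetic in this hunk concrete, here is a tiny sketch of how each cell's seed is offset by its x/y/z grid position; the function name and defaults are illustrative, not taken from the script.

def cell_seed(base_seed, ix, iy, iz, xdim, ydim, vary_x=True, vary_y=True, vary_z=True):
    # Offsets mirror the hunk above: x varies fastest, then y, then z.
    seed = base_seed
    if vary_x:
        seed += ix
    if vary_y:
        seed += iy * xdim
    if vary_z:
        seed += iz * xdim * ydim
    return seed

# Example: in a 3x2 x/y grid, cell (ix=2, iy=1, iz=0) gets base_seed + 2 + 1*3.
print(cell_seed(1000, 2, 1, 0, xdim=3, ydim=2))  # 1005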
@@ -797,18 +788,18 @@ class Script(scripts.Script):
         z_count = len(zs)
 
         # Set the grid infotexts to the real ones with extra_generation_params (1 main grid + z_count sub-grids)
-        processed.infotexts[:1+z_count] = grid_infotext[:1+z_count]
+        processed.infotexts[:1 + z_count] = grid_infotext[:1 + z_count]
 
         if not include_lone_images:
             # Don't need sub-images anymore, drop from list:
-            processed.images = processed.images[:z_count+1]
+            processed.images = processed.images[:z_count + 1]
 
         if opts.grid_save:
             # Auto-save main and sub-grids:
             grid_count = z_count + 1 if z_count > 1 else 1
             for g in range(grid_count):
                 # TODO: See previous comment about intentional data misalignment.
-                adj_g = g-1 if g > 0 else g
+                adj_g = g - 1 if g > 0 else g
                 images.save_image(processed.images[g], p.outpath_grids, "xyz_grid", info=processed.infotexts[g], extension=opts.grid_format, prompt=processed.all_prompts[adj_g], seed=processed.all_seeds[adj_g], grid=True, p=processed)
                 if not include_sub_grids:  # if not include_sub_grids then skip saving after the first grid
                     break
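A short illustration of the index bookkeeping in this hunk, using made-up data rather than code from the script: image/infotext 0 is the main grid, entries 1..z_count are the sub-grids, and adj_g shifts grid indices onto the per-image prompt/seed lists (see the TODO about intentional misalignment).

z_count = 3
infotexts = ["main grid", "sub-grid 1", "sub-grid 2", "sub-grid 3"]

grid_count = z_count + 1 if z_count > 1 else 1   # one main grid plus each sub-grid
for g in range(grid_count):
    adj_g = g - 1 if g > 0 else g                # grids after the first use the previous index
    print(g, adj_g, infotexts[g])
# prints: 0 0 main grid / 1 0 sub-grid 1 / 2 1 sub-grid 2 / 3 2 sub-grid 3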
style.css
@@ -1,6 +1,6 @@
 /* temporary fix to load default gradio font in frontend instead of backend */
 
-@import url('/webui-assets/css/sourcesanspro.css');
+@import url('webui-assets/css/sourcesanspro.css');
 
 
 /* temporary fix to hide gradio crop tool until it's fixed https://github.com/gradio-app/gradio/issues/3810 */
@@ -780,9 +780,9 @@ table.popup-table .link{
     position:absolute;
     display:block;
     padding:0px 0;
-    border:2px solid #a55000;
+    border:2px solid var(--primary-800);
     border-radius:8px;
-    box-shadow:1px 1px 2px #CE6400;
+    box-shadow:1px 1px 2px var(--primary-500);
     width: 200px;
 }
 
@@ -799,7 +799,7 @@ table.popup-table .link{
 }
 
 .context-menu-items a:hover{
-    background: #a55000;
+    background: var(--primary-700);
 }
 
 
@@ -807,6 +807,8 @@ table.popup-table .link{
 
 #tab_extensions table{
     border-collapse: collapse;
+    overflow-x: auto;
+    display: block;
 }
 
 #tab_extensions table td, #tab_extensions table th{
webui-macos-env.sh
@@ -11,7 +11,12 @@ fi
 
 export install_dir="$HOME"
 export COMMANDLINE_ARGS="--skip-torch-cuda-test --upcast-sampling --no-half-vae --use-cpu interrogate"
-export TORCH_COMMAND="pip install torch==2.1.0 torchvision==0.16.0"
 export PYTORCH_ENABLE_MPS_FALLBACK=1
 
+if [[ "$(sysctl -n machdep.cpu.brand_string)" =~ ^.*"Intel".*$ ]]; then
+    export TORCH_COMMAND="pip install torch==2.1.2 torchvision==0.16.2"
+else
+    export TORCH_COMMAND="pip install torch==2.3.0 torchvision==0.18.0"
+fi
+
 ####################################################################
webui.bat
@@ -37,10 +37,15 @@ if %ERRORLEVEL% == 0 goto :activate_venv
 for /f "delims=" %%i in ('CALL %PYTHON% -c "import sys; print(sys.executable)"') do set PYTHON_FULLNAME="%%i"
 echo Creating venv in directory %VENV_DIR% using python %PYTHON_FULLNAME%
 %PYTHON_FULLNAME% -m venv "%VENV_DIR%" >tmp/stdout.txt 2>tmp/stderr.txt
-if %ERRORLEVEL% == 0 goto :activate_venv
+if %ERRORLEVEL% == 0 goto :upgrade_pip
 echo Unable to create venv in directory "%VENV_DIR%"
 goto :show_stdout_stderr
 
+:upgrade_pip
+"%VENV_DIR%\Scripts\Python.exe" -m pip install --upgrade pip
+if %ERRORLEVEL% == 0 goto :activate_venv
+echo Warning: Failed to upgrade PIP version
+
 :activate_venv
 set PYTHON="%VENV_DIR%\Scripts\Python.exe"
 echo venv %PYTHON%
webui.sh
@@ -210,6 +210,7 @@ then
     if [[ ! -d "${venv_dir}" ]]
     then
         "${python_cmd}" -m venv "${venv_dir}"
+        "${venv_dir}"/bin/python -m pip install --upgrade pip
         first_launch=1
     fi
     # shellcheck source=/dev/null