mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
synced 2024-12-29 19:05:05 +08:00

change hash to sha256

parent 82725f0ac4
commit a95f135308
.gitignore (vendored): 1 line added

@@ -32,3 +32,4 @@ notification.mp3
 /extensions
 /test/stdout.txt
 /test/stderr.txt
+/cache.json
modules/api/api.py

@@ -371,7 +371,7 @@ class Api:
         return upscalers

     def get_sd_models(self):
-        return [{"title":x.title, "model_name":x.model_name, "hash":x.hash, "filename": x.filename, "config": find_checkpoint_config(x)} for x in checkpoints_list.values()]
+        return [{"title": x.title, "model_name": x.model_name, "hash": x.shorthash, "sha256": x.sha256, "filename": x.filename, "config": find_checkpoint_config(x)} for x in checkpoints_list.values()]

     def get_hypernetworks(self):
         return [{"name": name, "path": shared.hypernetworks[name]} for name in shared.hypernetworks]
modules/api/models.py

@@ -224,7 +224,8 @@ class UpscalerItem(BaseModel):
 class SDModelItem(BaseModel):
     title: str = Field(title="Title")
     model_name: str = Field(title="Model Name")
-    hash: str = Field(title="Hash")
+    hash: Optional[str] = Field(title="Short hash")
+    sha256: Optional[str] = Field(title="sha256 hash")
     filename: str = Field(title="Filename")
     config: str = Field(title="Config file")
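Note: a minimal client-side sketch of what the extended payload looks like, assuming the route registered for get_sd_models is /sdapi/v1/sd-models and a local server on 127.0.0.1:7860 (both are assumptions, not shown in this diff):

```python
# Hypothetical client sketch; endpoint URL and port are assumptions.
import json
import urllib.request

with urllib.request.urlopen("http://127.0.0.1:7860/sdapi/v1/sd-models") as resp:
    models = json.load(resp)

for m in models:
    # "hash" now carries the 10-character sha256-based shorthash and "sha256"
    # the full digest; both fields are Optional and stay null until the
    # checkpoint has been hashed once.
    print(m["title"], m["hash"], m["sha256"])
```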
modules/hashes.py (new file, +72 lines)

@@ -0,0 +1,72 @@
+import hashlib
+import json
+import os.path
+
+import filelock
+
+
+cache_filename = "cache.json"
+cache_data = None
+
+
+def dump_cache():
+    with filelock.FileLock(cache_filename+".lock"):
+        with open(cache_filename, "w", encoding="utf8") as file:
+            json.dump(cache_data, file, indent=4)
+
+
+def cache(subsection):
+    global cache_data
+
+    if cache_data is None:
+        with filelock.FileLock(cache_filename+".lock"):
+            if not os.path.isfile(cache_filename):
+                cache_data = {}
+            else:
+                with open(cache_filename, "r", encoding="utf8") as file:
+                    cache_data = json.load(file)
+
+    s = cache_data.get(subsection, {})
+    cache_data[subsection] = s
+
+    return s
+
+
+def calculate_sha256(filename):
+    hash_sha256 = hashlib.sha256()
+
+    with open(filename, "rb") as f:
+        for chunk in iter(lambda: f.read(4096), b""):
+            hash_sha256.update(chunk)
+
+    return hash_sha256.hexdigest()
+
+
+def sha256(filename, title):
+    hashes = cache("hashes")
+    ondisk_mtime = os.path.getmtime(filename)
+
+    if title in hashes:
+        cached_sha256 = hashes[title].get("sha256", None)
+        cached_mtime = hashes[title].get("mtime", 0)
+
+        if ondisk_mtime <= cached_mtime and cached_sha256 is not None:
+            return cached_sha256
+
+    print(f"Calculating sha256 for {filename}: ", end='')
+    sha256_value = calculate_sha256(filename)
+    print(f"{sha256_value}")
+
+    hashes[title] = {
+        "mtime": ondisk_mtime,
+        "sha256": sha256_value,
+    }
+
+    dump_cache()
+
+    return sha256_value
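Note: the cache is keyed by checkpoint title under a "hashes" subsection of cache.json and invalidated by file mtime, which is why /cache.json is added to .gitignore above. A small usage sketch (the path and title are made-up example values):

```python
# Illustrative only; the checkpoint path and title below are invented.
from modules import hashes

path = "models/Stable-diffusion/example.ckpt"
digest = hashes.sha256(path, "example.ckpt")  # hashes the whole file in 4 KB chunks
print(digest)                                 # 64 hex characters

# A second call returns straight from cache.json as long as the file's
# mtime has not advanced past the cached one.
assert hashes.sha256(path, "example.ckpt") == digest
```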
modules/hypernetworks/hypernetwork.py

@@ -509,7 +509,7 @@ def train_hypernetwork(hypernetwork_name, learn_rate, batch_size, gradient_step,

     if shared.opts.save_training_settings_to_txt:
         saved_params = dict(
-            model_name=checkpoint.model_name, model_hash=checkpoint.hash, num_of_dataset_images=len(ds),
+            model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds),
             **{field: getattr(hypernetwork, field) for field in ['layer_structure', 'activation_func', 'weight_init', 'add_layer_norm', 'use_dropout', ]}
         )
         logging.save_settings_to_file(log_directory, {**saved_params, **locals()})

@@ -737,7 +737,7 @@ def save_hypernetwork(hypernetwork, checkpoint, hypernetwork_name, filename):
     old_sd_checkpoint = hypernetwork.sd_checkpoint if hasattr(hypernetwork, "sd_checkpoint") else None
     old_sd_checkpoint_name = hypernetwork.sd_checkpoint_name if hasattr(hypernetwork, "sd_checkpoint_name") else None
     try:
-        hypernetwork.sd_checkpoint = checkpoint.hash
+        hypernetwork.sd_checkpoint = checkpoint.shorthash
         hypernetwork.sd_checkpoint_name = checkpoint.model_name
         hypernetwork.name = hypernetwork_name
         hypernetwork.save(filename)
modules/sd_models.py

@@ -14,17 +14,56 @@ import ldm.modules.midas as midas

 from ldm.util import instantiate_from_config

-from modules import shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors
+from modules import shared, modelloader, devices, script_callbacks, sd_vae, sd_disable_initialization, errors, hashes
 from modules.paths import models_path
 from modules.sd_hijack_inpainting import do_inpainting_hijack, should_hijack_inpainting

 model_dir = "Stable-diffusion"
 model_path = os.path.abspath(os.path.join(models_path, model_dir))

-CheckpointInfo = namedtuple("CheckpointInfo", ['filename', 'title', 'hash', 'model_name'])
 checkpoints_list = {}
+checkpoint_alisases = {}
 checkpoints_loaded = collections.OrderedDict()


+class CheckpointInfo:
+    def __init__(self, filename):
+        self.filename = filename
+        abspath = os.path.abspath(filename)
+
+        if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
+            name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
+        elif abspath.startswith(model_path):
+            name = abspath.replace(model_path, '')
+        else:
+            name = os.path.basename(filename)
+
+        if name.startswith("\\") or name.startswith("/"):
+            name = name[1:]
+
+        self.title = name
+        self.model_name = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
+        self.hash = model_hash(filename)
+        self.ids = [self.hash, self.model_name, self.title, f'{name} [{self.hash}]']
+        self.shorthash = None
+        self.sha256 = None
+
+    def register(self):
+        checkpoints_list[self.title] = self
+        for id in self.ids:
+            checkpoint_alisases[id] = self
+
+    def calculate_shorthash(self):
+        self.sha256 = hashes.sha256(self.filename, self.title)
+        self.shorthash = self.sha256[0:10]
+
+        if self.shorthash not in self.ids:
+            self.ids += [self.shorthash, self.sha256]
+            self.register()
+
+        return self.shorthash
+
+
 try:
     # this silences the annoying "Some weights of the model checkpoint were not used when initializing..." message at start.
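Note: the namedtuple becomes a class so the expensive full-file sha256 can be computed lazily; every CheckpointInfo starts with only the legacy hash and gains its shorthash/sha256 lookup aliases the first time it is hashed. A sketch of the lifecycle (file path invented; checkpoint_alisases is the dict's actual, misspelled name in this diff):

```python
# Illustrative sketch, not part of the commit.
from modules import sd_models

info = sd_models.CheckpointInfo("models/Stable-diffusion/example.ckpt")
info.register()                      # indexed under title, model_name, legacy hash

assert info.sha256 is None           # nothing hashed yet
short = info.calculate_shorthash()   # computes/loads sha256, keeps first 10 chars
assert sd_models.checkpoint_alisases[short] is info
assert sd_models.checkpoint_alisases[info.sha256] is info
```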
@@ -43,10 +82,14 @@ def setup_model():
     enable_midas_autodownload()


 def checkpoint_tiles():
-    convert = lambda name: int(name) if name.isdigit() else name.lower()
-    alphanumeric_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]
-    return sorted([x.title for x in checkpoints_list.values()], key = alphanumeric_key)
+    def convert(name):
+        return int(name) if name.isdigit() else name.lower()
+
+    def alphanumeric_key(key):
+        return [convert(c) for c in re.split('([0-9]+)', key)]
+
+    return sorted([x.title for x in checkpoints_list.values()], key=alphanumeric_key)


 def find_checkpoint_config(info):
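Note: the rewritten key is a standard natural sort, shown here standalone (list values invented):

```python
import re

def convert(name):
    return int(name) if name.isdigit() else name.lower()

def alphanumeric_key(key):
    # splits "model10.ckpt" into ['model', 10, '.ckpt'] so digit runs compare numerically
    return [convert(c) for c in re.split('([0-9]+)', key)]

print(sorted(["model10.ckpt", "model2.ckpt"], key=alphanumeric_key))
# ['model2.ckpt', 'model10.ckpt']  (a plain lexicographic sort would put model10 first)
```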
@@ -62,48 +105,38 @@ def find_checkpoint_config(info):

 def list_models():
     checkpoints_list.clear()
+    checkpoint_alisases.clear()
     model_list = modelloader.load_models(model_path=model_path, command_path=shared.cmd_opts.ckpt_dir, ext_filter=[".ckpt", ".safetensors"], ext_blacklist=[".vae.safetensors"])

-    def modeltitle(path, shorthash):
-        abspath = os.path.abspath(path)
-
-        if shared.cmd_opts.ckpt_dir is not None and abspath.startswith(shared.cmd_opts.ckpt_dir):
-            name = abspath.replace(shared.cmd_opts.ckpt_dir, '')
-        elif abspath.startswith(model_path):
-            name = abspath.replace(model_path, '')
-        else:
-            name = os.path.basename(path)
-
-        if name.startswith("\\") or name.startswith("/"):
-            name = name[1:]
-
-        shortname = os.path.splitext(name.replace("/", "_").replace("\\", "_"))[0]
-
-        return f'{name} [{shorthash}]', shortname
-
     cmd_ckpt = shared.cmd_opts.ckpt
     if os.path.exists(cmd_ckpt):
-        h = model_hash(cmd_ckpt)
-        title, short_model_name = modeltitle(cmd_ckpt, h)
-        checkpoints_list[title] = CheckpointInfo(cmd_ckpt, title, h, short_model_name)
-        shared.opts.data['sd_model_checkpoint'] = title
+        checkpoint_info = CheckpointInfo(cmd_ckpt)
+        checkpoint_info.register()
+
+        shared.opts.data['sd_model_checkpoint'] = checkpoint_info.title
     elif cmd_ckpt is not None and cmd_ckpt != shared.default_sd_model_file:
         print(f"Checkpoint in --ckpt argument not found (Possible it was moved to {model_path}: {cmd_ckpt}", file=sys.stderr)

     for filename in model_list:
-        h = model_hash(filename)
-        title, short_model_name = modeltitle(filename, h)
-        checkpoints_list[title] = CheckpointInfo(filename, title, h, short_model_name)
+        checkpoint_info = CheckpointInfo(filename)
+        checkpoint_info.register()


-def get_closet_checkpoint_match(searchString):
-    applicable = sorted([info for info in checkpoints_list.values() if searchString in info.title], key = lambda x:len(x.title))
-    if len(applicable) > 0:
-        return applicable[0]
+def get_closet_checkpoint_match(search_string):
+    checkpoint_info = checkpoint_alisases.get(search_string, None)
+    if checkpoint_info is not None:
+        return checkpoint_info
+
+    found = sorted([info for info in checkpoints_list.values() if search_string in info.title], key=lambda x: len(x.title))
+    if found:
+        return found[0]
+
     return None


 def model_hash(filename):
+    """old hash that only looks at a small part of the file and is prone to collisions"""
+
     try:
         with open(filename, "rb") as file:
             import hashlib
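Note: for context on why model_hash is now labelled collision-prone: it digests only a small window of the file rather than the whole thing. A hedged re-creation of that sampling (the body lives outside the shown hunk, so the offsets below are recalled assumptions, and the path is invented):

```python
import hashlib

def legacy_window_hash(filename):
    # Assumed behaviour of the old hash: digest ~64 KiB starting at the 1 MiB
    # offset and keep 8 hex chars; any two checkpoints that happen to share
    # that window collide, hence the move to a full-file sha256.
    with open(filename, "rb") as file:
        file.seek(0x100000)
        m = hashlib.sha256()
        m.update(file.read(0x10000))
        return m.hexdigest()[0:8]

print(legacy_window_hash("models/Stable-diffusion/example.ckpt"))
```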
@@ -119,7 +152,7 @@ def model_hash(filename):
 def select_checkpoint():
     model_checkpoint = shared.opts.sd_model_checkpoint

-    checkpoint_info = checkpoints_list.get(model_checkpoint, None)
+    checkpoint_info = checkpoint_alisases.get(model_checkpoint, None)
     if checkpoint_info is not None:
         return checkpoint_info
@@ -189,9 +222,8 @@ def read_state_dict(checkpoint_file, print_global_state=False, map_location=None):
     return sd


-def load_model_weights(model, checkpoint_info, vae_file="auto"):
-    checkpoint_file = checkpoint_info.filename
-    sd_model_hash = checkpoint_info.hash
+def load_model_weights(model, checkpoint_info: CheckpointInfo, vae_file="auto"):
+    sd_model_hash = checkpoint_info.calculate_shorthash()

     cache_enabled = shared.opts.sd_checkpoint_cache > 0

@@ -201,9 +233,9 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
         model.load_state_dict(checkpoints_loaded[checkpoint_info])
     else:
         # load from file
-        print(f"Loading weights [{sd_model_hash}] from {checkpoint_file}")
+        print(f"Loading weights [{sd_model_hash}] from {checkpoint_info.filename}")

-        sd = read_state_dict(checkpoint_file)
+        sd = read_state_dict(checkpoint_info.filename)
         model.load_state_dict(sd, strict=False)
         del sd

@@ -235,14 +267,14 @@ def load_model_weights(model, checkpoint_info, vae_file="auto"):
             checkpoints_loaded.popitem(last=False)  # LRU

     model.sd_model_hash = sd_model_hash
-    model.sd_model_checkpoint = checkpoint_file
+    model.sd_model_checkpoint = checkpoint_info.filename
     model.sd_checkpoint_info = checkpoint_info

     model.logvar = model.logvar.to(devices.device)  # fix for training

     sd_vae.delete_base_vae()
     sd_vae.clear_loaded_vae()
-    vae_file = sd_vae.resolve_vae(checkpoint_file, vae_file=vae_file)
+    vae_file = sd_vae.resolve_vae(checkpoint_info.filename, vae_file=vae_file)
     sd_vae.load_vae(model, vae_file)
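Note: the practical effect in load_model_weights is that full hashing now happens lazily, on the first load of a checkpoint, and is served from cache.json afterwards. A simplified sketch of the flow (orchestration condensed, not the real function body):

```python
# Simplified illustration using names from the diff.
from modules import sd_models

checkpoint_info = sd_models.select_checkpoint()        # may match via any alias
sd_model_hash = checkpoint_info.calculate_shorthash()  # sha256 computed/cached here
print(f"Loading weights [{sd_model_hash}] from {checkpoint_info.filename}")
```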
modules/shared.py

@@ -428,7 +428,7 @@ options_templates.update(options_section(('ui', "User interface"), {
     "return_grid": OptionInfo(True, "Show grid in results for web"),
     "do_not_show_images": OptionInfo(False, "Do not show any images in results for web"),
     "add_model_hash_to_info": OptionInfo(True, "Add model hash to generation information"),
-    "add_model_name_to_info": OptionInfo(False, "Add model name to generation information"),
+    "add_model_name_to_info": OptionInfo(True, "Add model name to generation information"),
     "disable_weights_auto_swap": OptionInfo(False, "When reading generation parameters from text into UI (from PNG info or pasted text), do not change the selected model/checkpoint."),
     "send_seed": OptionInfo(True, "Send seed when sending prompt or image to other interface"),
     "send_size": OptionInfo(True, "Send size when sending prompt or image to another interface"),
modules/textual_inversion/textual_inversion.py

@@ -407,7 +407,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_
     ds = modules.textual_inversion.dataset.PersonalizedBase(data_root=data_root, width=training_width, height=training_height, repeats=shared.opts.training_image_repeats_per_epoch, placeholder_token=embedding_name, model=shared.sd_model, cond_model=shared.sd_model.cond_stage_model, device=devices.device, template_file=template_file, batch_size=batch_size, gradient_step=gradient_step, shuffle_tags=shuffle_tags, tag_drop_out=tag_drop_out, latent_sampling_method=latent_sampling_method, varsize=varsize)

     if shared.opts.save_training_settings_to_txt:
-        save_settings_to_file(log_directory, {**dict(model_name=checkpoint.model_name, model_hash=checkpoint.hash, num_of_dataset_images=len(ds), num_vectors_per_token=len(embedding.vec)), **locals()})
+        save_settings_to_file(log_directory, {**dict(model_name=checkpoint.model_name, model_hash=checkpoint.shorthash, num_of_dataset_images=len(ds), num_vectors_per_token=len(embedding.vec)), **locals()})

     latent_sampling_method = ds.latent_sampling_method

@@ -584,7 +584,7 @@ def train_embedding(embedding_name, learn_rate, batch_size, gradient_step, data_

     checkpoint = sd_models.select_checkpoint()
     footer_left = checkpoint.model_name
-    footer_mid = '[{}]'.format(checkpoint.hash)
+    footer_mid = '[{}]'.format(checkpoint.shorthash)
     footer_right = '{}v {}s'.format(vectorSize, steps_done)

     captioned_image = caption_image_overlay(image, title, footer_left, footer_mid, footer_right)

@@ -626,7 +626,7 @@ def save_embedding(embedding, optimizer, checkpoint, embedding_name, filename, r
     old_sd_checkpoint_name = embedding.sd_checkpoint_name if hasattr(embedding, "sd_checkpoint_name") else None
     old_cached_checksum = embedding.cached_checksum if hasattr(embedding, "cached_checksum") else None
     try:
-        embedding.sd_checkpoint = checkpoint.hash
+        embedding.sd_checkpoint = checkpoint.shorthash
         embedding.sd_checkpoint_name = checkpoint.model_name
         if remove_cached_checksum:
             embedding.cached_checksum = None
webui.py (+2 lines)

@@ -78,6 +78,8 @@ def initialize():
         print("Stable diffusion model failed to load, exiting", file=sys.stderr)
         exit(1)

+    shared.opts.data["sd_model_checkpoint"] = shared.sd_model.sd_checkpoint_info.title
+
     shared.opts.onchange("sd_model_checkpoint", wrap_queued_call(lambda: modules.sd_models.reload_model_weights()))
     shared.opts.onchange("sd_vae", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)
     shared.opts.onchange("sd_vae_as_default", wrap_queued_call(lambda: modules.sd_vae.reload_vae_weights()), call=False)