Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git, synced 2025-04-19 03:19:02 +08:00
fix missing infotext caused by conds cache
Some generation params, such as TI hashes or Emphasis, are added in sd_hijack / sd_hijack_clip. If conds are fetched from the cache, sd_hijack_clip is not executed, so it never gets a chance to add its generation params. The generation params will also be missing in non-low-vram mode because hijack.extra_generation_params was never read after calculate_hr_conds.
parent 023454b49e
commit e72a6c411a
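To see why the infotext went missing: params like "TI hashes" and "Emphasis" are recorded as a side effect of running the hijacked text encoder, so a conds-cache hit skips the recording entirely. A minimal sketch of the failure mode, with simplified stand-in names (encode_prompt stands in for the hijacked forward pass, not the repo's real API):

# simplified illustration of the pre-commit behaviour
extra_generation_params = {}

def encode_prompt(prompt):
    extra_generation_params["TI hashes"] = "emb: abc123"  # side effect of encoding
    return f"conds({prompt})"

def get_conds(prompt, cache):
    if prompt in cache:
        return cache[prompt]  # cache hit: encode_prompt never runs, so the
                              # "TI hashes" side effect never happens
    cache[prompt] = encode_prompt(prompt)
    return cache[prompt]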
modules/processing.py

@@ -187,6 +187,7 @@ class StableDiffusionProcessing:
 
     cached_uc = [None, None]
     cached_c = [None, None]
+    hijack_generation_params_state_list = []
 
     comments: dict = None
     sampler: sd_samplers_common.Sampler | None = field(default=None, init=False)
@@ -480,6 +481,10 @@ class StableDiffusionProcessing:
 
         for cache in caches:
             if cache[0] is not None and cached_params == cache[0]:
+                if len(cache) == 3:
+                    generation_params_state, cached_params_2 = cache[2]
+                    if cached_params == cached_params_2:
+                        self.hijack_generation_params_state_list.extend(generation_params_state)
                 return cache[1]
 
         cache = caches[0]
@@ -487,9 +492,25 @@ class StableDiffusionProcessing:
         with devices.autocast():
             cache[1] = function(shared.sd_model, required_prompts, steps, hires_steps, shared.opts.use_old_scheduling)
 
+        generation_params_state = model_hijack.capture_generation_params_state()
+        self.hijack_generation_params_state_list.extend(generation_params_state)
+        if len(cache) == 2:
+            cache.append((generation_params_state, cached_params))
+        else:
+            cache[2] = (generation_params_state, cached_params)
+
         cache[0] = cached_params
         return cache[1]
 
+    def apply_hijack_generation_params(self):
+        self.extra_generation_params.update(model_hijack.extra_generation_params)
+        for func in self.hijack_generation_params_state_list:
+            try:
+                func(self.extra_generation_params)
+            except Exception:
+                errors.report(f"Failed to apply hijack generation params state", exc_info=True)
+        self.hijack_generation_params_state_list.clear()
+
     def setup_conds(self):
         prompts = prompt_parser.SdConditioning(self.prompts, width=self.width, height=self.height)
         negative_prompts = prompt_parser.SdConditioning(self.negative_prompts, width=self.width, height=self.height, is_negative_prompt=True)
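For orientation, each conds cache here is a mutable list that previously held [cached_params, conds]; the hunks above grow it by a third slot so the captured hijack state travels with the cached conds and can be replayed on a hit. A rough sketch of that lifecycle, assuming a compute_conds() stand-in for the real function(...) call:

# sketch only: cache layout used by get_conds_with_caching after this commit
cache = [None, None]  # [cached_params, conds]

# cache miss: compute conds, then capture the hijack state next to them
cache[1] = compute_conds()  # stand-in for function(shared.sd_model, ...)
state = model_hijack.capture_generation_params_state()
if len(cache) == 2:
    cache.append((state, cached_params))  # -> [params, conds, (state, params)]
else:
    cache[2] = (state, cached_params)
cache[0] = cached_params

# cache hit: replay the stored state so the infotext is not lost
if cache[0] == cached_params and len(cache) == 3:
    state, params_2 = cache[2]
    if params_2 == cached_params:
        hijack_generation_params_state_list.extend(state)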
@@ -502,6 +523,8 @@ class StableDiffusionProcessing:
         self.uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, negative_prompts, total_steps, [self.cached_uc], self.extra_network_data)
         self.c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, prompts, total_steps, [self.cached_c], self.extra_network_data)
 
+        self.apply_hijack_generation_params()
+
     def get_conds(self):
         return self.c, self.uc
 
@@ -965,8 +988,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
 
             p.setup_conds()
 
-            p.extra_generation_params.update(model_hijack.extra_generation_params)
-
             # params.txt should be saved after scripts.process_batch, since the
             # infotext could be modified by that callback
             # Example: a wildcard processed by process_batch sets an extra model
@@ -1513,6 +1534,8 @@ class StableDiffusionProcessingTxt2Img(StableDiffusionProcessing):
         self.hr_uc = self.get_conds_with_caching(prompt_parser.get_learned_conditioning, hr_negative_prompts, self.firstpass_steps, [self.cached_hr_uc, self.cached_uc], self.hr_extra_network_data, total_steps)
         self.hr_c = self.get_conds_with_caching(prompt_parser.get_multicond_learned_conditioning, hr_prompts, self.firstpass_steps, [self.cached_hr_c, self.cached_c], self.hr_extra_network_data, total_steps)
 
+        self.apply_hijack_generation_params()
+
     def setup_conds(self):
         if self.is_hr_pass:
             # if we are in hr pass right now, the call is being made from the refiner, and we don't need to setup firstpass cons or switch model
modules/sd_hijack.py

@@ -6,6 +6,7 @@ from modules import devices, sd_hijack_optimizations, shared, script_callbacks,
 from modules.hypernetworks import hypernetwork
 from modules.shared import cmd_opts
 from modules import sd_hijack_clip, sd_hijack_open_clip, sd_hijack_unet, sd_hijack_xlmr, xlmr, xlmr_m18
+from modules.util import GenerationParamsState
 
 import ldm.modules.attention
 import ldm.modules.diffusionmodules.model
@@ -321,6 +322,13 @@ class StableDiffusionModelHijack:
         self.comments = []
         self.extra_generation_params = {}
 
+    def capture_generation_params_state(self):
+        state = []
+        for key in list(self.extra_generation_params):
+            if isinstance(self.extra_generation_params[key], GenerationParamsState):
+                state.append(self.extra_generation_params.pop(key))
+        return state
+
     def get_prompt_lengths(self, text):
         if self.clip is None:
             return "-", "-"
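Note that capture_generation_params_state only pops entries that are GenerationParamsState instances; plain values stay behind and keep flowing through the old hijack.extra_generation_params path. An illustrative before/after (the values are invented, not from the commit):

# illustrative values only
model_hijack.extra_generation_params = {
    "Eta": 0.2,                                     # plain value: left in place
    "TI hashes": EmbeddingHashes(["emb: abc123"]),  # state object: popped
}
state = model_hijack.capture_generation_params_state()
# state == [EmbeddingHashes(...)]
# model_hijack.extra_generation_params == {"Eta": 0.2}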
modules/sd_hijack_clip.py

@@ -5,6 +5,7 @@ import torch
 
 from modules import prompt_parser, devices, sd_hijack, sd_emphasis
 from modules.shared import opts
+from modules.util import GenerationParamsState
 
 
 class PromptChunk:
@@ -27,6 +28,31 @@ chunk. Those objects are found in PromptChunk.fixes and, are placed into FrozenC
 are applied by sd_hijack.EmbeddingsWithFixes's forward function."""
 
 
+class EmbeddingHashes(GenerationParamsState):
+    def __init__(self, hashes: list):
+        super().__init__()
+        self.hashes = hashes
+
+    def __call__(self, extra_generation_params):
+        unique_hashes = dict.fromkeys(self.hashes)
+        if existing_ti_hashes := extra_generation_params.get('TI hashes'):
+            unique_hashes.update(dict.fromkeys(existing_ti_hashes.split(', ')))
+        extra_generation_params['TI hashes'] = ', '.join(unique_hashes)
+
+
+class EmphasisMode(GenerationParamsState):
+    def __init__(self, texts):
+        super().__init__()
+        if opts.emphasis != 'Original' and any(x for x in texts if '(' in x or '[' in x):
+            self.emphasis = opts.emphasis
+        else:
+            self.emphasis = None
+
+    def __call__(self, extra_generation_params):
+        if self.emphasis:
+            extra_generation_params['Emphasis'] = self.emphasis
+
+
 class TextConditionalModel(torch.nn.Module):
     def __init__(self):
         super().__init__()
@@ -238,12 +264,9 @@ class TextConditionalModel(torch.nn.Module):
                 hashes.append(f"{name}: {shorthash}")
 
             if hashes:
-                if self.hijack.extra_generation_params.get("TI hashes"):
-                    hashes.append(self.hijack.extra_generation_params.get("TI hashes"))
-                self.hijack.extra_generation_params["TI hashes"] = ", ".join(hashes)
+                self.hijack.extra_generation_params["TI hashes"] = EmbeddingHashes(hashes)
 
-        if any(x for x in texts if "(" in x or "[" in x) and opts.emphasis != "Original":
-            self.hijack.extra_generation_params["Emphasis"] = opts.emphasis
+        self.hijack.extra_generation_params["Emphasis"] = EmphasisMode(texts)
 
         if self.return_pooled:
             return torch.hstack(zs), zs[0].pooled
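Deferring the assignment into an EmbeddingHashes object also preserves the old merging behaviour: when __call__ finally runs, it dedupes the new hashes against whatever an earlier pass already wrote into "TI hashes". For example (the hash strings are illustrative):

# dict.fromkeys keeps insertion order while deduplicating
params = {'TI hashes': 'embA: 1111'}  # e.g. left over from the firstpass conds
EmbeddingHashes(['embB: 2222', 'embA: 1111'])(params)
assert params['TI hashes'] == 'embB: 2222, embA: 1111'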
modules/util.py

@@ -288,3 +288,18 @@ def compare_sha256(file_path: str, hash_prefix: str) -> bool:
         for chunk in iter(lambda: f.read(blksize), b""):
             hash_sha256.update(chunk)
     return hash_sha256.hexdigest().startswith(hash_prefix.strip().lower())
+
+
+class GenerationParamsState:
+    """A custom class used in StableDiffusionModelHijack for assigning extra_generation_params.
+    generation_params assigned using this class will work properly with StableDiffusionProcessing.get_conds_with_caching();
+    if assigned directly, the generation_params will not be populated when the conds cache is used.
+
+    Generation_params of this class will be captured (see StableDiffusionModelHijack.capture_generation_params_state) and stored with the conds cache, and will be extracted in StableDiffusionProcessing.apply_hijack_generation_params().
+
+    To use this class, create a subclass with a __call__ method that takes extra_generation_params: dict as input.
+
+    Example usage: sd_hijack_clip.EmbeddingHashes, sd_hijack_clip.EmphasisMode
+    """
+    def __call__(self, extra_generation_params: dict):
+        raise NotImplementedError
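Following the docstring's recipe, a new cache-safe param would be a small subclass of GenerationParamsState; the one below is a hypothetical example for illustration, not part of this commit:

# hypothetical subclass; "Token count" is an invented param name
class TokenCount(GenerationParamsState):
    def __init__(self, count: int):
        super().__init__()
        self.count = count

    def __call__(self, extra_generation_params: dict):
        extra_generation_params['Token count'] = self.count

# a hijack component would assign
#     self.hijack.extra_generation_params['Token count'] = TokenCount(n)
# and StableDiffusionProcessing.apply_hijack_generation_params() would replay
# it into the infotext even when the conds came from cache.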