Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
fix the problem with infinite prompts where empty cond would be calculated incorrectly
parent 0b64633584
commit 179ae47d64
@@ -177,12 +177,13 @@ class SD3Cond(torch.nn.Module):
         self.weights_loaded = False
 
     def forward(self, prompts: list[str]):
-        lg_out, vector_out = self.model_lg(prompts)
-        token_count = lg_out.shape[1]
+        with devices.without_autocast():
+            lg_out, vector_out = self.model_lg(prompts)
+            token_count = lg_out.shape[1]
 
-        t5_out = self.model_t5(prompts, token_count=token_count)
-        lgt_out = torch.cat([lg_out, t5_out], dim=-2)
+            t5_out = self.model_t5(prompts, token_count=token_count)
+            lgt_out = torch.cat([lg_out, t5_out], dim=-2)
 
         return {
             'crossattn': lgt_out,
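Note: with this hunk, both text-encoder passes and the CLIP/T5 concatenation run inside devices.without_autocast(), so SD3Cond produces the same conditioning whether it is reached through get_learned_conditioning or called directly. For readers unfamiliar with the dim=-2 concatenation, a minimal runnable sketch follows; the shapes are illustrative only and not taken from this commit.

import torch

# Illustrative only: stack CLIP(L+G) token embeddings and T5 token embeddings
# along the token axis (dim=-2), mirroring the torch.cat call in the hunk above.
lg_out = torch.randn(2, 77, 4096)   # (batch, clip tokens, channels); made-up shape
t5_out = torch.randn(2, 77, 4096)   # T5 output produced with the same token_count
lgt_out = torch.cat([lg_out, t5_out], dim=-2)
print(lgt_out.shape)                # torch.Size([2, 154, 4096])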
@@ -47,8 +47,7 @@ class SD3Inferencer(torch.nn.Module):
         return contextlib.nullcontext()
 
     def get_learned_conditioning(self, batch: list[str]):
-        with devices.without_autocast():
-            return self.cond_stage_model(batch)
+        return self.cond_stage_model(batch)
 
     def apply_model(self, x, t, cond):
         return self.model(x, t, c_crossattn=cond['crossattn'], y=cond['vector'])
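Note: since the previous hunk moves the autocast guard into SD3Cond.forward, the wrapper here becomes redundant and is removed; code paths that call cond_stage_model directly (such as the empty-cond helper below) now get the same no-autocast behavior. As a rough, hypothetical stand-in for devices.without_autocast (not the project's actual implementation), the context manager being moved does roughly this:

import contextlib
import torch

# Hypothetical stand-in: if autocast is currently active, return a context
# manager that disables it for the wrapped block; otherwise do nothing.
def without_autocast():
    if torch.is_autocast_enabled():
        return torch.autocast("cuda", enabled=False)
    return contextlib.nullcontext()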
@@ -718,16 +718,15 @@ def get_empty_cond(sd_model):
     p = processing.StableDiffusionProcessingTxt2Img()
     extra_networks.activate(p, {})
 
-    if hasattr(sd_model, 'conditioner'):
+    if hasattr(sd_model, 'get_learned_conditioning'):
         d = sd_model.get_learned_conditioning([""])
-        return d['crossattn']
     else:
         d = sd_model.cond_stage_model([""])
 
-        if isinstance(d, dict):
-            d = d['crossattn']
+    if isinstance(d, dict):
+        d = d['crossattn']
 
-        return d
+    return d
 
 
 def send_model_to_cpu(m):
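Note: the reworked helper above normalizes the return value: a conditioning model that returns a dict (as SD3's does, per the 'crossattn' key used elsewhere in this diff) and one that returns a bare tensor both end up yielding a tensor, which is presumably what the infinite-prompt handling downstream expects, per the commit message. A small self-contained illustration of that normalization; the helper name and shapes are made up for the example.

import torch

# Made-up example: reduce either cond format to the tensor the caller needs.
def normalize_cond(d):
    if isinstance(d, dict):
        d = d['crossattn']
    return d

dict_style_cond = {'crossattn': torch.zeros(1, 154, 4096), 'vector': torch.zeros(1, 2048)}
tensor_style_cond = torch.zeros(1, 77, 768)

print(normalize_cond(dict_style_cond).shape)    # torch.Size([1, 154, 4096])
print(normalize_cond(tensor_style_cond).shape)  # torch.Size([1, 77, 768])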