Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git

Commit 1e5afd4fa9 (parent 8a3d232839)

Apply suggestions from code review

Co-authored-by: Aarni Koskela <akx@iki.fi>
@@ -19,10 +19,10 @@ diffusionmodules_model_AttnBlock_forward = ldm.modules.diffusionmodules.model.At


 class SdOptimization:
-    def __init__(self, name, label=None, cmd_opt=None):
-        self.name = name
-        self.label = label
-        self.cmd_opt = cmd_opt
+    name: str = None
+    label: str | None = None
+    cmd_opt: str | None = None
+    priority: int = 0

     def title(self):
         if self.label is None:
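Note (not part of this commit): with priority now a plain class-level attribute on SdOptimization, candidate optimizations can be ranked directly. A minimal sketch of how a caller might pick one, assuming a list of SdOptimization instances; apply_best_optimization is a hypothetical helper, not code from this repository:

def apply_best_optimization(optimizations):
    # Rank candidates by their priority attribute (or property), highest first,
    # and apply the first one whose is_available() check passes.
    for opt in sorted(optimizations, key=lambda o: o.priority, reverse=True):
        if opt.is_available():
            opt.apply()
            return opt
    return None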
@@ -33,9 +33,6 @@ class SdOptimization:
     def is_available(self):
         return True

-    def priority(self):
-        return 0
-
     def apply(self):
         pass

@@ -45,41 +42,37 @@ class SdOptimization:


 class SdOptimizationXformers(SdOptimization):
-    def __init__(self):
-        super().__init__("xformers", cmd_opt="xformers")
+    name = "xformers"
+    cmd_opt = "xformers"
+    priority = 100

     def is_available(self):
         return shared.cmd_opts.force_enable_xformers or (shared.xformers_available and torch.version.cuda and (6, 0) <= torch.cuda.get_device_capability(shared.device) <= (9, 0))

-    def priority(self):
-        return 100
-
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = xformers_attention_forward
         ldm.modules.diffusionmodules.model.AttnBlock.forward = xformers_attnblock_forward


 class SdOptimizationSdpNoMem(SdOptimization):
-    def __init__(self, name="sdp-no-mem", label="scaled dot product without memory efficient attention", cmd_opt="opt_sdp_no_mem_attention"):
-        super().__init__(name, label, cmd_opt)
+    name = "sdp-no-mem"
+    label = "scaled dot product without memory efficient attention"
+    cmd_opt = "opt_sdp_no_mem_attention"
+    priority = 90

     def is_available(self):
         return hasattr(torch.nn.functional, "scaled_dot_product_attention") and callable(torch.nn.functional.scaled_dot_product_attention)

-    def priority(self):
-        return 90
-
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = scaled_dot_product_no_mem_attention_forward
         ldm.modules.diffusionmodules.model.AttnBlock.forward = sdp_no_mem_attnblock_forward


 class SdOptimizationSdp(SdOptimizationSdpNoMem):
-    def __init__(self):
-        super().__init__("sdp", "scaled dot product", cmd_opt="opt_sdp_attention")
+    name = "sdp"
+    label = "scaled dot product"
+    cmd_opt = "opt_sdp_attention"
+    priority = 80

-    def priority(self):
-        return 80
-
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = scaled_dot_product_attention_forward
@@ -87,11 +80,9 @@ class SdOptimizationSdp(SdOptimizationSdpNoMem):


 class SdOptimizationSubQuad(SdOptimization):
-    def __init__(self):
-        super().__init__("sub-quadratic", cmd_opt="opt_sub_quad_attention")
+    name = "sub-quadratic"
+    cmd_opt = "opt_sub_quad_attention"
+    priority = 10

-    def priority(self):
-        return 10
-
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = sub_quad_attention_forward
@@ -99,20 +90,21 @@ class SdOptimizationSubQuad(SdOptimization):


 class SdOptimizationV1(SdOptimization):
-    def __init__(self):
-        super().__init__("V1", "original v1", cmd_opt="opt_split_attention_v1")
+    name = "V1"
+    label = "original v1"
+    cmd_opt = "opt_split_attention_v1"
+    priority = 10

-    def priority(self):
-        return 10

     def apply(self):
         ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward_v1


 class SdOptimizationInvokeAI(SdOptimization):
-    def __init__(self):
-        super().__init__("InvokeAI", cmd_opt="opt_split_attention_invokeai")
+    name = "InvokeAI"
+    cmd_opt = "opt_split_attention_invokeai"

+    @property
     def priority(self):
         return 1000 if not torch.cuda.is_available() else 10

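Note (illustration, not part of this commit): the other subclasses set priority as a plain class attribute, while SdOptimizationInvokeAI computes it from torch.cuda.is_available(). Wrapping its method in @property keeps attribute-style access (opt.priority) uniform for callers. A minimal standalone sketch with hypothetical classes PlainPriority and ComputedPriority:

import torch

class PlainPriority:
    priority = 20  # fixed value, read as a plain attribute

class ComputedPriority:
    @property
    def priority(self):
        # evaluated on every access, mirroring the InvokeAI case above
        return 1000 if not torch.cuda.is_available() else 10

print(PlainPriority().priority)     # 20
print(ComputedPriority().priority)  # 1000 without CUDA, 10 with CUDA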
@@ -121,11 +113,9 @@ class SdOptimizationInvokeAI(SdOptimization):


 class SdOptimizationDoggettx(SdOptimization):
-    def __init__(self):
-        super().__init__("Doggettx", cmd_opt="opt_split_attention")
+    name = "Doggettx"
+    cmd_opt = "opt_split_attention"
+    priority = 20

-    def priority(self):
-        return 20
-
     def apply(self):
         ldm.modules.attention.CrossAttention.forward = split_cross_attention_forward