Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git, synced 2025-03-04 21:14:54 +08:00
add shared.opts.lora_without_backup_weight option to reduce ram usage
This commit is contained in:
parent 537d9dd71c
commit 2060886450
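In short: with the new option enabled, store_weights_backup() returns a True sentinel (a "fake backup") instead of copying the layer's weight to CPU RAM, network_restore_weights_from_backup() treats the sentinel as nothing to restore, and network_apply_weights() drops the sentinel rather than recording the applied network names. The last hunk registers the option in the Optimizations settings section, defaulted off.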
@@ -377,6 +377,8 @@ def store_weights_backup(weight):
     if weight is None:
         return None

+    if shared.opts.lora_without_backup_weight:
+        return True
     return weight.to(devices.cpu, copy=True)


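To see why skipping the backup matters, here is a rough, hypothetical estimate of what one CPU backup copy costs (illustrative numbers, not code from this commit):

import torch

# Hypothetical illustration: the RAM one backup copy would cost.
# store_weights_backup() normally does weight.to(devices.cpu, copy=True);
# returning the sentinel True instead skips this allocation entirely.
weight = torch.empty(4096, 4096, dtype=torch.float16)  # one large Linear layer
backup_bytes = weight.numel() * weight.element_size()
print(f"{backup_bytes / 1024**2:.0f} MiB per layer")   # -> 32 MiB for this layer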
@@ -395,6 +397,9 @@ def network_restore_weights_from_backup(self: Union[torch.nn.Conv2d, torch.nn.Li
     if weights_backup is None and bias_backup is None:
         return

+    if weights_backup is True or weights_backup == (True, True): # fake backup
+        return
+
     if weights_backup is not None:
         if isinstance(self, torch.nn.MultiheadAttention):
             restore_weights_backup(self, 'in_proj_weight', weights_backup[0])
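The guard matches two sentinel shapes because, as the restore call below it suggests (weights_backup[0] going to 'in_proj_weight'), MultiheadAttention layers keep their backup as a pair, presumably with the out projection as the second element. A minimal sketch of how both shapes arise, using a simplified stand-in for store_weights_backup():

import torch

def store_weights_backup(weight, lora_without_backup_weight=True):
    # Simplified stand-in with the same return shapes as the patched function:
    # None, the True sentinel, or a CPU copy of the weight.
    if weight is None:
        return None
    if lora_without_backup_weight:
        return True  # "fake backup"
    return weight.to("cpu", copy=True)

# MultiheadAttention backs up a pair of weights (in_proj_weight and the out
# projection), which is why the guard also matches the tuple (True, True).
mha = torch.nn.MultiheadAttention(embed_dim=64, num_heads=4)
backup = (store_weights_backup(mha.in_proj_weight),
          store_weights_backup(mha.out_proj.weight))
assert backup == (True, True)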
@@ -539,7 +544,12 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
                 logging.debug(f"Network {net.name} layer {network_layer_name}: couldn't find supported operation")
                 extra_network_lora.errors[net.name] = extra_network_lora.errors.get(net.name, 0) + 1

-        self.network_current_names = wanted_names
+
+        if weights_backup is True or weights_backup == (True, True): # fake backup
+            self.network_weights_backup = None
+            self.network_bias_backup = None
+        else:
+            self.network_current_names = wanted_names


 def network_forward(org_module, input, original_forward):
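A fake backup cannot be restored from, so network_apply_weights() now clears both backup fields instead of pretending a rollback is possible; in practice, undoing a merged LoRA then means reloading the original weights. An illustrative sketch of that trade-off (toy shapes, not repo code):

import torch

def apply_lora_(weight, down, up, scale=1.0):
    # In-place LoRA merge: W += scale * (up @ down). With only a fake backup,
    # the original W is unrecoverable after this call.
    weight.add_(scale * (up @ down))

layer = torch.nn.Linear(16, 16, bias=False)
original = layer.weight.detach().clone()   # what a real backup would have held
down, up = torch.randn(4, 16), torch.randn(16, 4)

with torch.no_grad():
    apply_lora_(layer.weight, down, up, scale=0.5)

assert not torch.equal(layer.weight, original)
with torch.no_grad():
    layer.weight.copy_(original)           # stand-in for reloading the checkpoint
assert torch.equal(layer.weight, original)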
@@ -242,6 +242,7 @@ options_templates.update(options_section(('optimizations', "Optimizations", "sd"
     "batch_cond_uncond": OptionInfo(True, "Batch cond/uncond").info("do both conditional and unconditional denoising in one batch; uses a bit more VRAM during sampling, but improves speed; previously this was controlled by --always-batch-cond-uncond commandline argument"),
     "fp8_storage": OptionInfo("Disable", "FP8 weight", gr.Radio, {"choices": ["Disable", "Enable for SDXL", "Enable"]}).info("Use FP8 to store Linear/Conv layers' weight. Require pytorch>=2.1.0."),
     "cache_fp16_weight": OptionInfo(False, "Cache FP16 weight for LoRA").info("Cache fp16 weight when enabling FP8, will increase the quality of LoRA. Use more system ram."),
+    "lora_without_backup_weight": OptionInfo(False, "LoRA without backup weights").info("LoRA without backup weights to save RAM."),
 }))

 options_templates.update(options_section(('compatibility', "Compatibility", "sd"), {
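Once registered, the option appears under Settings -> Optimizations and is read in code as a plain attribute, as the first hunk's shared.opts.lora_without_backup_weight shows. A simplified mimic of that mapping (the real webui Options class is more involved than this):

# Illustrative mimic of how an options_templates entry becomes an attribute
# on shared.opts; names and structure here are assumptions for demonstration.
class Opts:
    def __init__(self, defaults):
        for name, default in defaults.items():
            setattr(self, name, default)

opts = Opts({"lora_without_backup_weight": False})
print(opts.lora_without_backup_weight)  # False until enabled in the settings UI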