Mirror of https://github.com/AUTOMATIC1111/stable-diffusion-webui.git (synced 2025-04-08 05:49:00 +08:00)
Use self.scale instead of custom finalize
parent 735c9e8059
commit 265bc26c21
@@ -21,6 +21,8 @@ class NetworkModuleOFT(network.NetworkModule):
         self.lin_module = None
         self.org_module: list[torch.Module] = [self.sd_module]
 
+        self.scale = 1.0
+
         # kohya-ss
         if "oft_blocks" in weights.w.keys():
             self.is_kohya = True
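The new self.scale attribute is what the base class's finalize path folds into the computed weight delta; the calc_scale name comes from the comment removed in the hunk below. A minimal sketch, assuming a NetworkModule-like base class (this is not the repository's actual network.py, whose body is not shown in this diff), of how such a scale lookup typically works:

# Hedged sketch only: the real base class lives in the Lora extension's network.py.
# Names other than `scale` and `calc_scale` (both of which appear in this commit)
# are illustrative assumptions.
class NetworkModuleSketch:
    def __init__(self):
        self.scale = None   # OFT now sets this to 1.0 in its __init__
        self.alpha = None   # hypothetical fallback inputs for the scale
        self.dim = None

    def calc_scale(self):
        # Prefer an explicit per-module scale; otherwise fall back to alpha/dim, then 1.0.
        if self.scale is not None:
            return self.scale
        if self.alpha is not None and self.dim is not None:
            return self.alpha / self.dim
        return 1.0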
@@ -78,21 +80,3 @@ class NetworkModuleOFT(network.NetworkModule):
         print(torch.norm(updown))
         output_shape = orig_weight.shape
         return self.finalize_updown(updown, orig_weight, output_shape)
-
-    def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
-        if self.bias is not None:
-            updown = updown.reshape(self.bias.shape)
-            updown += self.bias.to(orig_weight.device, dtype=orig_weight.dtype)
-            updown = updown.reshape(output_shape)
-
-        if len(output_shape) == 4:
-            updown = updown.reshape(output_shape)
-
-        if orig_weight.size().numel() == updown.size().numel():
-            updown = updown.reshape(orig_weight.shape)
-
-        if ex_bias is not None:
-            ex_bias = ex_bias * self.multiplier()
-
-        # Ignore calc_scale, which is not used in OFT.
-        return updown * self.multiplier(), ex_bias