Merge pull request #15815 from AUTOMATIC1111/torch-float64-or-float32

fix soft inpainting on mps and xpu, torch_utils.float64
commit b4723bb8c1 by AUTOMATIC1111, 2024-06-08 11:07:29 +03:00
3 changed files with 16 additions and 7 deletions
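Background: PyTorch's MPS backend (Apple Silicon) cannot allocate torch.float64 tensors, and float64 is likewise unsupported on many Intel XPU devices, so the unconditional .to(torch.float64) casts below failed on those backends. The commit replaces the scattered device checks with a single torch_utils.float64() helper that falls back to torch.float32 on mps and xpu.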

scripts/soft_inpainting.py

@@ -3,6 +3,7 @@ import gradio as gr
 import math
 from modules.ui_components import InputAccordion
 import modules.scripts as scripts
+from modules.torch_utils import float64


 class SoftInpaintingSettings:
@@ -79,13 +80,11 @@ def latent_blend(settings, a, b, t):
     # Calculate the magnitude of the interpolated vectors. (We will remove this magnitude.)
     # 64-bit operations are used here to allow large exponents.
-    current_magnitude = torch.norm(image_interp, p=2, dim=1, keepdim=True).to(torch.float64).add_(0.00001)
+    current_magnitude = torch.norm(image_interp, p=2, dim=1, keepdim=True).to(float64(image_interp)).add_(0.00001)

     # Interpolate the powered magnitudes, then un-power them (bring them back to a power of 1).
-    a_magnitude = torch.norm(a, p=2, dim=1, keepdim=True).to(torch.float64).pow_(
-        settings.inpaint_detail_preservation) * one_minus_t3
-    b_magnitude = torch.norm(b, p=2, dim=1, keepdim=True).to(torch.float64).pow_(
-        settings.inpaint_detail_preservation) * t3
+    a_magnitude = torch.norm(a, p=2, dim=1, keepdim=True).to(float64(a)).pow_(settings.inpaint_detail_preservation) * one_minus_t3
+    b_magnitude = torch.norm(b, p=2, dim=1, keepdim=True).to(float64(b)).pow_(settings.inpaint_detail_preservation) * t3

     desired_magnitude = a_magnitude
     desired_magnitude.add_(b_magnitude).pow_(1 / settings.inpaint_detail_preservation)
     del a_magnitude, b_magnitude, t3, one_minus_t3
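For illustration (not part of the diff), a minimal sketch of what the helper changes in the magnitude math above; the latent batch x is a stand-in, and modules.torch_utils is assumed importable from the webui root:

import torch
from modules.torch_utils import float64

x = torch.randn(1, 4, 64, 64)                  # stand-in latent batch on CPU
mag = torch.norm(x, p=2, dim=1, keepdim=True)  # channel-wise magnitude, as in latent_blend
mag = mag.to(float64(x)).add_(0.00001)         # float64 on cpu/cuda, float32 on mps/xpu
print(mag.dtype)                               # torch.float64 on CPU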

modules/sd_samplers_timesteps_impl.py

@@ -5,13 +5,14 @@ import numpy as np
 from modules import shared
 from modules.models.diffusion.uni_pc import uni_pc
+from modules.torch_utils import float64


 @torch.no_grad()
 def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta=0.0):
     alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
     alphas = alphas_cumprod[timesteps]
-    alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' and x.device.type != 'xpu' else torch.float32)
+    alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(float64(x))
     sqrt_one_minus_alphas = torch.sqrt(1 - alphas)
     sigmas = eta * np.sqrt((1 - alphas_prev.cpu().numpy()) / (1 - alphas.cpu()) * (1 - alphas.cpu() / alphas_prev.cpu().numpy()))
@@ -43,7 +44,7 @@ def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta=
 @torch.no_grad()
 def plms(model, x, timesteps, extra_args=None, callback=None, disable=None):
     alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
     alphas = alphas_cumprod[timesteps]
-    alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' and x.device.type != 'xpu' else torch.float32)
+    alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(float64(x))
     sqrt_one_minus_alphas = torch.sqrt(1 - alphas)
     extra_args = {} if extra_args is None else extra_args
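The same substitution appears in both samplers; a quick sketch (stand-in schedule and latent, webui package assumed importable) of how alphas_prev now takes its precision from the device of x:

import torch
from modules.torch_utils import float64

alphas_cumprod = torch.linspace(0.9991, 0.0047, 1000)  # stand-in noise schedule
timesteps = torch.tensor([0, 250, 500, 750, 999])
x = torch.randn(1, 4, 64, 64)                          # its device decides the dtype

alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(float64(x))
print(alphas_prev.dtype)  # torch.float64 here; torch.float32 if x lived on mps or xpu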

modules/torch_utils.py

@@ -1,6 +1,7 @@
 from __future__ import annotations

 import torch.nn
+import torch


 def get_param(model) -> torch.nn.Parameter:
@@ -15,3 +16,11 @@ def get_param(model) -> torch.nn.Parameter:
         return param

     raise ValueError(f"No parameters found in model {model!r}")
+
+
+def float64(t: torch.Tensor):
+    """return torch.float64 if device is not mps or xpu, else return torch.float32"""
+    match t.device.type:
+        case 'mps' | 'xpu':
+            return torch.float32
+    return torch.float64
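Usage sketch for the new helper: pass the tensor whose device should decide the precision, and use the result as a .to() target.

import torch
from modules.torch_utils import float64

t = torch.ones(3)                       # CPU tensor
print(float64(t))                       # torch.float64

if torch.backends.mps.is_available():   # only on an MPS build of PyTorch
    m = torch.ones(3, device='mps')
    print(float64(m))                   # torch.float32, since MPS lacks float64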