From 9c8075ba8e538f695ef25f85e6513227b58b71ce Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Thu, 16 May 2024 23:16:50 +0900
Subject: [PATCH 1/3] torch_utils.float64 return torch.float64 if device is
 not mps or xpu, else return torch.float32

---
 modules/torch_utils.py | 9 +++++++++
 1 file changed, 9 insertions(+)

diff --git a/modules/torch_utils.py b/modules/torch_utils.py
index e5b52393ec8..a07e02853b1 100644
--- a/modules/torch_utils.py
+++ b/modules/torch_utils.py
@@ -1,6 +1,7 @@
 from __future__ import annotations
 
 import torch.nn
+import torch
 
 
 def get_param(model) -> torch.nn.Parameter:
@@ -15,3 +16,11 @@ def get_param(model) -> torch.nn.Parameter:
             return param
 
     raise ValueError(f"No parameters found in model {model!r}")
+
+
+def float64(t: torch.Tensor):
+    """return torch.float64 if device is not mps or xpu, else return torch.float32"""
+    match t.device.type:
+        case 'mps' | 'xpu':
+            return torch.float32
+    return torch.float64

From 41f66849c7feac1efd0b9eb6884209be382e9e74 Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Thu, 16 May 2024 23:18:20 +0900
Subject: [PATCH 2/3] mps, xpu compatibility

---
 .../soft-inpainting/scripts/soft_inpainting.py | 9 ++++-----
 1 file changed, 4 insertions(+), 5 deletions(-)

diff --git a/extensions-builtin/soft-inpainting/scripts/soft_inpainting.py b/extensions-builtin/soft-inpainting/scripts/soft_inpainting.py
index f56e1e2266d..0e629963af4 100644
--- a/extensions-builtin/soft-inpainting/scripts/soft_inpainting.py
+++ b/extensions-builtin/soft-inpainting/scripts/soft_inpainting.py
@@ -3,6 +3,7 @@
 import math
 from modules.ui_components import InputAccordion
 import modules.scripts as scripts
+from modules.torch_utils import float64
 
 
 class SoftInpaintingSettings:
@@ -79,13 +80,11 @@ def latent_blend(settings, a, b, t):
 
     # Calculate the magnitude of the interpolated vectors. (We will remove this magnitude.)
     # 64-bit operations are used here to allow large exponents.
-    current_magnitude = torch.norm(image_interp, p=2, dim=1, keepdim=True).to(torch.float64).add_(0.00001)
+    current_magnitude = torch.norm(image_interp, p=2, dim=1, keepdim=True).to(float64(image_interp)).add_(0.00001)
 
     # Interpolate the powered magnitudes, then un-power them (bring them back to a power of 1).
-    a_magnitude = torch.norm(a, p=2, dim=1, keepdim=True).to(torch.float64).pow_(
-        settings.inpaint_detail_preservation) * one_minus_t3
-    b_magnitude = torch.norm(b, p=2, dim=1, keepdim=True).to(torch.float64).pow_(
-        settings.inpaint_detail_preservation) * t3
+    a_magnitude = torch.norm(a, p=2, dim=1, keepdim=True).to(float64(a)).pow_(settings.inpaint_detail_preservation) * one_minus_t3
+    b_magnitude = torch.norm(b, p=2, dim=1, keepdim=True).to(float64(b)).pow_(settings.inpaint_detail_preservation) * t3
     desired_magnitude = a_magnitude
     desired_magnitude.add_(b_magnitude).pow_(1 / settings.inpaint_detail_preservation)
     del a_magnitude, b_magnitude, t3, one_minus_t3

From f015b94176d6df372ce153eddc018cb3b08c03ba Mon Sep 17 00:00:00 2001
From: w-e-w <40751091+w-e-w@users.noreply.github.com>
Date: Thu, 16 May 2024 23:19:06 +0900
Subject: [PATCH 3/3] use torch_utils.float64

---
 modules/sd_samplers_timesteps_impl.py | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/modules/sd_samplers_timesteps_impl.py b/modules/sd_samplers_timesteps_impl.py
index 930a64af590..84867d6ee65 100644
--- a/modules/sd_samplers_timesteps_impl.py
+++ b/modules/sd_samplers_timesteps_impl.py
@@ -5,13 +5,14 @@
 
 from modules import shared
 from modules.models.diffusion.uni_pc import uni_pc
+from modules.torch_utils import float64
 
 
 @torch.no_grad()
 def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta=0.0):
     alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
     alphas = alphas_cumprod[timesteps]
-    alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' and x.device.type != 'xpu' else torch.float32)
+    alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(float64(x))
     sqrt_one_minus_alphas = torch.sqrt(1 - alphas)
     sigmas = eta * np.sqrt((1 - alphas_prev.cpu().numpy()) / (1 - alphas.cpu()) * (1 - alphas.cpu() / alphas_prev.cpu().numpy()))
 
@@ -43,7 +44,7 @@ def ddim(model, x, timesteps, extra_args=None, callback=None, disable=None, eta=
 def plms(model, x, timesteps, extra_args=None, callback=None, disable=None):
     alphas_cumprod = model.inner_model.inner_model.alphas_cumprod
     alphas = alphas_cumprod[timesteps]
-    alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(torch.float64 if x.device.type != 'mps' and x.device.type != 'xpu' else torch.float32)
+    alphas_prev = alphas_cumprod[torch.nn.functional.pad(timesteps[:-1], pad=(1, 0))].to(float64(x))
     sqrt_one_minus_alphas = torch.sqrt(1 - alphas)
 
     extra_args = {} if extra_args is None else extra_args
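
Usage sketch (illustrative, not part of the patches): the dtype-selection
pattern this series introduces, with the helper inlined so the snippet runs
without the webui tree on the import path. The tensor `x` and the 1e-5
epsilon are stand-ins for the callers' values.

    import torch

    def float64(t: torch.Tensor) -> torch.dtype:
        # Widest float dtype the tensor's backend supports;
        # the mps and xpu backends lack float64.
        match t.device.type:
            case 'mps' | 'xpu':
                return torch.float32
        return torch.float64

    x = torch.randn(4)  # cpu tensor in this sketch; any device works
    # Same pattern as latent_blend()/ddim(): upcast before large-exponent math,
    # falling back to float32 on backends without 64-bit floats.
    mag = torch.norm(x, p=2, dim=0, keepdim=True).to(float64(x)).add_(1e-5)
    print(mag.dtype)  # torch.float64 on cpu/cuda, torch.float32 on mps/xpu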