
Commit

…on-webui into dev
MisterSeajay committed Mar 8, 2024
2 parents 15a82ec + 7d1368c commit 42043fc
Showing 69 changed files with 926 additions and 392 deletions.
2 changes: 2 additions & 0 deletions .eslintrc.js
@@ -78,6 +78,8 @@ module.exports = {
//extraNetworks.js
requestGet: "readonly",
popup: "readonly",
// profilerVisualization.js
createVisualizationTable: "readonly",
// from python
localization: "readonly",
// progrssbar.js
144 changes: 138 additions & 6 deletions CHANGELOG.md

Large diffs are not rendered by default.

5 changes: 5 additions & 0 deletions _typos.toml
@@ -0,0 +1,5 @@
[default.extend-words]
# Part of "RGBa" (Pillow's pre-multiplied alpha RGB mode)
Ba = "Ba"
# HSA is something AMD uses for their GPUs
HSA = "HSA"
8 changes: 4 additions & 4 deletions extensions-builtin/LDSR/sd_hijack_ddpm_v1.py
@@ -301,7 +301,7 @@ def p_losses(self, x_start, t, noise=None):
elif self.parameterization == "x0":
target = x_start
else:
raise NotImplementedError(f"Paramterization {self.parameterization} not yet supported")
raise NotImplementedError(f"Parameterization {self.parameterization} not yet supported")

loss = self.get_loss(model_out, target, mean=False).mean(dim=[1, 2, 3])

@@ -880,7 +880,7 @@ def forward(self, x, c, *args, **kwargs):
def apply_model(self, x_noisy, t, cond, return_ids=False):

if isinstance(cond, dict):
# hybrid case, cond is exptected to be a dict
# hybrid case, cond is expected to be a dict
pass
else:
if not isinstance(cond, list):
@@ -916,7 +916,7 @@ def apply_model(self, x_noisy, t, cond, return_ids=False):
cond_list = [{c_key: [c[:, :, :, :, i]]} for i in range(c.shape[-1])]

elif self.cond_stage_key == 'coordinates_bbox':
assert 'original_image_size' in self.split_input_params, 'BoudingBoxRescaling is missing original_image_size'
assert 'original_image_size' in self.split_input_params, 'BoundingBoxRescaling is missing original_image_size'

# assuming padding of unfold is always 0 and its dilation is always 1
n_patches_per_row = int((w - ks[0]) / stride[0] + 1)
@@ -926,7 +926,7 @@ def apply_model(self, x_noisy, t, cond, return_ids=False):
num_downs = self.first_stage_model.encoder.num_resolutions - 1
rescale_latent = 2 ** (num_downs)

# get top left postions of patches as conforming for the bbbox tokenizer, therefore we
# get top left positions of patches as conforming for the bbbox tokenizer, therefore we
# need to rescale the tl patch coordinates to be in between (0,1)
tl_patch_coordinates = [(rescale_latent * stride[0] * (patch_nr % n_patches_per_row) / full_img_w,
rescale_latent * stride[1] * (patch_nr // n_patches_per_row) / full_img_h)
2 changes: 1 addition & 1 deletion extensions-builtin/Lora/lyco_helpers.py
@@ -30,7 +30,7 @@ def factorization(dimension: int, factor:int=-1) -> tuple[int, int]:
In LoRA with Kroneckor Product, first value is a value for weight scale.
secon value is a value for weight.
Becuase of non-commutative property, A⊗B ≠ B⊗A. Meaning of two matrices is slightly different.
Because of non-commutative property, A⊗B ≠ B⊗A. Meaning of two matrices is slightly different.
examples)
factor
15 changes: 15 additions & 0 deletions extensions-builtin/Lora/network.py
@@ -146,6 +146,9 @@ def __init__(self, net: Network, weights: NetworkWeights):
self.alpha = weights.w["alpha"].item() if "alpha" in weights.w else None
self.scale = weights.w["scale"].item() if "scale" in weights.w else None

self.dora_scale = weights.w["dora_scale"] if "dora_scale" in weights.w else None
self.dora_mean_dim = tuple(i for i in range(len(self.shape)) if i != 1)

def multiplier(self):
if 'transformer' in self.sd_key[:20]:
return self.network.te_multiplier
@@ -160,6 +163,15 @@ def calc_scale(self):

return 1.0

def apply_weight_decompose(self, updown, orig_weight):
orig_weight = orig_weight.to(updown)
merged_scale1 = updown + orig_weight
dora_merged = (
    merged_scale1 / merged_scale1.mean(dim=self.dora_mean_dim, keepdim=True) * self.dora_scale
)
final_updown = dora_merged - orig_weight
return final_updown

def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
if self.bias is not None:
updown = updown.reshape(self.bias.shape)
@@ -175,6 +187,9 @@ def finalize_updown(self, updown, orig_weight, output_shape, ex_bias=None):
if ex_bias is not None:
ex_bias = ex_bias * self.multiplier()

if self.dora_scale is not None:
updown = self.apply_weight_decompose(updown, orig_weight)

return updown * self.calc_scale() * self.multiplier(), ex_bias

def calc_updown(self, target):
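For context on the DoRA additions above: DoRA treats a weight as magnitude times direction, so after merging the LoRA delta into the original weight the result is renormalized and rescaled by the stored dora_scale. Below is a minimal standalone sketch of that idea for a plain 2-D [out, in] weight, using hypothetical names and a per-row norm rather than the extension's mean-based normalization; it is not the extension's API.

import torch

def dora_decompose(updown: torch.Tensor, orig_weight: torch.Tensor,
                   dora_scale: torch.Tensor) -> torch.Tensor:
    # Sketch only: merge the delta into the weight, strip the merged
    # magnitude (per output row), re-apply the learned magnitude, and
    # return the result as a delta again.
    merged = updown + orig_weight                 # W0 + dW
    magnitude = merged.norm(dim=1, keepdim=True)  # [out, 1]
    dora_merged = merged / magnitude * dora_scale
    return dora_merged - orig_weight

# toy usage
w0 = torch.randn(8, 4)
dw = 0.01 * torch.randn(8, 4)
scale = w0.norm(dim=1, keepdim=True)              # magnitude vector, e.g. initialized from W0 rows
delta = dora_decompose(dw, w0, scale)
print(delta.shape)  # torch.Size([8, 4])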
86 changes: 61 additions & 25 deletions extensions-builtin/Lora/network_oft.py
@@ -1,6 +1,5 @@
import torch
import network
from lyco_helpers import factorization
from einops import rearrange


@@ -22,20 +21,28 @@ def __init__(self, net: network.Network, weights: network.NetworkWeights):
self.org_module: list[torch.Module] = [self.sd_module]

self.scale = 1.0
self.is_R = False
self.is_boft = False

# kohya-ss
# kohya-ss/New LyCORIS OFT/BOFT
if "oft_blocks" in weights.w.keys():
self.is_kohya = True
self.oft_blocks = weights.w["oft_blocks"] # (num_blocks, block_size, block_size)
self.alpha = weights.w["alpha"] # alpha is constraint
self.alpha = weights.w.get("alpha", None) # alpha is constraint
self.dim = self.oft_blocks.shape[0] # lora dim
# LyCORIS
# Old LyCORIS OFT
elif "oft_diag" in weights.w.keys():
self.is_kohya = False
self.is_R = True
self.oft_blocks = weights.w["oft_diag"]
# self.alpha is unused
self.dim = self.oft_blocks.shape[1] # (num_blocks, block_size, block_size)

# LyCORIS BOFT
if self.oft_blocks.dim() == 4:
self.is_boft = True
self.rescale = weights.w.get('rescale', None)
if self.rescale is not None:
self.rescale = self.rescale.reshape(-1, *[1]*(self.org_module[0].weight.dim() - 1))

is_linear = type(self.sd_module) in [torch.nn.Linear, torch.nn.modules.linear.NonDynamicallyQuantizableLinear]
is_conv = type(self.sd_module) in [torch.nn.Conv2d]
is_other_linear = type(self.sd_module) in [torch.nn.MultiheadAttention] # unsupported
@@ -47,35 +54,64 @@ def __init__(self, net: network.Network, weights: network.NetworkWeights):
elif is_other_linear:
self.out_dim = self.sd_module.embed_dim

if self.is_kohya:
self.constraint = self.alpha * self.out_dim
self.num_blocks = self.dim
self.block_size = self.out_dim // self.dim
else:
self.num_blocks = self.dim
self.block_size = self.out_dim // self.dim
self.constraint = (0 if self.alpha is None else self.alpha) * self.out_dim
if self.is_R:
self.constraint = None
self.block_size, self.num_blocks = factorization(self.out_dim, self.dim)
self.block_size = self.dim
self.num_blocks = self.out_dim // self.dim
elif self.is_boft:
self.boft_m = self.oft_blocks.shape[0]
self.num_blocks = self.oft_blocks.shape[1]
self.block_size = self.oft_blocks.shape[2]
self.boft_b = self.block_size

def calc_updown(self, orig_weight):
oft_blocks = self.oft_blocks.to(orig_weight.device)
eye = torch.eye(self.block_size, device=oft_blocks.device)

if self.is_kohya:
block_Q = oft_blocks - oft_blocks.transpose(1, 2) # ensure skew-symmetric orthogonal matrix
norm_Q = torch.norm(block_Q.flatten())
new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device))
block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))
if not self.is_R:
block_Q = oft_blocks - oft_blocks.transpose(-1, -2) # ensure skew-symmetric orthogonal matrix
if self.constraint != 0:
norm_Q = torch.norm(block_Q.flatten())
new_norm_Q = torch.clamp(norm_Q, max=self.constraint.to(oft_blocks.device))
block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))
oft_blocks = torch.matmul(eye + block_Q, (eye - block_Q).float().inverse())

R = oft_blocks.to(orig_weight.device)

# This errors out for MultiheadAttention, might need to be handled up-stream
merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size)
merged_weight = torch.einsum(
'k n m, k n ... -> k m ...',
R,
merged_weight
)
merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...')
if not self.is_boft:
# This errors out for MultiheadAttention, might need to be handled up-stream
merged_weight = rearrange(orig_weight, '(k n) ... -> k n ...', k=self.num_blocks, n=self.block_size)
merged_weight = torch.einsum(
'k n m, k n ... -> k m ...',
R,
merged_weight
)
merged_weight = rearrange(merged_weight, 'k m ... -> (k m) ...')
else:
# TODO: determine correct value for scale
scale = 1.0
m = self.boft_m
b = self.boft_b
r_b = b // 2
inp = orig_weight
for i in range(m):
bi = R[i] # b_num, b_size, b_size
if i == 0:
# Apply multiplier/scale and rescale into first weight
bi = bi * scale + (1 - scale) * eye
inp = rearrange(inp, "(c g k) ... -> (c k g) ...", g=2, k=2**i * r_b)
inp = rearrange(inp, "(d b) ... -> d b ...", b=b)
inp = torch.einsum("b i j, b j ... -> b i ...", bi, inp)
inp = rearrange(inp, "d b ... -> (d b) ...")
inp = rearrange(inp, "(c k g) ... -> (c g k) ...", g=2, k=2**i * r_b)
merged_weight = inp

# Rescale mechanism
if self.rescale is not None:
merged_weight = self.rescale.to(merged_weight) * merged_weight

updown = merged_weight.to(orig_weight.device) - orig_weight.to(merged_weight.dtype)
output_shape = orig_weight.shape
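For orientation on the OFT path above (the non-butterfly case): the learned oft_blocks are made skew-symmetric, turned into block-diagonal orthogonal rotations via the Cayley transform, and applied block-wise to the output dimension of the weight. A self-contained sketch with hypothetical names, using plain torch instead of einops and omitting the constraint clamping:

import torch

def oft_apply(orig_weight: torch.Tensor, oft_blocks: torch.Tensor) -> torch.Tensor:
    # orig_weight: [out, in]; oft_blocks: [num_blocks, b, b] with out == num_blocks * b.
    num_blocks, b, _ = oft_blocks.shape
    eye = torch.eye(b, device=oft_blocks.device)

    q = oft_blocks - oft_blocks.transpose(-1, -2)           # skew-symmetric part
    r = torch.matmul(eye + q, torch.linalg.inv(eye - q))    # Cayley transform -> orthogonal blocks

    w = orig_weight.reshape(num_blocks, b, -1)               # split the output dim into blocks
    w = torch.einsum('knm,kni->kmi', r, w)                   # rotate each block of output rows
    return w.reshape(orig_weight.shape)

# toy usage: 3 blocks of size 4 acting on a [12, 5] weight
w0 = torch.randn(12, 5)
blocks = 0.01 * torch.randn(3, 4, 4)
updown = oft_apply(w0, blocks) - w0                          # the delta that calc_updown would return
print(updown.shape)  # torch.Size([12, 5])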
2 changes: 1 addition & 1 deletion extensions-builtin/Lora/networks.py
@@ -355,7 +355,7 @@ def network_apply_weights(self: Union[torch.nn.Conv2d, torch.nn.Linear, torch.nn
"""
Applies the currently selected set of networks to the weights of torch layer self.
If weights already have this particular set of networks applied, does nothing.
If not, restores orginal weights from backup and alters weights according to networks.
If not, restores original weights from backup and alters weights according to networks.
"""

network_layer_name = getattr(self, 'network_layer_name', None)
4 changes: 2 additions & 2 deletions extensions-builtin/canvas-zoom-and-pan/javascript/zoom.js
@@ -292,7 +292,7 @@ onUiLoaded(async() => {

// Create tooltip
function createTooltip() {
const toolTipElemnt =
const toolTipElement =
targetElement.querySelector(".image-container");
const tooltip = document.createElement("div");
tooltip.className = "canvas-tooltip";
@@ -355,7 +355,7 @@ onUiLoaded(async() => {
tooltip.appendChild(tooltipContent);

// Add a hint element to the target element
toolTipElemnt.appendChild(tooltip);
toolTipElement.appendChild(tooltip);
}

//Show tool tip if setting enable
@@ -8,8 +8,8 @@
"canvas_hotkey_grow_brush": shared.OptionInfo("W", "Enlarge the brush size"),
"canvas_hotkey_move": shared.OptionInfo("F", "Moving the canvas").info("To work correctly in firefox, turn off 'Automatically search the page text when typing' in the browser settings"),
"canvas_hotkey_fullscreen": shared.OptionInfo("S", "Fullscreen Mode, maximizes the picture so that it fits into the screen and stretches it to its full width "),
"canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas positon"),
"canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, neededs for testing"),
"canvas_hotkey_reset": shared.OptionInfo("R", "Reset zoom and canvas position"),
"canvas_hotkey_overlap": shared.OptionInfo("O", "Toggle overlap").info("Technical button, needed for testing"),
"canvas_show_tooltip": shared.OptionInfo(True, "Enable tooltip on the canvas"),
"canvas_auto_expand": shared.OptionInfo(True, "Automatically expands an image that does not fit completely in the canvas area, similar to manually pressing the S and R buttons"),
"canvas_blur_prompt": shared.OptionInfo(False, "Take the focus off the prompt when working with a canvas"),
@@ -1,7 +1,7 @@
import math

import gradio as gr
from modules import scripts, shared, ui_components, ui_settings, infotext_utils
from modules import scripts, shared, ui_components, ui_settings, infotext_utils, errors
from modules.ui_components import FormColumn


@@ -42,7 +42,11 @@ def ui(self, is_img2img):
setting_name = extra_options[index]

with FormColumn():
comp = ui_settings.create_setting_component(setting_name)
try:
comp = ui_settings.create_setting_component(setting_name)
except KeyError:
errors.report(f"Can't add extra options for {setting_name} in ui")
continue

self.comps.append(comp)
self.setting_names.append(setting_name)
28 changes: 21 additions & 7 deletions extensions-builtin/soft-inpainting/scripts/soft_inpainting.py
@@ -57,10 +57,14 @@ def latent_blend(settings, a, b, t):

# NOTE: We use inplace operations wherever possible.

# [4][w][h] to [1][4][w][h]
t2 = t.unsqueeze(0)
# [4][w][h] to [1][1][w][h] - the [4] seem redundant.
t3 = t[0].unsqueeze(0).unsqueeze(0)
if len(t.shape) == 3:
# [4][w][h] to [1][4][w][h]
t2 = t.unsqueeze(0)
# [4][w][h] to [1][1][w][h] - the [4] seem redundant.
t3 = t[0].unsqueeze(0).unsqueeze(0)
else:
t2 = t
t3 = t[:, 0][:, None]

one_minus_t2 = 1 - t2
one_minus_t3 = 1 - t3
@@ -104,7 +108,7 @@ def latent_blend(settings, a, b, t):

def get_modified_nmask(settings, nmask, sigma):
"""
Converts a negative mask representing the transparency of the original latent vectors being overlayed
Converts a negative mask representing the transparency of the original latent vectors being overlaid
to a mask that is scaled according to the denoising strength for this step.
Where:
@@ -135,7 +139,10 @@ def apply_adaptive_masks(
from PIL import Image, ImageOps, ImageFilter

# TODO: Bias the blending according to the latent mask, add adjustable parameter for bias control.
latent_mask = nmask[0].float()
if len(nmask.shape) == 3:
latent_mask = nmask[0].float()
else:
latent_mask = nmask[:, 0].float()
# convert the original mask into a form we use to scale distances for thresholding
mask_scalar = 1 - (torch.clamp(latent_mask, min=0, max=1) ** (settings.mask_blend_scale / 2))
mask_scalar = (0.5 * (1 - settings.composite_mask_influence)
@@ -157,7 +164,14 @@ def apply_adaptive_masks(
percentile_min=0.25, percentile_max=0.75, min_width=1)

# The distance at which opacity of original decreases to 50%
half_weighted_distance = settings.composite_difference_threshold * mask_scalar
if len(mask_scalar.shape) == 3:
if mask_scalar.shape[0] > i:
half_weighted_distance = settings.composite_difference_threshold * mask_scalar[i]
else:
half_weighted_distance = settings.composite_difference_threshold * mask_scalar[0]
else:
half_weighted_distance = settings.composite_difference_threshold * mask_scalar

converted_mask = converted_mask / half_weighted_distance

converted_mask = 1 / (1 + converted_mask ** settings.composite_difference_contrast)
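The soft-inpainting changes above mostly generalize mask handling from an unbatched [4][h][w] tensor to a batched [B][4][h][w] one. A small sketch of that shape normalization, with hypothetical names (not the script's functions):

import torch

def normalize_blend_mask(t: torch.Tensor):
    # Return (t2, t3): a [B,4,h,w] per-channel mask and a [B,1,h,w]
    # single-channel mask, accepting batched or unbatched input.
    if t.dim() == 3:
        t2 = t.unsqueeze(0)                  # [4,h,w]   -> [1,4,h,w]
        t3 = t[0].unsqueeze(0).unsqueeze(0)  # [4,h,w]   -> [1,1,h,w]
    else:
        t2 = t                               # already [B,4,h,w]
        t3 = t[:, :1]                        # [B,4,h,w] -> [B,1,h,w]
    return t2, t3

# toy usage
for m in (torch.rand(4, 8, 8), torch.rand(2, 4, 8, 8)):
    t2, t3 = normalize_blend_mask(m)
    print(tuple(t2.shape), tuple(t3.shape))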
8 changes: 8 additions & 0 deletions html/extra-networks-pane-dirs.html
@@ -0,0 +1,8 @@
<div class="extra-network-pane-content-dirs">
<div id='{tabname}_{extra_networks_tabname}_dirs' class='extra-network-dirs'>
{dirs_html}
</div>
<div id='{tabname}_{extra_networks_tabname}_cards' class='extra-network-cards'>
{items_html}
</div>
</div>
8 changes: 8 additions & 0 deletions html/extra-networks-pane-tree.html
@@ -0,0 +1,8 @@
<div class="extra-network-pane-content-tree resize-handle-row">
<div id='{tabname}_{extra_networks_tabname}_tree' class='extra-network-tree' style='flex-basis: {extra_networks_tree_view_default_width}px'>
{tree_html}
</div>
<div id='{tabname}_{extra_networks_tabname}_cards' class='extra-network-cards' style='flex-grow: 1;'>
{items_html}
</div>
</div>
13 changes: 3 additions & 10 deletions html/extra-networks-pane.html
@@ -1,4 +1,4 @@
<div id='{tabname}_{extra_networks_tabname}_pane' class='extra-network-pane'>
<div id='{tabname}_{extra_networks_tabname}_pane' class='extra-network-pane {tree_view_div_default_display_class}'>
<div class="extra-network-control" id="{tabname}_{extra_networks_tabname}_controls" style="display:none" >
<div class="extra-network-control--search">
<input
@@ -44,12 +44,5 @@
<i class="extra-network-control--refresh-icon"></i>
</div>
</div>
<div class="extra-network-pane-content">
<div id='{tabname}_{extra_networks_tabname}_tree' class='extra-network-tree {tree_view_div_extra_class}'>
{tree_html}
</div>
<div id='{tabname}_{extra_networks_tabname}_cards' class='extra-network-cards'>
{items_html}
</div>
</div>
</div>
{pane_content}
</div>
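The new pane templates above are plain placeholder templates; the Python side presumably fills {tabname}, {extra_networks_tabname}, {items_html}, and the rest via string substitution before handing the HTML to the browser. A rough sketch of that idea with the template inlined and hypothetical names and values (not the webui's actual loader):

# Minimal stand-in for html/extra-networks-pane-dirs.html
PANE_DIRS_TEMPLATE = """\
<div class="extra-network-pane-content-dirs">
    <div id='{tabname}_{extra_networks_tabname}_dirs' class='extra-network-dirs'>
        {dirs_html}
    </div>
    <div id='{tabname}_{extra_networks_tabname}_cards' class='extra-network-cards'>
        {items_html}
    </div>
</div>
"""

def render_pane_dirs(tabname, extra_networks_tabname, dirs_html, items_html):
    # str.format fills the {...} fields; the webui presumably reads the real
    # template file from its html/ directory instead of inlining it.
    return PANE_DIRS_TEMPLATE.format(
        tabname=tabname,
        extra_networks_tabname=extra_networks_tabname,
        dirs_html=dirs_html,
        items_html=items_html,
    )

print(render_pane_dirs("txt2img", "lora", "<button>subdir</button>", "<div>card</div>"))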