Merge branch 'release_candidate'
AUTOMATIC1111 committed Jul 27, 2023
2 parents a3ddf46 + 50973ec commit 68f336b
Showing 16 changed files with 140 additions and 164 deletions.
25 changes: 25 additions & 0 deletions CHANGELOG.md
@@ -1,3 +1,28 @@
## 1.5.1

### Minor:
* support parsing text encoder blocks in some new LoRAs
* delete scale checker script due to user demand

### Extensions and API:
* add postprocess_batch_list script callback

### Bug Fixes:
* fix TI training for SD1
* fix reload altclip model error
* prepend the pythonpath instead of overriding it
* fix typo in SD_WEBUI_RESTARTING
* if txt2img/img2img raises an exception, finally call state.end()
* fix composable diffusion weight parsing
* restyle Startup profile page for users of the black UI theme
* fix webui not launching with --nowebui
* catch exceptions for non-git extensions
* fix some options missing from /sdapi/v1/options
* fix for extension update status always saying "unknown"
* fix display of extra network cards that have `<>` in the name
* update lora extension to work with python 3.8


## 1.5.0

### Features:
1 change: 1 addition & 0 deletions extensions-builtin/Lora/network.py
@@ -1,3 +1,4 @@
from __future__ import annotations
import os
from collections import namedtuple
import enum
5 changes: 5 additions & 0 deletions extensions-builtin/Lora/networks.py
@@ -163,6 +163,11 @@ def load_network(name, network_on_disk):
key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
sd_module = shared.sd_model.network_layer_mapping.get(key, None)

# some SD1 Loras also have correct compvis keys
if sd_module is None:
key = key_network_without_network_parts.replace("lora_te1_text_model", "transformer_text_model")
sd_module = shared.sd_model.network_layer_mapping.get(key, None)

if sd_module is None:
keys_failed_to_match[key_network] = key
continue
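
To illustrate the hunk above: some SD1 LoRAs name their text-encoder blocks with the plain compvis-style prefix rather than the SDXL-style `0_transformer_text_model` prefix, so the loader retries the lookup with the alternate key before reporting a failed match. Below is a minimal standalone sketch of that fallback; the mapping and key are invented, only the `replace()` prefixes mirror the diff (the real mapping is `shared.sd_model.network_layer_mapping`).

```python
# Standalone sketch of the two-step key lookup above; mapping and key are made up.
network_layer_mapping = {
    "transformer_text_model_encoder_layers_0_mlp_fc1": "<torch module>",
}

key_network_without_network_parts = "lora_te1_text_model_encoder_layers_0_mlp_fc1"

key = key_network_without_network_parts.replace("lora_te1_text_model", "0_transformer_text_model")
sd_module = network_layer_mapping.get(key, None)

# some SD1 LoRAs use the plain compvis-style prefix, so retry before giving up
if sd_module is None:
    key = key_network_without_network_parts.replace("lora_te1_text_model", "transformer_text_model")
    sd_module = network_layer_mapping.get(key, None)

print(sd_module)  # resolved via the fallback key
```
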
108 changes: 0 additions & 108 deletions javascript/badScaleChecker.js

This file was deleted.

40 changes: 22 additions & 18 deletions modules/api/api.py
@@ -333,14 +333,16 @@ def text2imgapi(self, txt2imgreq: models.StableDiffusionTxt2ImgProcessingAPI):
p.outpath_grids = opts.outdir_txt2img_grids
p.outpath_samples = opts.outdir_txt2img_samples

shared.state.begin(job="scripts_txt2img")
if selectable_scripts is not None:
p.script_args = script_args
processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
else:
p.script_args = tuple(script_args) # Need to pass args as tuple here
processed = process_images(p)
shared.state.end()
try:
shared.state.begin(job="scripts_txt2img")
if selectable_scripts is not None:
p.script_args = script_args
processed = scripts.scripts_txt2img.run(p, *p.script_args) # Need to pass args as list here
else:
p.script_args = tuple(script_args) # Need to pass args as tuple here
processed = process_images(p)
finally:
shared.state.end()

b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []

@@ -390,14 +392,16 @@ def img2imgapi(self, img2imgreq: models.StableDiffusionImg2ImgProcessingAPI):
p.outpath_grids = opts.outdir_img2img_grids
p.outpath_samples = opts.outdir_img2img_samples

shared.state.begin(job="scripts_img2img")
if selectable_scripts is not None:
p.script_args = script_args
processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
else:
p.script_args = tuple(script_args) # Need to pass args as tuple here
processed = process_images(p)
shared.state.end()
try:
shared.state.begin(job="scripts_img2img")
if selectable_scripts is not None:
p.script_args = script_args
processed = scripts.scripts_img2img.run(p, *p.script_args) # Need to pass args as list here
else:
p.script_args = tuple(script_args) # Need to pass args as tuple here
processed = process_images(p)
finally:
shared.state.end()

b64images = list(map(encode_pil_to_base64, processed.images)) if send_images else []

@@ -720,9 +724,9 @@ def get_memory(self):
cuda = {'error': f'{err}'}
return models.MemoryResponse(ram=ram, cuda=cuda)

def launch(self, server_name, port):
def launch(self, server_name, port, root_path):
self.app.include_router(self.router)
uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=shared.cmd_opts.timeout_keep_alive)
uvicorn.run(self.app, host=server_name, port=port, timeout_keep_alive=shared.cmd_opts.timeout_keep_alive, root_path=root_path)

def kill_webui(self):
restart.stop_program()
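
Two notes on this file: the try/finally blocks guarantee `shared.state.end()` runs even when generation raises (the 1.5.1 bug fix "if txt2img/img2img raises an exception, finally call state.end()"), and `launch()` gains a `root_path` argument that is forwarded to uvicorn so the API can sit behind a reverse proxy under a sub-path. A hedged sketch of what that enables; the app, host, port, and `/webui` prefix below are illustrative values, not webui's own setup.

```python
# Minimal sketch of serving an ASGI app under a sub-path with uvicorn's root_path,
# the same keyword the new launch() signature forwards. All values are illustrative.
import uvicorn
from fastapi import FastAPI

app = FastAPI()


@app.get("/ping")
def ping():
    return {"ok": True}


if __name__ == "__main__":
    # a reverse proxy mapping https://example.com/webui/* to this server now gets
    # correct URLs and docs, because the app knows it is mounted under /webui
    uvicorn.run(app, host="127.0.0.1", port=7861, root_path="/webui")
```
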
6 changes: 2 additions & 4 deletions modules/api/models.py
@@ -208,11 +208,9 @@ class PreprocessResponse(BaseModel):
fields = {}
for key, metadata in opts.data_labels.items():
value = opts.data.get(key)
optType = opts.typemap.get(type(metadata.default), type(metadata.default))
optType = opts.typemap.get(type(metadata.default), type(metadata.default)) if metadata.default else Any

if metadata.default is None:
pass
elif metadata is not None:
if metadata is not None:
fields.update({key: (Optional[optType], Field(default=metadata.default, description=metadata.label))})
else:
fields.update({key: (Optional[optType], Field())})
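
For context, this response model is built dynamically from the options registry, and options whose default is `None` previously ended up without a usable field type, which is why some of them were missing from `/sdapi/v1/options`. A small self-contained sketch of the pattern with pydantic's `create_model`, using an invented options dict and an explicit `is not None` check (the diff itself uses a plain truthiness check on `metadata.default`).

```python
# Sketch of building a response model from an options registry; keys and defaults are
# invented. Options with a None default fall back to Any instead of being dropped.
from typing import Any, Optional

from pydantic import BaseModel, Field, create_model

option_defaults = {"sd_model_checkpoint": None, "CLIP_stop_at_last_layers": 1}

fields = {}
for key, default in option_defaults.items():
    opt_type = type(default) if default is not None else Any
    fields[key] = (Optional[opt_type], Field(default=default))

OptionsModel = create_model("OptionsModel", __base__=BaseModel, **fields)

print(OptionsModel().dict())  # both options appear, including the None-default one
```
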
10 changes: 6 additions & 4 deletions modules/extensions.py
@@ -56,10 +56,12 @@ def read_from_repo():
self.do_read_info_from_repo()

return self.to_dict()

d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo)
self.from_dict(d)
self.status = 'unknown'
try:
d = cache.cached_data_for_file('extensions-git', self.name, os.path.join(self.path, ".git"), read_from_repo)
self.from_dict(d)
except FileNotFoundError:
pass
self.status = 'unknown' if self.status == '' else self.status

def do_read_info_from_repo(self):
repo = None
8 changes: 4 additions & 4 deletions modules/launch_utils.py
@@ -196,7 +196,7 @@ def run_extension_installer(extension_dir):

try:
env = os.environ.copy()
env['PYTHONPATH'] = os.path.abspath(".")
env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH', '')}"

print(run(f'"{python}" "{path_installer}"', errdesc=f"Error running install.py for extension {extension_dir}", custom_env=env))
except Exception as e:
@@ -233,7 +233,7 @@ def run_extensions_installers(settings_file):
re_requirement = re.compile(r"\s*([-_a-zA-Z0-9]+)\s*(?:==\s*([-+_.a-zA-Z0-9]+))?\s*")


def requrements_met(requirements_file):
def requirements_met(requirements_file):
"""
Does a simple parse of a requirements.txt file to determine if all rerqirements in it
are already installed. Returns True if so, False if not installed or parsing fails.
@@ -293,7 +293,7 @@ def prepare_environment():
try:
# the existance of this file is a signal to webui.sh/bat that webui needs to be restarted when it stops execution
os.remove(os.path.join(script_path, "tmp", "restart"))
os.environ.setdefault('SD_WEBUI_RESTARTING ', '1')
os.environ.setdefault('SD_WEBUI_RESTARTING', '1')
except OSError:
pass

@@ -354,7 +354,7 @@ def prepare_environment():
if not os.path.isfile(requirements_file):
requirements_file = os.path.join(script_path, requirements_file)

if not requrements_met(requirements_file):
if not requirements_met(requirements_file):
run_pip(f"install -r \"{requirements_file}\"", "requirements")

run_extensions_installers(settings_file=args.ui_settings_file)
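
The PYTHONPATH change above is the changelog's "prepend the pythonpath instead of overriding it": extension installers used to run with PYTHONPATH replaced by the webui root, losing anything the user had already exported. A short sketch of the prepend pattern; the installer path below is hypothetical.

```python
# Sketch of prepending the current directory to PYTHONPATH for a child process, as the
# diff above does for extension installers. The install.py path is a made-up example.
import os
import subprocess
import sys

env = os.environ.copy()
env["PYTHONPATH"] = f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH', '')}"

# the child sees the webui root first, followed by whatever PYTHONPATH the user set
subprocess.run([sys.executable, "extensions/example-extension/install.py"], env=env, check=False)
```
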
41 changes: 26 additions & 15 deletions modules/processing.py
@@ -600,8 +600,12 @@ def program_version():
return res


def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False):
index = position_in_batch + iteration * p.batch_size
def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iteration=0, position_in_batch=0, use_main_prompt=False, index=None, all_negative_prompts=None):
if index is None:
index = position_in_batch + iteration * p.batch_size

if all_negative_prompts is None:
all_negative_prompts = p.all_negative_prompts

clip_skip = getattr(p, 'clip_skip', opts.CLIP_stop_at_last_layers)
enable_hr = getattr(p, 'enable_hr', False)
@@ -617,12 +621,12 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
"Sampler": p.sampler_name,
"CFG scale": p.cfg_scale,
"Image CFG scale": getattr(p, 'image_cfg_scale', None),
"Seed": all_seeds[index],
"Seed": p.all_seeds[0] if use_main_prompt else all_seeds[index],
"Face restoration": (opts.face_restoration_model if p.restore_faces else None),
"Size": f"{p.width}x{p.height}",
"Model hash": getattr(p, 'sd_model_hash', None if not opts.add_model_hash_to_info or not shared.sd_model.sd_model_hash else shared.sd_model.sd_model_hash),
"Model": (None if not opts.add_model_name_to_info else shared.sd_model.sd_checkpoint_info.name_for_extra),
"Variation seed": (None if p.subseed_strength == 0 else all_subseeds[index]),
"Variation seed": (None if p.subseed_strength == 0 else (p.all_subseeds[0] if use_main_prompt else all_subseeds[index])),
"Variation seed strength": (None if p.subseed_strength == 0 else p.subseed_strength),
"Seed resize from": (None if p.seed_resize_from_w <= 0 or p.seed_resize_from_h <= 0 else f"{p.seed_resize_from_w}x{p.seed_resize_from_h}"),
"Denoising strength": getattr(p, 'denoising_strength', None),
@@ -642,7 +646,7 @@ def create_infotext(p, all_prompts, all_seeds, all_subseeds, comments=None, iter
generation_params_text = ", ".join([k if k == v else f'{k}: {generation_parameters_copypaste.quote(v)}' for k, v in generation_params.items() if v is not None])

prompt_text = p.prompt if use_main_prompt else all_prompts[index]
negative_prompt_text = f"\nNegative prompt: {p.all_negative_prompts[index]}" if p.all_negative_prompts[index] else ""
negative_prompt_text = f"\nNegative prompt: {all_negative_prompts[index]}" if all_negative_prompts[index] else ""

return f"{prompt_text}{negative_prompt_text}\n{generation_params_text}".strip()

@@ -716,9 +720,6 @@ def process_images_inner(p: StableDiffusionProcessing) -> Processed:
else:
p.all_subseeds = [int(subseed) + x for x in range(len(p.all_prompts))]

def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
return create_infotext(p, p.all_prompts, p.all_seeds, p.all_subseeds, comments, iteration, position_in_batch, use_main_prompt)

if os.path.exists(cmd_opts.embeddings_dir) and not p.do_not_reload_embeddings:
model_hijack.embedding_db.load_textual_inversion_embeddings()

@@ -806,6 +807,16 @@ def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
if p.scripts is not None:
p.scripts.postprocess_batch(p, x_samples_ddim, batch_number=n)

p.prompts = p.all_prompts[n * p.batch_size:(n + 1) * p.batch_size]
p.negative_prompts = p.all_negative_prompts[n * p.batch_size:(n + 1) * p.batch_size]

batch_params = scripts.PostprocessBatchListArgs(list(x_samples_ddim))
p.scripts.postprocess_batch_list(p, batch_params, batch_number=n)
x_samples_ddim = batch_params.images

def infotext(index=0, use_main_prompt=False):
return create_infotext(p, p.prompts, p.seeds, p.subseeds, use_main_prompt=use_main_prompt, index=index, all_negative_prompts=p.negative_prompts)

for i, x_sample in enumerate(x_samples_ddim):
p.batch_index = i

@@ -814,7 +825,7 @@ def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):

if p.restore_faces:
if opts.save and not p.do_not_save_samples and opts.save_images_before_face_restoration:
images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-face-restoration")
images.save_image(Image.fromarray(x_sample), p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-face-restoration")

devices.torch_gc()

@@ -831,15 +842,15 @@ def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
if p.color_corrections is not None and i < len(p.color_corrections):
if opts.save and not p.do_not_save_samples and opts.save_images_before_color_correction:
image_without_cc = apply_overlay(image, p.paste_to, i, p.overlay_images)
images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-before-color-correction")
images.save_image(image_without_cc, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-before-color-correction")
image = apply_color_correction(p.color_corrections[i], image)

image = apply_overlay(image, p.paste_to, i, p.overlay_images)

if opts.samples_save and not p.do_not_save_samples:
images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p)
images.save_image(image, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p)

text = infotext(n, i)
text = infotext(i)
infotexts.append(text)
if opts.enable_pnginfo:
image.info["parameters"] = text
@@ -850,10 +861,10 @@ def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
image_mask_composite = Image.composite(image.convert('RGBA').convert('RGBa'), Image.new('RGBa', image.size), images.resize_image(2, p.mask_for_overlay, image.width, image.height).convert('L')).convert('RGBA')

if opts.save_mask:
images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask")
images.save_image(image_mask, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask")

if opts.save_mask_composite:
images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(n, i), p=p, suffix="-mask-composite")
images.save_image(image_mask_composite, p.outpath_samples, "", p.seeds[i], p.prompts[i], opts.samples_format, info=infotext(i), p=p, suffix="-mask-composite")

if opts.return_mask:
output_images.append(image_mask)
Expand Down Expand Up @@ -894,7 +905,7 @@ def infotext(iteration=0, position_in_batch=0, use_main_prompt=False):
p,
images_list=output_images,
seed=p.all_seeds[0],
info=infotext(),
info=infotexts[0],
comments="".join(f"{comment}\n" for comment in comments),
subseed=p.all_subseeds[0],
index_of_first_image=index_of_first_image,
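
The new `postprocess_batch_list` hook (listed under "Extensions and API" in the changelog) hands scripts the current batch's samples before images are saved, via the `PostprocessBatchListArgs` object whose `images` list can be edited in place. Below is a hedged sketch of what an extension script using it might look like; the hook signature and the idea that per-image lists such as `p.prompts` and `p.seeds` should be kept in step with `pp.images` are inferred from the processing.py hunk above, and the script itself is invented.

```python
# Hypothetical extension script (e.g. extensions/my-ext/scripts/reverse_batch.py) sketching
# the new postprocess_batch_list callback. Signature and attribute names are inferred from
# the processing.py hunk in this commit, not copied from documentation.
import modules.scripts as scripts


class ReverseBatchOrder(scripts.Script):
    def title(self):
        return "Reverse batch order (sketch)"

    def show(self, is_img2img):
        # run for both txt2img and img2img without adding any UI
        return scripts.AlwaysVisible

    def postprocess_batch_list(self, p, pp, *args, **kwargs):
        # pp.images holds the current batch's samples; edits here flow into saving,
        # infotexts, and the grid. If the order or count of images changes, keep the
        # per-image bookkeeping lists aligned with it.
        pp.images.reverse()
        p.prompts.reverse()
        p.negative_prompts.reverse()
        p.seeds.reverse()
        p.subseeds.reverse()
```
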