diff --git a/fooocus_version.py b/fooocus_version.py
index d4b750f9f..6c3c2c903 100644
--- a/fooocus_version.py
+++ b/fooocus_version.py
@@ -1 +1 @@
-version = '2.2.0'
+version = '2.2.1'
diff --git a/language/en.json b/language/en.json
index cb5603f92..f61255c96 100644
--- a/language/en.json
+++ b/language/en.json
@@ -379,7 +379,7 @@
     "Metadata": "Metadata",
     "Apply Metadata": "Apply Metadata",
     "Metadata Scheme": "Metadata Scheme",
-    "Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.",
+    "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.": "Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.",
     "fooocus (json)": "fooocus (json)",
     "a1111 (plain text)": "a1111 (plain text)"
 }
\ No newline at end of file
diff --git a/modules/async_worker.py b/modules/async_worker.py
index fd785f076..a8661f4dd 100644
--- a/modules/async_worker.py
+++ b/modules/async_worker.py
@@ -830,17 +830,21 @@ def callback(step, x0, x, total_steps, y):
                          ('Negative Prompt', 'negative_prompt', task['log_negative_prompt']),
                          ('Fooocus V2 Expansion', 'prompt_expansion', task['expansion']),
                          ('Styles', 'styles', str(raw_style_selections)),
-                         ('Performance', 'performance', performance_selection.value),
-                         ('Resolution', 'resolution', str((width, height))),
-                         ('Guidance Scale', 'guidance_scale', guidance_scale),
-                         ('Sharpness', 'sharpness', sharpness),
-                         ('ADM Guidance', 'adm_guidance', str((
-                             modules.patch.patch_settings[pid].positive_adm_scale,
-                             modules.patch.patch_settings[pid].negative_adm_scale,
-                             modules.patch.patch_settings[pid].adm_scaler_end))),
-                         ('Base Model', 'base_model', base_model_name),
-                         ('Refiner Model', 'refiner_model', refiner_model_name),
-                         ('Refiner Switch', 'refiner_switch', refiner_switch)]
+                         ('Performance', 'performance', performance_selection.value)]
+
+                    if performance_selection.steps() != steps:
+                        d.append(('Steps', 'steps', steps))
+
+                    d += [('Resolution', 'resolution', str((width, height))),
+                          ('Guidance Scale', 'guidance_scale', guidance_scale),
+                          ('Sharpness', 'sharpness', sharpness),
+                          ('ADM Guidance', 'adm_guidance', str((
+                              modules.patch.patch_settings[pid].positive_adm_scale,
+                              modules.patch.patch_settings[pid].negative_adm_scale,
+                              modules.patch.patch_settings[pid].adm_scaler_end))),
+                          ('Base Model', 'base_model', base_model_name),
+                          ('Refiner Model', 'refiner_model', refiner_model_name),
+                          ('Refiner Switch', 'refiner_switch', refiner_switch)]
 
                     if refiner_model_name != 'None':
                         if overwrite_switch > 0:
@@ -857,17 +861,17 @@ def callback(step, x0, x, total_steps, y):
                     if freeu_enabled:
                         d.append(('FreeU', 'freeu', str((freeu_b1, freeu_b2, freeu_s1, freeu_s2))))
 
+                    for li, (n, w) in enumerate(loras):
+                        if n != 'None':
+                            d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}'))
+
                     metadata_parser = None
                     if save_metadata_to_images:
                         metadata_parser = modules.meta_parser.get_metadata_parser(metadata_scheme)
                         metadata_parser.set_data(task['log_positive_prompt'], task['positive'],
                                                  task['log_negative_prompt'], task['negative'],
                                                  steps, base_model_name, refiner_model_name, loras)
-
-                    for li, (n, w) in enumerate(loras):
-                        if n != 'None':
-                            d.append((f'LoRA {li + 1}', f'lora_combined_{li + 1}', f'{n} : {w}'))
-
+                    d.append(('Metadata Scheme', 'metadata_scheme', metadata_scheme.value if save_metadata_to_images else save_metadata_to_images))
                     d.append(('Version', 'version', 'Fooocus v' + fooocus_version.version))
                     img_paths.append(log(x, d, metadata_parser, output_format))
 
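A minimal sketch of the new history-log behaviour in modules/async_worker.py, assuming performance_selection.steps() returns the default step count of the chosen preset; the Performance enum and its numbers below are illustrative stand-ins, not the real Fooocus flags:

    # Illustrative sketch only: the enum members and preset step counts are assumptions,
    # not the actual Fooocus flags module.
    from enum import Enum

    class Performance(Enum):
        SPEED = 'Speed'
        QUALITY = 'Quality'

        def steps(self) -> int:
            # Assumed preset defaults for this sketch.
            return {'Speed': 30, 'Quality': 60}[self.value]

    def build_log_entries(performance: Performance, steps: int) -> list:
        d = [('Performance', 'performance', performance.value)]
        # Only record 'Steps' when it differs from the preset default,
        # mirroring the condition added above.
        if performance.steps() != steps:
            d.append(('Steps', 'steps', steps))
        return d

    print(build_log_entries(Performance.SPEED, 30))  # no 'Steps' entry
    print(build_log_entries(Performance.SPEED, 20))  # includes ('Steps', 'steps', 20)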
diff --git a/modules/config.py b/modules/config.py
index 09c8fd7c5..a68bd2187 100644
--- a/modules/config.py
+++ b/modules/config.py
@@ -264,7 +264,7 @@ def get_config_item_or_set_default(key, default_value, validator, disable_empty_
 )
 default_max_lora_number = get_config_item_or_set_default(
     key='default_max_lora_number',
-    default_value=len(default_loras),
+    default_value=len(default_loras) if isinstance(default_loras, list) and len(default_loras) > 0 else 5,
     validator=lambda x: isinstance(x, int) and x >= 1
 )
 default_cfg_scale = get_config_item_or_set_default(
diff --git a/modules/html.py b/modules/html.py
index 47a1483a5..769151a9f 100644
--- a/modules/html.py
+++ b/modules/html.py
@@ -112,10 +112,6 @@
     margin-left: -5px !important;
 }
 
-.lora_enable {
-    flex-grow: 1 !important;
-}
-
 .lora_enable label {
     height: 100%;
 }
@@ -128,12 +124,10 @@
     display: none;
 }
 
-.lora_model {
-    flex-grow: 5 !important;
-}
-
-.lora_weight {
-    flex-grow: 5 !important;
+@-moz-document url-prefix() {
+    .lora_weight input[type=number] {
+        width: 80px;
+    }
 }
 
 '''
diff --git a/modules/private_logger.py b/modules/private_logger.py
index 8fa5f73c6..01e570a7d 100644
--- a/modules/private_logger.py
+++ b/modules/private_logger.py
@@ -26,7 +26,7 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for
     date_string, local_temp_filename, only_name = generate_temp_filename(folder=path_outputs, extension=output_format)
     os.makedirs(os.path.dirname(local_temp_filename), exist_ok=True)
 
-    parsed_parameters = metadata_parser.parse_string(metadata) if metadata_parser is not None else ''
+    parsed_parameters = metadata_parser.parse_string(metadata.copy()) if metadata_parser is not None else ''
     image = Image.fromarray(img)
 
     if output_format == 'png':
@@ -90,7 +90,7 @@ def log(img, metadata, metadata_parser: MetadataParser | None = None, output_for
     """
     )
 
-    begin_part = f"Fooocus Log {date_string} (private)\nAll images are clean, without any hidden data/meta, and safe to share with others.\n\n"
+    begin_part = f"Fooocus Log {date_string} (private)\nMetadata is embedded if enabled in the config or developer debug mode. You can find the information for each image in line Metadata Scheme.\n\n"
     end_part = f'\n'
 
     middle_part = log_cache.get(html_name, "")
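In modules/private_logger.py, parse_string now receives metadata.copy(), so any in-place change a parser makes to the entry list cannot leak back into the list the caller still uses afterwards for the HTML history entry. A self-contained illustration of that list-aliasing effect, with a made-up parser stand-in rather than the real MetadataParser:

    # Hypothetical stand-in for a parser that mutates its input in place.
    def parse_string(metadata: list) -> str:
        metadata.pop()  # e.g. drop an entry while building the output string
        return '; '.join(f'{label}: {value}' for label, key, value in metadata)

    d = [('Prompt', 'prompt', 'a cat'), ('Version', 'version', 'Fooocus v2.2.1')]
    parse_string(d)          # caller's list is now missing the Version entry
    print(len(d))            # 1

    d = [('Prompt', 'prompt', 'a cat'), ('Version', 'version', 'Fooocus v2.2.1')]
    parse_string(d.copy())   # the shallow copy absorbs the mutation
    print(len(d))            # 2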
\n\n" end_part = f'\n' middle_part = log_cache.get(html_name, "") diff --git a/modules/sdxl_styles.py b/modules/sdxl_styles.py index 71afc402f..2a310024c 100644 --- a/modules/sdxl_styles.py +++ b/modules/sdxl_styles.py @@ -94,9 +94,8 @@ def get_words(arrays, totalMult, index): return [word] + get_words(arrays[1:], math.floor(totalMult/len(words)), index) - def apply_arrays(text, index): - arrays = re.findall(r'\[\[([\s,\w-]+)\]\]', text) + arrays = re.findall(r'\[\[(.*?)\]\]', text) if len(arrays) == 0: return text diff --git a/update_log.md b/update_log.md index b0192d0d8..322c19c12 100644 --- a/update_log.md +++ b/update_log.md @@ -1,3 +1,9 @@ +# [2.2.1](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.1) + +* Fix some small bugs (e.g. image grid, upscale fast 2x, LoRA weight width in Firefox) +* Allow prompt weights in array syntax +* Add steps override and metadata scheme to history log + # [2.2.0](https://github.com/lllyasviel/Fooocus/releases/tag/2.2.0) * Isolate every image generation to truly allow multi-user usage diff --git a/webui.py b/webui.py index 180c7d2ba..42dd890f7 100644 --- a/webui.py +++ b/webui.py @@ -355,13 +355,13 @@ def update_history_link(): for i, (n, v) in enumerate(modules.config.default_loras): with gr.Row(): lora_enabled = gr.Checkbox(label='Enable', value=True, - elem_classes=['lora_enable', 'min_check']) + elem_classes=['lora_enable', 'min_check'], scale=1) lora_model = gr.Dropdown(label=f'LoRA {i + 1}', choices=['None'] + modules.config.lora_filenames, value=n, - elem_classes='lora_model') + elem_classes='lora_model', scale=5) lora_weight = gr.Slider(label='Weight', minimum=modules.config.default_loras_min_weight, maximum=modules.config.default_loras_max_weight, step=0.01, value=v, - elem_classes='lora_weight') + elem_classes='lora_weight', scale=5) lora_ctrls += [lora_enabled, lora_model, lora_weight] with gr.Row(): @@ -438,7 +438,7 @@ def update_history_link(): save_metadata_to_images = gr.Checkbox(label='Save Metadata to Images', value=modules.config.default_save_metadata_to_images, info='Adds parameters to generated images allowing manual regeneration.') metadata_scheme = gr.Radio(label='Metadata Scheme', choices=flags.metadata_scheme, value=modules.config.default_metadata_scheme, - info='Image Prompt parameters are not included. Use a1111 for compatibility with Civitai.', + info='Image Prompt parameters are not included. Use png and a1111 for compatibility with Civitai.', visible=modules.config.default_save_metadata_to_images) save_metadata_to_images.change(lambda x: gr.update(visible=x), inputs=[save_metadata_to_images], outputs=[metadata_scheme], diff --git a/wildcards/animal.txt b/wildcards/animal.txt index 9a6f09ba8..3c479daa0 100644 --- a/wildcards/animal.txt +++ b/wildcards/animal.txt @@ -18,7 +18,7 @@ Chihuahua Chimpanzee Chinchilla Chipmunk -Comodo Dragon +Komodo Dragon Cow Coyote Crocodile @@ -97,4 +97,4 @@ Whale Wolf Wombat Yak -Zebra \ No newline at end of file +Zebra