diff --git a/.release b/.release index e5c4a88b9..b2dcd183e 100644 --- a/.release +++ b/.release @@ -1 +1 @@ -v24.0.2 \ No newline at end of file +v24.0.3 \ No newline at end of file diff --git a/README.md b/README.md index 3ffe872a0..770a9bd6e 100644 --- a/README.md +++ b/README.md @@ -42,6 +42,8 @@ The GUI allows you to set the training parameters and generate and run the requi - [SDXL training](#sdxl-training) - [Masked loss](#masked-loss) - [Change History](#change-history) + - [2024/04/24 (v24.0.3)](#20240424-v2403) + - [2024/04/24 (v24.0.2)](#20240424-v2402) - [2024/04/17 (v24.0.1)](#20240417-v2401) - [Enhancements](#enhancements) - [Security and Stability](#security-and-stability) @@ -406,6 +408,11 @@ ControlNet dataset is used to specify the mask. The mask images should be the RG ## Change History + +### 2024/04/24 (v24.0.3) + +- Fix issue with sample prompt creation + ### 2024/04/24 (v24.0.2) - Fixed issue with clip_skip not being passed as an int to sd-scripts when using old config.json files. 
diff --git a/kohya_gui/basic_caption_gui.py b/kohya_gui/basic_caption_gui.py index e7d4aa75f..2f473fb7b 100644 --- a/kohya_gui/basic_caption_gui.py +++ b/kohya_gui/basic_caption_gui.py @@ -63,35 +63,31 @@ def caption_images( log.info(f"Captioning files in {images_dir} with {caption_text}...") # Build the command to run caption.py - run_cmd = [PYTHON, fr'"{scriptdir}/tools/caption.py"'] - - # Add required arguments - run_cmd.append('--caption_text') - run_cmd.append(caption_text) + run_cmd = rf'"{PYTHON}" "{scriptdir}/tools/caption.py"' + run_cmd += f' --caption_text="{caption_text}"' # Add optional flags to the command if overwrite: - run_cmd.append("--overwrite") + run_cmd += f" --overwrite" if caption_ext: - run_cmd.append('--caption_file_ext') - run_cmd.append(caption_ext) + run_cmd += f' --caption_file_ext="{caption_ext}"' + + run_cmd += f' "{images_dir}"' - # Add the directory containing the images - run_cmd.append(fr'"{images_dir}"') + # Log the command + log.info(run_cmd) # Set the environment variable for the Python path env = os.environ.copy() env["PYTHONPATH"] = ( - rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) env["TF_ENABLE_ONEDNN_OPTS"] = "0" - # Reconstruct the safe command string for display - command_to_run = " ".join(run_cmd) - log.info(f"Executing command: {command_to_run} with shell={use_shell}") + log.info(f"Executing command: {run_cmd} with shell={use_shell}") # Run the command in the sd-scripts folder context - subprocess.run(command_to_run, env=env, shell=use_shell) + subprocess.run(run_cmd, env=env, shell=use_shell) # Check if overwrite option is enabled diff --git a/kohya_gui/blip_caption_gui.py b/kohya_gui/blip_caption_gui.py index 42cad871c..ede340dc6 100644 --- a/kohya_gui/blip_caption_gui.py +++ b/kohya_gui/blip_caption_gui.py @@ -58,7 +58,7 @@ def caption_images( log.info(f"Captioning files in 
{train_data_dir}...") # Construct the command to run make_captions.py - run_cmd = [PYTHON, fr'"{scriptdir}/sd-scripts/finetune/make_captions.py"'] + run_cmd = [fr'"{PYTHON}"', fr'"{scriptdir}/sd-scripts/finetune/make_captions.py"'] # Add required arguments run_cmd.append('--batch_size') diff --git a/kohya_gui/common_gui.py b/kohya_gui/common_gui.py index e2183001a..dc2dd3f96 100644 --- a/kohya_gui/common_gui.py +++ b/kohya_gui/common_gui.py @@ -323,6 +323,7 @@ def update_my_data(my_data): # Convert values to int if they are strings for key in [ + "adaptive_noise_scale", "clip_skip", "epoch", "gradient_accumulation_steps", @@ -351,7 +352,7 @@ def update_my_data(my_data): except ValueError: # Handle the case where the string is not a valid float my_data[key] = int(1) - + # Convert values to int if they are strings for key in ["max_token_length"]: value = my_data.get(key) @@ -1449,3 +1450,21 @@ def is_file_writable(file_path: str) -> bool: except IOError: # If an IOError occurs, the file cannot be written to return False + +def print_command_and_toml(run_cmd, tmpfilename): + log.warning( + "Here is the trainer command as a reference. 
It will not be executed:\n" + ) + # Reconstruct the safe command string for display + command_to_run = " ".join(run_cmd) + + log.info(command_to_run) + print("") + + log.info(f"Showing toml config file: {tmpfilename}") + print("") + with open(tmpfilename, "r") as toml_file: + log.info(toml_file.read()) + log.info(f"end of toml config file: {tmpfilename}") + + save_to_file(command_to_run) \ No newline at end of file diff --git a/kohya_gui/convert_lcm_gui.py b/kohya_gui/convert_lcm_gui.py index 00114eb45..605109b31 100644 --- a/kohya_gui/convert_lcm_gui.py +++ b/kohya_gui/convert_lcm_gui.py @@ -47,40 +47,29 @@ def convert_lcm( path, ext = os.path.splitext(save_to) save_to = f"{path}_lcm{ext}" - # Initialize the command to run the script - run_cmd = [ - PYTHON, - f"{scriptdir}/path_to_script.py", - ] # Adjust the script path accordingly - - # Add required arguments - run_cmd.append("--lora-scale") - run_cmd.append(str(lora_scale)) - run_cmd.append("--model") - run_cmd.append(rf'"{model_path}"') - run_cmd.append("--name") - run_cmd.append(name) - - # Add conditional flags based on the model type + # Construct the command to run the script + run_cmd += f" --lora-scale {lora_scale}" + run_cmd += f' --model "{model_path}"' + run_cmd += f' --name "{name}"' + if model_type == "SDXL": - run_cmd.append("--sdxl") + run_cmd += f" --sdxl" if model_type == "SSD-1B": - run_cmd.append("--ssd-1b") + run_cmd += f" --ssd-1b" # Set up the environment env = os.environ.copy() env["PYTHONPATH"] = ( - rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) env["TF_ENABLE_ONEDNN_OPTS"] = "0" # Reconstruct the safe command string for display - command_to_run = " ".join(run_cmd) - log.info(f"Executing command: {command_to_run} with shell={use_shell}") + log.info(f"Executing command: {run_cmd} with shell={use_shell}") # Run the command in the sd-scripts folder context 
subprocess.run( - command_to_run, env=env, shell=use_shell + run_cmd, env=env, shell=use_shell ) # Return a success message diff --git a/kohya_gui/convert_model_gui.py b/kohya_gui/convert_model_gui.py index 822be8883..722fd5bcf 100644 --- a/kohya_gui/convert_model_gui.py +++ b/kohya_gui/convert_model_gui.py @@ -50,7 +50,7 @@ def convert_model( return run_cmd = [ - PYTHON, + fr'"{PYTHON}"', fr'"{scriptdir}/sd-scripts/tools/convert_diffusers20_original_sd.py"', ] @@ -100,7 +100,7 @@ def convert_model( env = os.environ.copy() env["PYTHONPATH"] = ( - rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) # Adding an example of an environment variable that might be relevant env["TF_ENABLE_ONEDNN_OPTS"] = "0" diff --git a/kohya_gui/dreambooth_gui.py b/kohya_gui/dreambooth_gui.py index 4e9cf1499..5b149a457 100644 --- a/kohya_gui/dreambooth_gui.py +++ b/kohya_gui/dreambooth_gui.py @@ -7,16 +7,16 @@ import toml from datetime import datetime from .common_gui import ( + check_if_model_exist, + color_aug_changed, get_executable_path, get_file_path, get_saveasfile_path, - color_aug_changed, + print_command_and_toml, run_cmd_advanced_training, - update_my_data, - check_if_model_exist, SaveConfigFile, - save_to_file, scriptdir, + update_my_data, validate_paths, ) from .class_accelerate_launch import AccelerateLaunch @@ -49,8 +49,6 @@ huggingface = None use_shell = False -PYTHON = sys.executable - TRAIN_BUTTON_VISIBLE = [gr.Button(visible=True), gr.Button(visible=False), gr.Textbox(value=time.time())] @@ -669,7 +667,7 @@ def train_model( "save_state_to_huggingface": save_state_to_huggingface, "resume_from_huggingface": resume_from_huggingface, "async_upload": async_upload, - "adaptive_noise_scale": adaptive_noise_scale if adaptive_noise_scale != 0 else None, + "adaptive_noise_scale": adaptive_noise_scale if adaptive_noise_scale != 0 else None, "bucket_no_upscale":
bucket_no_upscale, "bucket_reso_steps": bucket_reso_steps, "cache_latents": cache_latents, @@ -749,7 +747,7 @@ def train_model( "resume": resume, "sample_every_n_epochs": sample_every_n_epochs if sample_every_n_epochs != 0 else None, "sample_every_n_steps": sample_every_n_steps if sample_every_n_steps != 0 else None, - "sample_prompts": create_prompt_file(output_dir, output_dir), + "sample_prompts": create_prompt_file(sample_prompts, output_dir), "sample_sampler": sample_sampler, "save_every_n_epochs": save_every_n_epochs if save_every_n_epochs!= 0 else None, "save_every_n_steps": save_every_n_steps if save_every_n_steps != 0 else None, @@ -806,15 +804,7 @@ def train_model( run_cmd = run_cmd_advanced_training(run_cmd=run_cmd, **kwargs_for_training) if print_only: - log.warning( - "Here is the trainer command as a reference. It will not be executed:\n" - ) - # Reconstruct the safe command string for display - command_to_run = " ".join(run_cmd) - - print(command_to_run) - - save_to_file(command_to_run) + print_command_and_toml(run_cmd, tmpfilename) else: # Saving config file for model current_datetime = datetime.now() @@ -834,7 +824,7 @@ def train_model( env = os.environ.copy() env["PYTHONPATH"] = ( - rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) env["TF_ENABLE_ONEDNN_OPTS"] = "0" diff --git a/kohya_gui/extract_lora_from_dylora_gui.py b/kohya_gui/extract_lora_from_dylora_gui.py index 4bf39027f..eec3cf002 100644 --- a/kohya_gui/extract_lora_from_dylora_gui.py +++ b/kohya_gui/extract_lora_from_dylora_gui.py @@ -51,7 +51,7 @@ def extract_dylora( save_to = f"{path}_tmp{ext}" run_cmd = [ - PYTHON, + fr'"{PYTHON}"', rf'"{scriptdir}/sd-scripts/networks/extract_lora_from_dylora.py"', "--save_to", rf'"{save_to}"', @@ -63,7 +63,7 @@ def extract_dylora( env = os.environ.copy() env["PYTHONPATH"] = ( - 
rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) # Example environment variable adjustment for the Python environment env["TF_ENABLE_ONEDNN_OPTS"] = "0" diff --git a/kohya_gui/extract_lora_gui.py b/kohya_gui/extract_lora_gui.py index db86f4061..e0693034d 100644 --- a/kohya_gui/extract_lora_gui.py +++ b/kohya_gui/extract_lora_gui.py @@ -74,7 +74,7 @@ def extract_lora( return run_cmd = [ - PYTHON, + fr'"{PYTHON}"', fr'"{scriptdir}/sd-scripts/networks/extract_lora_from_models.py"', "--load_precision", load_precision, @@ -112,7 +112,7 @@ def extract_lora( env = os.environ.copy() env["PYTHONPATH"] = ( - rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) # Adding an example of another potentially relevant environment variable env["TF_ENABLE_ONEDNN_OPTS"] = "0" diff --git a/kohya_gui/extract_lycoris_locon_gui.py b/kohya_gui/extract_lycoris_locon_gui.py index ff3bf1897..fa9fd689e 100644 --- a/kohya_gui/extract_lycoris_locon_gui.py +++ b/kohya_gui/extract_lycoris_locon_gui.py @@ -74,7 +74,7 @@ def extract_lycoris_locon( path, ext = os.path.splitext(output_name) output_name = f"{path}_tmp{ext}" - run_cmd = [PYTHON, fr'"{scriptdir}/tools/lycoris_locon_extract.py"'] + run_cmd = [fr'"{PYTHON}"', fr'"{scriptdir}/tools/lycoris_locon_extract.py"'] if is_sdxl: run_cmd.append("--is_sdxl") @@ -127,7 +127,7 @@ def extract_lycoris_locon( env = os.environ.copy() env["PYTHONPATH"] = ( - rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) # Adding an example of an environment variable that might be relevant env["TF_ENABLE_ONEDNN_OPTS"] = "0" diff --git a/kohya_gui/finetune_gui.py 
b/kohya_gui/finetune_gui.py index c15490cef..27495627e 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -8,16 +8,16 @@ import toml from datetime import datetime from .common_gui import ( + check_if_model_exist, + color_aug_changed, get_executable_path, get_file_path, get_saveasfile_path, + print_command_and_toml, run_cmd_advanced_training, - color_aug_changed, - update_my_data, - check_if_model_exist, SaveConfigFile, - save_to_file, scriptdir, + update_my_data, validate_paths, ) from .class_accelerate_launch import AccelerateLaunch @@ -569,8 +569,8 @@ def train_model( if generate_caption_database: # Define the command components run_cmd = [ - PYTHON, - f"{scriptdir}/sd-scripts/finetune/merge_captions_to_metadata.py", + fr'"{PYTHON}"', + f'"{scriptdir}/sd-scripts/finetune/merge_captions_to_metadata.py"', ] # Add the caption extension @@ -606,8 +606,8 @@ def train_model( if generate_image_buckets: # Build the command to run the preparation script run_cmd = [ - PYTHON, - f"{scriptdir}/sd-scripts/finetune/prepare_buckets_latents.py", + fr'"{PYTHON}"', + f'"{scriptdir}/sd-scripts/finetune/prepare_buckets_latents.py"', image_folder, os.path.join(train_dir, caption_metadata_filename), os.path.join(train_dir, latent_metadata_filename), @@ -824,7 +824,7 @@ def train_model( "resume": resume, "sample_every_n_epochs": sample_every_n_epochs if sample_every_n_epochs != 0 else None, "sample_every_n_steps": sample_every_n_steps if sample_every_n_steps != 0 else None, - "sample_prompts": create_prompt_file(output_dir, output_dir), + "sample_prompts": create_prompt_file(sample_prompts, output_dir), "sample_sampler": sample_sampler, "save_every_n_epochs": save_every_n_epochs if save_every_n_epochs!= 0 else None, "save_every_n_steps": save_every_n_steps if save_every_n_steps != 0 else None, @@ -880,15 +880,7 @@ def train_model( run_cmd = run_cmd_advanced_training(run_cmd=run_cmd, **kwargs_for_training) if print_only: - log.warning( - "Here is the trainer 
command as a reference. It will not be executed:\n" - ) - # Reconstruct the safe command string for display - command_to_run = " ".join(run_cmd) - - print(command_to_run) - - save_to_file(command_to_run) + print_command_and_toml(run_cmd, tmpfilename) else: # Saving config file for model current_datetime = datetime.now() @@ -908,7 +900,7 @@ def train_model( env = os.environ.copy() env["PYTHONPATH"] = ( - rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) env["TF_ENABLE_ONEDNN_OPTS"] = "0" @@ -1308,7 +1300,7 @@ def list_presets(path): with gr.Tab("Guides"): gr.Markdown("This section provide Various Finetuning guides and information...") - top_level_path = rf"{scriptdir}/docs/Finetuning/top_level.md" + top_level_path = rf"{scriptdir}/docs/Finetuning/top_level.md" if os.path.exists(top_level_path): with open(os.path.join(top_level_path), "r", encoding="utf8") as file: guides_top_level = file.read() + "\n" diff --git a/kohya_gui/git_caption_gui.py b/kohya_gui/git_caption_gui.py index cb28f573c..df287ce4f 100644 --- a/kohya_gui/git_caption_gui.py +++ b/kohya_gui/git_caption_gui.py @@ -35,7 +35,7 @@ def caption_images( log.info(f"GIT captioning files in {train_data_dir}...") - run_cmd = [PYTHON, fr'"{scriptdir}/sd-scripts/finetune/make_captions_by_git.py"'] + run_cmd = [fr'"{PYTHON}"', fr'"{scriptdir}/sd-scripts/finetune/make_captions_by_git.py"'] # Add --model_id if provided if model_id != "": @@ -62,7 +62,7 @@ def caption_images( env = os.environ.copy() env["PYTHONPATH"] = ( - rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) # Adding an example of an environment variable that might be relevant env["TF_ENABLE_ONEDNN_OPTS"] = "0" diff --git a/kohya_gui/group_images_gui.py b/kohya_gui/group_images_gui.py index
29a10dc5c..d7224481d 100644 --- a/kohya_gui/group_images_gui.py +++ b/kohya_gui/group_images_gui.py @@ -34,8 +34,8 @@ def group_images( log.info(f"Grouping images in {input_folder}...") run_cmd = [ - PYTHON, - f"{scriptdir}/tools/group_images.py", + fr'"{PYTHON}"', + f'"{scriptdir}/tools/group_images.py"', fr'"{input_folder}"', fr'"{output_folder}"', str(group_size), diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 7aef5bab2..930d0a145 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -7,18 +7,18 @@ from datetime import datetime from .common_gui import ( + check_if_model_exist, + color_aug_changed, + get_any_file_path, get_executable_path, get_file_path, - get_any_file_path, get_saveasfile_path, - color_aug_changed, - run_cmd_advanced_training, - update_my_data, - check_if_model_exist, output_message, + print_command_and_toml, + run_cmd_advanced_training, SaveConfigFile, - save_to_file, scriptdir, + update_my_data, validate_paths, ) from .class_accelerate_launch import AccelerateLaunch @@ -1117,7 +1117,7 @@ def train_model( "resume": resume, "sample_every_n_epochs": sample_every_n_epochs if sample_every_n_epochs != 0 else None, "sample_every_n_steps": sample_every_n_steps if sample_every_n_steps != 0 else None, - "sample_prompts": create_prompt_file(output_dir, output_dir), + "sample_prompts": create_prompt_file(sample_prompts, output_dir), "sample_sampler": sample_sampler, "save_every_n_epochs": save_every_n_epochs if save_every_n_epochs!= 0 else None, "save_every_n_steps": save_every_n_steps if save_every_n_steps != 0 else None, @@ -1178,15 +1178,7 @@ def train_model( run_cmd = run_cmd_advanced_training(run_cmd=run_cmd, **run_cmd_params) if print_only: - log.warning( - "Here is the trainer command as a reference. 
It will not be executed:\n" - ) - # Reconstruct the safe command string for display - command_to_run = " ".join(run_cmd) - - print(command_to_run) - - save_to_file(command_to_run) + print_command_and_toml(run_cmd, tmpfilename) else: # Saving config file for model current_datetime = datetime.now() @@ -1205,7 +1197,7 @@ def train_model( # log.info(run_cmd) env = os.environ.copy() env["PYTHONPATH"] = ( - rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) env["TF_ENABLE_ONEDNN_OPTS"] = "0" diff --git a/kohya_gui/merge_lora_gui.py b/kohya_gui/merge_lora_gui.py index a261c83d7..a9c798291 100644 --- a/kohya_gui/merge_lora_gui.py +++ b/kohya_gui/merge_lora_gui.py @@ -425,9 +425,9 @@ def merge_lora( return if not sdxl_model: - run_cmd = [PYTHON, fr'"{scriptdir}/sd-scripts/networks/merge_lora.py"'] + run_cmd = [fr'"{PYTHON}"', fr'"{scriptdir}/sd-scripts/networks/merge_lora.py"'] else: - run_cmd = [PYTHON, fr'"{scriptdir}/sd-scripts/networks/sdxl_merge_lora.py"'] + run_cmd = [fr'"{PYTHON}"', fr'"{scriptdir}/sd-scripts/networks/sdxl_merge_lora.py"'] if sd_model: run_cmd.append("--sd_model") @@ -452,7 +452,7 @@ def merge_lora( env = os.environ.copy() env["PYTHONPATH"] = ( - rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) # Example of adding an environment variable for TensorFlow, if necessary env["TF_ENABLE_ONEDNN_OPTS"] = "0" diff --git a/kohya_gui/merge_lycoris_gui.py b/kohya_gui/merge_lycoris_gui.py index 22749970d..14337d802 100644 --- a/kohya_gui/merge_lycoris_gui.py +++ b/kohya_gui/merge_lycoris_gui.py @@ -39,7 +39,7 @@ def merge_lycoris( # Build the command to run merge_lycoris.py using list format run_cmd = [ - PYTHON, + fr'"{PYTHON}"', fr'"{scriptdir}/tools/merge_lycoris.py"', fr'"{base_model}"', 
fr'"{lycoris_model}"', @@ -60,7 +60,7 @@ def merge_lycoris( # Copy and update the environment variables env = os.environ.copy() env["PYTHONPATH"] = ( - rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) env["TF_ENABLE_ONEDNN_OPTS"] = "0" diff --git a/kohya_gui/resize_lora_gui.py b/kohya_gui/resize_lora_gui.py index df71d6ef1..04d3a8a0e 100644 --- a/kohya_gui/resize_lora_gui.py +++ b/kohya_gui/resize_lora_gui.py @@ -65,7 +65,7 @@ def resize_lora( device = "cuda" run_cmd = [ - PYTHON, + fr'"{PYTHON}"', fr'"{scriptdir}/sd-scripts/networks/resize_lora.py"', "--save_precision", save_precision, @@ -91,7 +91,7 @@ def resize_lora( env = os.environ.copy() env["PYTHONPATH"] = ( - rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) # Adding example environment variables if relevant diff --git a/kohya_gui/svd_merge_lora_gui.py b/kohya_gui/svd_merge_lora_gui.py index 669c0051c..d5a8c1e4e 100644 --- a/kohya_gui/svd_merge_lora_gui.py +++ b/kohya_gui/svd_merge_lora_gui.py @@ -3,6 +3,7 @@ import subprocess import os import sys +import shlex from .common_gui import ( get_saveasfilename_path, get_file_path, @@ -53,60 +54,56 @@ def svd_merge_lora( ratio_c /= total_ratio ratio_d /= total_ratio - run_cmd = [ - PYTHON, - fr'"{scriptdir}/sd-scripts/networks/svd_merge_lora.py"', - "--save_precision", - save_precision, - "--precision", - precision, - "--save_to", - fr'"{save_to}"', - ] - - # Variables for model paths and their ratios - models = [] - ratios = [] + run_cmd = rf'"{PYTHON}" "{scriptdir}/sd-scripts/networks/svd_merge_lora.py"' + run_cmd += f" --save_precision {save_precision}" + run_cmd += f" --precision {precision}" + run_cmd += rf' --save_to "{save_to}"' + run_cmd_models = " --models" + run_cmd_ratios = " --ratios" # Add 
non-empty models and their ratios to the command - def add_model(model_path, ratio): - if not os.path.isfile(model_path): - msgbox(f"The provided model at {model_path} is not a file") - return False - models.append(fr'"{model_path}"') - ratios.append(str(ratio)) - return True - - if lora_a_model and add_model(lora_a_model, ratio_a): - pass - if lora_b_model and add_model(lora_b_model, ratio_b): - pass - if lora_c_model and add_model(lora_c_model, ratio_c): - pass - if lora_d_model and add_model(lora_d_model, ratio_d): - pass - - if models and ratios: # Ensure we have valid models and ratios before appending - run_cmd.extend(["--models"] + models) - run_cmd.extend(["--ratios"] + ratios) - - run_cmd.extend( - ["--device", device, "--new_rank", new_rank, "--new_conv_rank", new_conv_rank] - ) + if lora_a_model: + if not os.path.isfile(lora_a_model): + msgbox("The provided model A is not a file") + return + run_cmd_models += rf' "{lora_a_model}"' + run_cmd_ratios += f" {ratio_a}" + if lora_b_model: + if not os.path.isfile(lora_b_model): + msgbox("The provided model B is not a file") + return + run_cmd_models += rf' "{lora_b_model}"' + run_cmd_ratios += f" {ratio_b}" + if lora_c_model: + if not os.path.isfile(lora_c_model): + msgbox("The provided model C is not a file") + return + run_cmd_models += rf' "{lora_c_model}"' + run_cmd_ratios += f" {ratio_c}" + if lora_d_model: + if not os.path.isfile(lora_d_model): + msgbox("The provided model D is not a file") + return + run_cmd_models += rf' "{lora_d_model}"' + run_cmd_ratios += f" {ratio_d}" + + run_cmd += run_cmd_models + run_cmd += run_cmd_ratios + run_cmd += f" --device {device}" + run_cmd += f' --new_rank "{new_rank}"' + run_cmd += f' --new_conv_rank "{new_conv_rank}"' env = os.environ.copy() env["PYTHONPATH"] = ( - rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) # Example of setting 
additional environment variables if needed env["TF_ENABLE_ONEDNN_OPTS"] = "0" - # Reconstruct the safe command string for display - command_to_run = " ".join(run_cmd) - log.info(f"Executing command: {command_to_run} with shell={use_shell}") + log.info(f"Executing command: {run_cmd} with shell={use_shell}") # Run the command in the sd-scripts folder context - subprocess.run(command_to_run, env=env, shell=use_shell) + subprocess.run(run_cmd, env=env, shell=use_shell) diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py index 90f20367e..376ee3a62 100644 --- a/kohya_gui/textual_inversion_gui.py +++ b/kohya_gui/textual_inversion_gui.py @@ -6,19 +6,19 @@ import time from datetime import datetime from .common_gui import ( + check_if_model_exist, + color_aug_changed, + create_refresh_button, get_executable_path, get_file_path, get_saveasfile_path, - color_aug_changed, - run_cmd_advanced_training, - update_my_data, - check_if_model_exist, + list_files, output_message, + print_command_and_toml, + run_cmd_advanced_training, SaveConfigFile, - save_to_file, scriptdir, - list_files, - create_refresh_button, + update_my_data, validate_paths, ) from .class_accelerate_launch import AccelerateLaunch @@ -765,7 +765,7 @@ def train_model( "resume": resume, "sample_every_n_epochs": sample_every_n_epochs if sample_every_n_epochs != 0 else None, "sample_every_n_steps": sample_every_n_steps if sample_every_n_steps != 0 else None, - "sample_prompts": create_prompt_file(output_dir, output_dir), + "sample_prompts": create_prompt_file(sample_prompts, output_dir), "sample_sampler": sample_sampler, "save_every_n_epochs": save_every_n_epochs if save_every_n_epochs!= 0 else None, "save_every_n_steps": save_every_n_steps if save_every_n_steps != 0 else None, @@ -825,15 +825,7 @@ def train_model( run_cmd = run_cmd_advanced_training(run_cmd=run_cmd, **kwargs_for_training) if print_only: - log.warning( - "Here is the trainer command as a reference. 
It will not be executed:\n" - ) - # Reconstruct the safe command string for display - command_to_run = " ".join(run_cmd) - - print(command_to_run) - - save_to_file(command_to_run) + print_command_and_toml(run_cmd, tmpfilename) else: # Saving config file for model current_datetime = datetime.now() @@ -853,7 +845,7 @@ def train_model( env = os.environ.copy() env["PYTHONPATH"] = ( - rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) env["TF_ENABLE_ONEDNN_OPTS"] = "0" diff --git a/kohya_gui/verify_lora_gui.py b/kohya_gui/verify_lora_gui.py index ec5bdf087..8eec90d5a 100644 --- a/kohya_gui/verify_lora_gui.py +++ b/kohya_gui/verify_lora_gui.py @@ -36,21 +36,16 @@ def verify_lora( msgbox("The provided model A is not a file") return - # Build the command to run check_lora_weights.py - run_cmd = [ - PYTHON, - f"{scriptdir}/sd-scripts/networks/check_lora_weights.py", - lora_model, - ] + run_cmd = rf'"{PYTHON}" "{scriptdir}/sd-scripts/networks/check_lora_weights.py" "{lora_model}"' - # Log the command - log.info(" ".join(run_cmd)) + log.info(run_cmd) # Set the environment variable for the Python path env = os.environ.copy() env["PYTHONPATH"] = ( - f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + fr"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) + # Example of adding an environment variable for TensorFlow, if necessary env["TF_ENABLE_ONEDNN_OPTS"] = "0" diff --git a/kohya_gui/wd14_caption_gui.py b/kohya_gui/wd14_caption_gui.py index 3928140c1..91bf7584f 100644 --- a/kohya_gui/wd14_caption_gui.py +++ b/kohya_gui/wd14_caption_gui.py @@ -53,7 +53,7 @@ def caption_images( log.info(f"Captioning files in {train_data_dir}...") run_cmd = [ - get_executable_path("accelerate"), + fr'"{get_executable_path("accelerate")}"', "launch", 
fr'"{scriptdir}/sd-scripts/finetune/tag_images_by_wd14_tagger.py"', ] @@ -116,7 +116,7 @@ def caption_images( env = os.environ.copy() env["PYTHONPATH"] = ( - rf"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" + f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}" ) # Adding an example of an environment variable that might be relevant env["TF_ENABLE_ONEDNN_OPTS"] = "0" diff --git a/test/config/dreambooth-AdamW.json b/test/config/dreambooth-AdamW.json index 651a36e5f..05ee95b11 100644 --- a/test/config/dreambooth-AdamW.json +++ b/test/config/dreambooth-AdamW.json @@ -1,5 +1,4 @@ { - "adaptive_noise_scale": 0, "additional_parameters": "", "bucket_no_upscale": true, "bucket_reso_steps": 64, @@ -58,7 +57,6 @@ "multires_noise_discount": 0, "multires_noise_iterations": 0, "no_token_padding": false, - "noise_offset": 0.05, "noise_offset_random_strength": false, "noise_offset_type": "Original", "num_cpu_threads_per_process": 2,