diff --git a/.release b/.release
index b0698964b..13be740f2 100644
--- a/.release
+++ b/.release
@@ -1 +1 @@
-v24.0.4
\ No newline at end of file
+v24.0.5
\ No newline at end of file
diff --git a/README.md b/README.md
index d4726b2f2..6bf9e22c4 100644
--- a/README.md
+++ b/README.md
@@ -42,7 +42,10 @@ The GUI allows you to set the training parameters and generate and run the requi
   - [SDXL training](#sdxl-training)
   - [Masked loss](#masked-loss)
   - [Change History](#change-history)
-    - [2024/04/25 (v24.0.4)](#20240425-v2404)
+    - [2024/04/19 (v24.0.5)](#20240419-v2405)
+    - [2024/04/18 (v24.0.4)](#20240418-v2404)
+      - [What's Changed](#whats-changed)
+      - [New Contributors](#new-contributors)
     - [2024/04/24 (v24.0.3)](#20240424-v2403)
     - [2024/04/24 (v24.0.2)](#20240424-v2402)
     - [2024/04/17 (v24.0.1)](#20240417-v2401)
@@ -409,9 +412,25 @@ ControlNet dataset is used to specify the mask. The mask images should be the RG
 
 ## Change History
 
-### 2024/04/25 (v24.0.4)
+### 2024/04/19 (v24.0.5)
 
-- ...
+- ...
+
+### 2024/04/18 (v24.0.4)
+
+#### What's Changed
+
+- Fix options.md heading by @bmaltais in
+- Use correct file extensions when browsing for model file by @b-fission in
+- Add argument for Gradio's `root_path` to enable reverse proxy support by @hlky in
+- 2325 quotes wrapping python path cause subprocess cant find target in v2403 by @bmaltais in
+- 2330 another seemingly new data validation leads to unusable configs 2403 by @bmaltais in
+- Fix bad Lora parameters by @bmaltais in
+
+#### New Contributors
+
+- @b-fission made their first contribution in
+- @hlky made their first contribution in
 
 ### 2024/04/24 (v24.0.3)
 
diff --git a/kohya_gui/blip2_caption_gui.py b/kohya_gui/blip2_caption_gui.py
index 6336a58ff..5429db0b6 100644
--- a/kohya_gui/blip2_caption_gui.py
+++ b/kohya_gui/blip2_caption_gui.py
@@ -120,7 +120,7 @@ def generate_caption(
         output_file_path = os.path.splitext(file_path)[0] + caption_file_ext
 
         # Write the generated text to the output file
-        with open(output_file_path, "w") as output_file:
+        with open(output_file_path, "w", encoding="utf-8") as output_file:
             output_file.write(generated_text)
 
         # Log the image file path with a message about the fact that the caption was generated
diff --git a/kohya_gui/class_advanced_training.py b/kohya_gui/class_advanced_training.py
index 67b4c79e2..c9784c304 100644
--- a/kohya_gui/class_advanced_training.py
+++ b/kohya_gui/class_advanced_training.py
@@ -510,10 +510,11 @@ def list_state_dirs(path):
                 value=self.config.get("advanced.max_data_loader_n_workers", 0),
             )
         with gr.Row():
-            self.use_wandb = gr.Checkbox(
-                label="WANDB Logging",
-                value=self.config.get("advanced.use_wandb", False),
-                info="If unchecked, tensorboard will be used as the default for logging.",
+            self.log_with = gr.Dropdown(
+                label="Logging",
+                choices=["", "wandb", "tensorboard", "all"],
+                value="",
+                info="Loggers to use; if none is selected, tensorboard will be used as the default.",
             )
             self.wandb_api_key = gr.Textbox(
                 label="WANDB API Key",
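The hunk above replaces the boolean `use_wandb` checkbox with a `log_with` dropdown. As a rough illustration of where that value ends up, here is a minimal sketch mapping the dropdown selection onto sd-scripts-style CLI flags. `build_logging_args` is a hypothetical helper, not the GUI's actual wiring; the GUI writes the value into a TOML config instead, as the `*_gui.py` hunks further down show.

```python
def build_logging_args(log_with: str, wandb_api_key: str = "") -> list:
    """Translate the 'Logging' dropdown selection into sd-scripts-style flags."""
    args = []
    if log_with:  # "" means: let the trainer default to tensorboard
        args += ["--log_with", log_with]
    if log_with in ("wandb", "all") and wandb_api_key:
        args += ["--wandb_api_key", wandb_api_key]
    return args


print(build_logging_args("wandb", "xxxx"))
# ['--log_with', 'wandb', '--wandb_api_key', 'xxxx']
```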
diff --git a/kohya_gui/class_basic_training.py b/kohya_gui/class_basic_training.py
index 594faf1b6..91b1f258f 100644
--- a/kohya_gui/class_basic_training.py
+++ b/kohya_gui/class_basic_training.py
@@ -98,7 +98,7 @@ def init_training_controls(self) -> None:
             step=1,
             # precision=0,
             minimum=0,
-            value=self.config.get("basic.max_train_epochs", 1600),
+            value=self.config.get("basic.max_train_epochs", 0),
         )
         # Initialize the maximum train steps input
         self.max_train_steps = gr.Number(
@@ -106,7 +106,7 @@ def init_training_controls(self) -> None:
             info="Overrides # training steps. 0 = no override",
             step=1,
             # precision=0,
-            value=self.config.get("basic.max_train_steps", 0),
+            value=self.config.get("basic.max_train_steps", 1600),
         )
         # Initialize the save every N epochs input
         self.save_every_n_epochs = gr.Number(
diff --git a/kohya_gui/class_gui_config.py b/kohya_gui/class_gui_config.py
index a19e855af..33064a9da 100644
--- a/kohya_gui/class_gui_config.py
+++ b/kohya_gui/class_gui_config.py
@@ -45,7 +45,7 @@ def save_config(self, config: dict, config_file_path: str = "./config.toml"):
         - config (dict): The configuration data to save.
         """
         # Write the configuration data to the TOML file
-        with open(f"{config_file_path}", "w") as f:
+        with open(f"{config_file_path}", "w", encoding="utf-8") as f:
             toml.dump(config, f)
 
     def get(self, key: str, default=None):
diff --git a/kohya_gui/class_sample_images.py b/kohya_gui/class_sample_images.py
index 47ffc0f97..8f69a2ec6 100644
--- a/kohya_gui/class_sample_images.py
+++ b/kohya_gui/class_sample_images.py
@@ -30,7 +30,7 @@ def create_prompt_file(sample_prompts, output_dir):
     """
     sample_prompts_path = os.path.join(output_dir, "prompt.txt")
 
-    with open(sample_prompts_path, "w") as f:
+    with open(sample_prompts_path, "w", encoding="utf-8") as f:
         f.write(sample_prompts)
 
     return sample_prompts_path
diff --git a/kohya_gui/class_tensorboard.py b/kohya_gui/class_tensorboard.py
index 9fc1b76b5..70c5d8e00 100644
--- a/kohya_gui/class_tensorboard.py
+++ b/kohya_gui/class_tensorboard.py
@@ -94,9 +94,20 @@ def stop_tensorboard(self):
         return self.get_button_states(started=False)
 
     def gradio_interface(self):
+        try:
+            os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'
+
+            import tensorflow  # Attempt to import tensorflow to check if it is installed
+
+            visibility = True
+
+        except ImportError:
+            self.log.error("tensorflow is not installed, hiding the tensorboard button...")
+            visibility = False
+
         with gr.Row():
             button_start_tensorboard = gr.Button(
-                value="Start tensorboard", elem_id="myTensorButton"
+                value="Start tensorboard", elem_id="myTensorButton", visible=visibility
             )
             button_stop_tensorboard = gr.Button(
                 value="Stop tensorboard",
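The `class_tensorboard.py` hunk above hides the "Start tensorboard" button when tensorflow cannot be imported. A lighter-weight variant of the same availability check, using `importlib.util.find_spec` so the module is never actually imported, would look like this (a sketch, not what the diff itself does):

```python
import importlib.util


def module_available(module_name: str) -> bool:
    """Return True if `module_name` is importable, without importing it."""
    return importlib.util.find_spec(module_name) is not None


# Drive the button visibility from the check, as gradio_interface() does above.
visibility = module_available("tensorflow")
print(f"Show tensorboard buttons: {visibility}")
```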
diff --git a/kohya_gui/common_gui.py b/kohya_gui/common_gui.py
index dc2dd3f96..a00d3e681 100644
--- a/kohya_gui/common_gui.py
+++ b/kohya_gui/common_gui.py
@@ -331,7 +331,6 @@ def update_my_data(my_data):
         "lr_warmup",
         "max_data_loader_n_workers",
         "max_train_epochs",
-        "max_train_steps",
         "save_every_n_epochs",
         "seed",
     ]:
@@ -352,6 +351,17 @@ def update_my_data(my_data):
         except ValueError:
             # Handle the case where the string is not a valid float
             my_data[key] = int(1)
+
+    for key in [
+        "max_train_steps",
+    ]:
+        value = my_data.get(key)
+        if value is not None:
+            try:
+                my_data[key] = int(value)
+            except ValueError:
+                # Handle the case where the string is not a valid int
+                my_data[key] = int(1600)
 
     # Convert values to int if they are strings
     for key in ["max_token_length"]:
@@ -406,7 +416,17 @@ def update_my_data(my_data):
             my_data["xformers"] = "xformers"
         else:
             my_data["xformers"] = "none"
-
+
+    # Convert the deprecated use_wandb flag to log_with="wandb" if it was enabled
+    for key in ["use_wandb"]:
+        value = my_data.get(key)
+        if value is not None:
+            # The value may be stored as a boolean or as the string "True"
+            if value is True or value == "True":
+                my_data["log_with"] = "wandb"
+
+        my_data.pop(key, None)
+
     return my_data
 
 
@@ -743,13 +763,13 @@ def add_pre_postfix(
         # Check if the caption file does not exist
         if not os.path.exists(caption_file_path):
             # Create a new caption file with the specified prefix and/or postfix
-            with open(caption_file_path, "w", encoding="utf8") as f:
+            with open(caption_file_path, "w", encoding="utf-8") as f:
                 # Determine the separator based on whether both prefix and postfix are provided
                 separator = " " if prefix and postfix else ""
                 f.write(f"{prefix}{separator}{postfix}")
         else:
             # Open the existing caption file for reading and writing
-            with open(caption_file_path, "r+", encoding="utf8") as f:
+            with open(caption_file_path, "r+", encoding="utf-8") as f:
                 # Read the content of the caption file, stripping any trailing whitespace
                 content = f.read().rstrip()
                 # Move the file pointer to the beginning of the file
@@ -850,11 +870,11 @@ def find_replace(
             file_path = os.path.join(folder_path, caption_file)
             # Read and replace text
             try:
-                with open(file_path, "r", errors="ignore") as f:
+                with open(file_path, "r", errors="ignore", encoding="utf-8") as f:
                     content = f.read().replace(search_text, replace_text)
 
                 # Write the updated content back to the file
-                with open(file_path, "w") as f:
+                with open(file_path, "w", encoding="utf-8") as f:
                     f.write(content)
             except Exception as e:
                 log.error(f"Error processing file {file_path}: {e}")
@@ -1218,7 +1238,7 @@ def SaveConfigFile(
         log.info(f"Creating folder {folder_path} for the configuration file...")
 
     # Save the data to the specified JSON file
-    with open(file_path, "w") as file:
+    with open(file_path, "w", encoding="utf-8") as file:
        json.dump(variables, file, indent=2)
 
 
@@ -1242,7 +1262,7 @@ def save_to_file(content):
 
     # Append content to the specified file
     try:
-        with open(file_path, "a") as file:
+        with open(file_path, "a", encoding="utf-8") as file:
             file.write(content + "\n")
     except IOError as e:
         print(f"Error: Could not write to file - {e}")
@@ -1443,7 +1463,7 @@ def is_file_writable(file_path: str) -> bool:
 
     try:
         # Attempt to open the file in append mode to check if it can be written to
-        with open(file_path, "a"):
+        with open(file_path, "a", encoding="utf-8"):
             pass
         # If the file can be opened, it is considered writable
         return True
@@ -1463,7 +1483,7 @@ def print_command_and_toml(run_cmd, tmpfilename):
     log.info(f"Showing toml config file: {tmpfilename}")
     print("")
 
-    with open(tmpfilename, "r") as toml_file:
+    with open(tmpfilename, "r", encoding="utf-8") as toml_file:
         log.info(toml_file.read())
 
     log.info(f"end of toml config file: {tmpfilename}")
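For clarity, the `use_wandb` → `log_with` conversion added to `update_my_data` above boils down to the following standalone sketch (the function name is hypothetical; configs saved by the GUI store the flag as a JSON boolean, while older hand-edited ones may carry the string `"True"`):

```python
def migrate_logging_key(config: dict) -> dict:
    """Drop the deprecated use_wandb key, mapping it to log_with when enabled."""
    value = config.pop("use_wandb", None)
    if value is True or value == "True":
        config["log_with"] = "wandb"
    return config


print(migrate_logging_key({"use_wandb": True}))    # {'log_with': 'wandb'}
print(migrate_logging_key({"use_wandb": "True"}))  # {'log_with': 'wandb'}
print(migrate_logging_key({"use_wandb": False}))   # {}
```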
"log_with": log_with, "v2": v2, "v_parameterization": v_parameterization, "v_pred_like_loss": v_pred_like_loss if v_pred_like_loss != 0 else None, @@ -810,7 +810,7 @@ def train_model( tmpfilename = "./outputs/tmpfiledbooth.toml" # Save the updated TOML data back to the file - with open(tmpfilename, "w") as toml_file: + with open(tmpfilename, "w", encoding="utf-8") as toml_file: toml.dump(config_toml_data, toml_file) if not os.path.exists(toml_file.name): @@ -1056,7 +1056,7 @@ def dreambooth_tab( advanced_training.save_every_n_steps, advanced_training.save_last_n_steps, advanced_training.save_last_n_steps_state, - advanced_training.use_wandb, + advanced_training.log_with, advanced_training.wandb_api_key, advanced_training.wandb_run_name, advanced_training.log_tracker_name, diff --git a/kohya_gui/finetune_gui.py b/kohya_gui/finetune_gui.py index f4cdee5c7..4c7de548e 100644 --- a/kohya_gui/finetune_gui.py +++ b/kohya_gui/finetune_gui.py @@ -169,7 +169,7 @@ def save_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, - use_wandb, + log_with, wandb_api_key, wandb_run_name, log_tracker_name, @@ -335,7 +335,7 @@ def open_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, - use_wandb, + log_with, wandb_api_key, wandb_run_name, log_tracker_name, @@ -383,7 +383,7 @@ def open_configuration( if not file_path == "" and not file_path == None: # load variables from JSON file - with open(file_path, "r") as f: + with open(file_path, "r", encoding="utf-8") as f: my_data = json.load(f) log.info("Loading config...") # Update values to fix deprecated use_8bit_adam checkbox and set appropriate optimizer if it is set to True @@ -507,7 +507,7 @@ def train_model( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, - use_wandb, + log_with, wandb_api_key, wandb_run_name, log_tracker_name, @@ -856,7 +856,7 @@ def train_model( "train_batch_size": train_batch_size, "train_data_dir": image_folder, "train_text_encoder": train_text_encoder, - "use_wandb": use_wandb, + "log_with": log_with, "v2": v2, "v_parameterization": v_parameterization, "v_pred_like_loss": v_pred_like_loss if v_pred_like_loss != 0 else None, @@ -882,7 +882,7 @@ def train_model( tmpfilename = "./outputs/tmpfilefinetune.toml" # Save the updated TOML data back to the file - with open(tmpfilename, "w") as toml_file: + with open(tmpfilename, "w", encoding="utf-8") as toml_file: toml.dump(config_toml_data, toml_file) if not os.path.exists(toml_file.name): @@ -1209,7 +1209,7 @@ def list_presets(path): advanced_training.save_every_n_steps, advanced_training.save_last_n_steps, advanced_training.save_last_n_steps_state, - advanced_training.use_wandb, + advanced_training.log_with, advanced_training.wandb_api_key, advanced_training.wandb_run_name, advanced_training.log_tracker_name, @@ -1322,6 +1322,6 @@ def list_presets(path): gr.Markdown("This section provide Various Finetuning guides and information...") top_level_path = rf'"{scriptdir}/docs/Finetuning/top_level.md"' if os.path.exists(top_level_path): - with open(os.path.join(top_level_path), "r", encoding="utf8") as file: + with open(os.path.join(top_level_path), "r", encoding="utf-8") as file: guides_top_level = file.read() + "\n" gr.Markdown(guides_top_level) diff --git a/kohya_gui/localization.py b/kohya_gui/localization.py index 3cddec740..5c91772eb 100644 --- a/kohya_gui/localization.py +++ b/kohya_gui/localization.py @@ -20,7 +20,7 @@ def load_language_js(language_name: str) -> str: data = {} if fn is not None: try: - with open(fn, "r", 
encoding="utf8") as file: + with open(fn, "r", encoding="utf-8") as file: data = json.load(file) except Exception: logging.ERROR(f"Error loading localization from {fn}") diff --git a/kohya_gui/localization_ext.py b/kohya_gui/localization_ext.py index 4571dd38b..0a58752ea 100644 --- a/kohya_gui/localization_ext.py +++ b/kohya_gui/localization_ext.py @@ -10,9 +10,9 @@ def file_path(fn): def js_html_str(language): head = f'\n' head += ( - f'\n' + f'\n' ) - head += f'\n' + head += f'\n' return head @@ -24,7 +24,7 @@ def add_javascript(language): def template_response(*args, **kwargs): res = localization.GrRoutesTemplateResponse(*args, **kwargs) - res.body = res.body.replace(b"", f"{jsStr}".encode("utf8")) + res.body = res.body.replace(b"", f"{jsStr}".encode("utf-8")) res.init_headers() return res diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 9586f9c6d..059705404 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -195,7 +195,7 @@ def save_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, - use_wandb, + log_with, wandb_api_key, wandb_run_name, log_tracker_name, @@ -400,7 +400,7 @@ def open_configuration( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, - use_wandb, + log_with, wandb_api_key, wandb_run_name, log_tracker_name, @@ -469,7 +469,7 @@ def open_configuration( return # Load variables from JSON file - with open(file_path, "r") as f: + with open(file_path, "r", encoding="utf-8") as f: my_data = json.load(f) log.info("Loading config...") @@ -635,7 +635,7 @@ def train_model( save_every_n_steps, save_last_n_steps, save_last_n_steps_state, - use_wandb, + log_with, wandb_api_key, wandb_run_name, log_tracker_name, @@ -1152,7 +1152,7 @@ def train_model( "train_data_dir": train_data_dir, "training_comment": training_comment, "unet_lr": unet_lr if not 0 else None, - "use_wandb": use_wandb, + "log_with": log_with, "v2": v2, "v_parameterization": v_parameterization, "v_pred_like_loss": v_pred_like_loss if v_pred_like_loss != 0 else None, @@ -1179,7 +1179,7 @@ def train_model( tmpfilename = "./outputs/tmpfilelora.toml" # Save the updated TOML data back to the file - with open(tmpfilename, "w") as toml_file: + with open(tmpfilename, "w", encoding="utf-8") as toml_file: toml.dump(config_toml_data, toml_file) if not os.path.exists(toml_file.name): @@ -2223,7 +2223,7 @@ def update_LoRA_settings( advanced_training.save_every_n_steps, advanced_training.save_last_n_steps, advanced_training.save_last_n_steps_state, - advanced_training.use_wandb, + advanced_training.log_with, advanced_training.wandb_api_key, advanced_training.wandb_run_name, advanced_training.log_tracker_name, @@ -2341,7 +2341,7 @@ def update_LoRA_settings( with open( os.path.join(rf"{scriptdir}/docs/LoRA/top_level.md"), "r", - encoding="utf8", + encoding="utf-8", ) as file: guides_top_level = file.read() + "\n" gr.Markdown(guides_top_level) diff --git a/kohya_gui/manual_caption_gui.py b/kohya_gui/manual_caption_gui.py index cca885a92..d1b571934 100644 --- a/kohya_gui/manual_caption_gui.py +++ b/kohya_gui/manual_caption_gui.py @@ -59,7 +59,7 @@ def paginate(page, max_page, page_change): def save_caption(caption, caption_ext, image_file, images_dir): caption_path = _get_caption_path(image_file, images_dir, caption_ext) - with open(caption_path, "w+", encoding="utf8") as f: + with open(caption_path, "w+", encoding="utf-8") as f: f.write(caption) log.info(f"Wrote captions to {caption_path}") @@ -146,7 +146,7 @@ def empty_return(): for image_file in image_files: 
diff --git a/kohya_gui/merge_lora_gui.py b/kohya_gui/merge_lora_gui.py
index a9c798291..dae520859 100644
--- a/kohya_gui/merge_lora_gui.py
+++ b/kohya_gui/merge_lora_gui.py
@@ -54,12 +54,12 @@ def __init__(self, headless=False, use_shell: bool = False):
         self.build_tab()
 
     def save_inputs_to_json(self, file_path, inputs):
-        with open(file_path, "w") as file:
+        with open(file_path, "w", encoding="utf-8") as file:
             json.dump(inputs, file)
             log.info(f"Saved inputs to {file_path}")
 
     def load_inputs_from_json(self, file_path):
-        with open(file_path, "r") as file:
+        with open(file_path, "r", encoding="utf-8") as file:
             inputs = json.load(file)
             log.info(f"Loaded inputs from {file_path}")
             return inputs
diff --git a/kohya_gui/textual_inversion_gui.py b/kohya_gui/textual_inversion_gui.py
index 549df5200..4e7b6a45a 100644
--- a/kohya_gui/textual_inversion_gui.py
+++ b/kohya_gui/textual_inversion_gui.py
@@ -160,7 +160,7 @@ def save_configuration(
     save_every_n_steps,
     save_last_n_steps,
     save_last_n_steps_state,
-    use_wandb,
+    log_with,
     wandb_api_key,
     wandb_run_name,
     log_tracker_name,
@@ -319,7 +319,7 @@ def open_configuration(
     save_every_n_steps,
     save_last_n_steps,
     save_last_n_steps_state,
-    use_wandb,
+    log_with,
     wandb_api_key,
     wandb_run_name,
     log_tracker_name,
@@ -352,7 +352,7 @@ def open_configuration(
 
     if not file_path == "" and not file_path == None:
         # load variables from JSON file
-        with open(file_path, "r") as f:
+        with open(file_path, "r", encoding="utf-8") as f:
             my_data = json.load(f)
         log.info("Loading config...")
         # Update values to fix deprecated use_8bit_adam checkbox and set appropriate optimizer if it is set to True
@@ -471,7 +471,7 @@ def train_model(
     save_every_n_steps,
     save_last_n_steps,
     save_last_n_steps_state,
-    use_wandb,
+    log_with,
     wandb_api_key,
     wandb_run_name,
     log_tracker_name,
@@ -800,7 +800,7 @@ def train_model(
         "token_string": token_string,
         "train_batch_size": train_batch_size,
         "train_data_dir": train_data_dir,
-        "use_wandb": use_wandb,
+        "log_with": log_with,
         "v2": v2,
         "v_parameterization": v_parameterization,
         "v_pred_like_loss": v_pred_like_loss if v_pred_like_loss != 0 else None,
@@ -829,7 +829,7 @@ def train_model(
     tmpfilename = "./outputs/tmpfileti.toml"
 
     # Save the updated TOML data back to the file
-    with open(tmpfilename, "w") as toml_file:
+    with open(tmpfilename, "w", encoding="utf-8") as toml_file:
         toml.dump(config_toml_data, toml_file)
 
     if not os.path.exists(toml_file.name):
@@ -1167,7 +1167,7 @@ def list_embedding_files(path):
         advanced_training.save_every_n_steps,
         advanced_training.save_last_n_steps,
         advanced_training.save_last_n_steps_state,
-        advanced_training.use_wandb,
+        advanced_training.log_with,
         advanced_training.wandb_api_key,
         advanced_training.wandb_run_name,
         advanced_training.log_tracker_name,
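The `wd14_caption_gui.py` section below changes how the tagger command is launched: instead of pre-quoting each path, joining the pieces into one string, and running it with `shell=use_shell`, it passes the argument list straight to `subprocess.run()`. With a list and no shell, each element arrives as a single argument, so paths containing spaces need no manual quoting. A quick runnable sketch (the path is hypothetical):

```python
import subprocess
import sys

arg_with_spaces = "C:/path with spaces/train data"  # hypothetical path

# List form: no shell, no manual quoting; the argument arrives intact.
result = subprocess.run(
    [sys.executable, "-c", "import sys; print(sys.argv[1])", arg_with_spaces],
    capture_output=True,
    text=True,
)
print(result.stdout.strip())  # -> C:/path with spaces/train data
```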
diff --git a/kohya_gui/wd14_caption_gui.py b/kohya_gui/wd14_caption_gui.py
index 91bf7584f..32153e5b0 100644
--- a/kohya_gui/wd14_caption_gui.py
+++ b/kohya_gui/wd14_caption_gui.py
@@ -15,7 +15,7 @@
 
 # Set up logging
 log = setup_logging()
-
+old_onnx_value = True
 
 def caption_images(
     train_data_dir: str,
@@ -50,12 +50,16 @@ def caption_images(
     if caption_extension == "":
         msgbox("Please provide an extension for the caption files.")
         return
+
+    repo_id_converted = repo_id.replace("/", "_")
+    if not os.path.exists(f"./wd14_tagger_model/{repo_id_converted}"):
+        force_download = True
 
     log.info(f"Captioning files in {train_data_dir}...")
     run_cmd = [
-        fr'"{get_executable_path("accelerate")}"',
+        fr'{get_executable_path("accelerate")}',
         "launch",
-        fr'"{scriptdir}/sd-scripts/finetune/tag_images_by_wd14_tagger.py"',
+        fr"{scriptdir}/sd-scripts/finetune/tag_images_by_wd14_tagger.py",
     ]
 
     # Uncomment and modify if needed
@@ -112,21 +116,21 @@ def caption_images(
            run_cmd.append("--use_rating_tags_as_last_tag")
 
     # Add the directory containing the training data
-    run_cmd.append(fr'"{train_data_dir}"')
+    run_cmd.append(fr'{train_data_dir}')
 
     env = os.environ.copy()
     env["PYTHONPATH"] = (
-        f"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
+        fr"{scriptdir}{os.pathsep}{scriptdir}/sd-scripts{os.pathsep}{env.get('PYTHONPATH', '')}"
     )
     # Adding an example of an environment variable that might be relevant
     env["TF_ENABLE_ONEDNN_OPTS"] = "0"
 
     # Reconstruct the safe command string for display
     command_to_run = " ".join(run_cmd)
-    log.info(f"Executing command: {command_to_run} with shell={use_shell}")
+    log.info(f"Executing command: {command_to_run}")
 
     # Run the command in the sd-scripts folder context
-    subprocess.run(command_to_run, env=env, shell=use_shell)
+    subprocess.run(run_cmd, env=env)
 
 
 # Add prefix and postfix
@@ -359,6 +363,21 @@ def list_train_dirs(path):
                 label="Max dataloader workers",
                 interactive=True,
             )
+
+            def repo_id_changes(repo_id, onnx):
+                global old_onnx_value
+
+                if "-v3" in repo_id:
+                    old_onnx_value = onnx
+                    return gr.Checkbox(value=True, interactive=False)
+                else:
+                    return gr.Checkbox(value=old_onnx_value, interactive=True)
+
+            repo_id.change(
+                repo_id_changes,
+                inputs=[repo_id, onnx],
+                outputs=[onnx]
+            )
 
         caption_button = gr.Button("Caption images")
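Finally, the `repo_id.change` handler added above locks the ONNX checkbox on whenever a `-v3` tagger repo is selected (sd-scripts' WD14 tagger loads the v3 models via ONNX) and restores the user's previous choice when switching back. A self-contained sketch of the same interaction, runnable with just gradio installed; it mirrors the diff's pattern of returning an updated component from the event handler:

```python
import gradio as gr

previous_onnx = True  # mirrors the module-level old_onnx_value in the diff


def on_repo_change(repo_id, onnx):
    """Force ONNX for -v3 taggers; otherwise restore the remembered choice."""
    global previous_onnx
    if "-v3" in repo_id:
        previous_onnx = onnx  # remember the user's setting before locking
        return gr.Checkbox(value=True, interactive=False)
    return gr.Checkbox(value=previous_onnx, interactive=True)


with gr.Blocks() as demo:
    repo = gr.Textbox(label="Repo ID", value="SmilingWolf/wd-swinv2-tagger-v3")
    onnx = gr.Checkbox(label="Use ONNX", value=True)
    repo.change(on_repo_change, inputs=[repo, onnx], outputs=[onnx])

# demo.launch()  # uncomment to try it locally
```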