From 81b3d81cf7a04d9e6a989356ec89358ef04a9e0a Mon Sep 17 00:00:00 2001 From: Hina Chen Date: Tue, 27 Feb 2024 15:51:06 +0800 Subject: [PATCH 1/2] Update and cleanup localization for zh-TW --- localizations/zh-TW.json | 41 ++++++++++++++++++++++++++++------------ 1 file changed, 29 insertions(+), 12 deletions(-) diff --git a/localizations/zh-TW.json b/localizations/zh-TW.json index df172cb9f..7c4cfac92 100644 --- a/localizations/zh-TW.json +++ b/localizations/zh-TW.json @@ -24,7 +24,6 @@ "network dim for conv layer in fixed mode": "固定模式下卷積層的網路維度", "Sparsity for sparse bias": "稀疏偏差的稀疏度", "path for the file to save...": "儲存檔案的路徑...", - "Verify LoRA": "驗證 LoRA", "Verify": "驗證", "Verification output": "驗證輸出", "Verification error": "驗證錯誤", @@ -144,6 +143,7 @@ "If the weight is not more than this value, the LoRA module is not created. The default is 0.": "如果權重不超過此值,則不會創建 LoRA 模組。預設為 0。", "Blocks": "區塊", "Block dims": "區塊維度", + "Block alphas": "區塊 Alphas", "(Optional) eg: 2,2,2,2,4,4,4,4,6,6,6,6,8,6,6,6,6,4,4,4,4,2,2,2,2": " (選填) 例如:2,2,2,2,4,4,4,4,6,6,6,6,8,6,6,6,6,4,4,4,4,2,2,2,2", "Specify the dim (rank) of each block. Specify 25 numbers.": "指定每個區塊的維度 (Rank)。指定 25 個數字。", "Specify the alpha of each block. Specify 25 numbers as with block_dims. If omitted, the value of network_alpha is used.": "指定每個區塊的 Alpha。與區塊維度一樣,指定 25 個數字。如果省略,則使用網路 Alpha 的值。", @@ -185,7 +185,7 @@ "Conv Dimension": "卷積維度", "Convert model": "轉換模型", "Copy info to Folders Tab": "複製資訊到資料夾頁籤", - "cosine_with_restarts": "餘弦函數並使用重啟", + "cosine_with_restarts": "餘弦函數並使用重啟 (cosine_with_restarts)", "cosine": "餘弦函數 (cosine)", "CrossAttention": "交叉注意力", "DANGER!!! -- Insecure folder renaming -- DANGER!!!": "危險!!! 
-- 不安全的資料夾重新命名 -- 危險!!!", @@ -259,10 +259,13 @@ "Install Location": "安裝位置", "Installation": "安裝", "Instance prompt": "實例 (Instance) 提示詞", + "is SDXL": "SDXL 模型", + "is v2": "SD2 模型", "Keep n tokens": "保留 N 個提示詞", "Launching the GUI on Linux and macOS": "在 Linux/macOS 上啟動 GUI", "Launching the GUI on Windows": "在 Windows 上啟動 GUI", "Learning rate": "學習率", + "Load precision": "讀取精度", "adafactor": "自適應學習 (adafactor)", "linear": "線性 (linear)", "Linux and macOS Upgrade": "Linux/macOS 升級", @@ -270,6 +273,8 @@ "Linux Pre-requirements": "Linux Pre-requirements", "Load": "載入", "Loading...": "載入中...", + "Load finetuned model to": "將微調模型讀取至", + "Load Stable Diffusion base model to": "將穩定擴散基礎模型讀取至", "Local docker build": "Docker 建構", "Logging folder": "記錄資料夾", "LoRA model \"A\"": "LoRA 模型 \"A\"", @@ -283,7 +288,7 @@ "LR power": "學習率乘冪", "LR scheduler extra arguments": "學習率調度器額外參數", "LR Scheduler": "學習率調度器", - "LR warmup (% of steps)": "學習率預熱 (% 的步數)", + "LR warmup (% of total steps)": "學習率預熱 (總步數的百分比)", "LyCORIS model": "LyCORIS 模型", "Macos is not great at the moment.": "目前 MacOS 支援並不是很好。", "Manual Captioning": "手動標記文字", @@ -291,6 +296,11 @@ "Max bucket resolution": "最大資料儲存桶解析度", "Max length": "最大長度", "Max num workers for DataLoader": "資料工作載入的最大工作數量", + "Number of processes": "進程數量", + "Number of machines": "機器數量", + "Multi GPU": "多個 GPU", + "GPU IDs": "GPU ID", + "example: 0,1": "例如:0,1", "Max resolution": "最大解析度", "Max Timestep": "最大時序步數", "Max Token Length": "最大標記長度", @@ -336,8 +346,10 @@ "Number of CPU threads per core": "每個 CPU 核心的線程數", "Number of images to group together": "要一起分組的圖片數量", "Number of updates steps to accumulate before performing a backward/update pass": "執行反向/更新傳遞之前,需要累積的更新步驟數", + "Number of workers": "Worker 數量", "object template": "物件樣版", "Only for SD v2 models. 
By scaling the loss according to the time step, the weights of global noise prediction and local noise prediction become the same, and the improvement of details may be expected.": "僅適用於 SD v2 模型。通過根據時序步數的縮放損失,整體的噪聲預測與局部的噪聲預測的權重會變得相同,以此希望能改善細節。", + "only for SDXL": "僅適用於 SDXL", "Open": "開啟", "Optimizer extra arguments": "優化器額外參數", "Optimizer": "優化器", @@ -433,7 +445,7 @@ "Start training": "開始訓練", "Starting GUI Service": "啟動 GUI 服務", "Stop tensorboard": "停止 TensorBoard", - "Stop text encoder training": "停止文字編碼器訓練", + "Stop text encoder training (% of total steps)": "停止文字編碼器訓練(總步數的百分比)", "Stop training": "停止訓練", "style template": "風格樣版", "sv_fro": "sv_fro", @@ -443,12 +455,17 @@ "Target model type": "目標模型類型", "Template": "樣版", "Text Encoder learning rate": "文字編碼器學習率", + "SDXL Specific Parameters": "SDXL 特定參數", + "Cache the outputs of the text encoders. This option is useful to reduce the GPU memory usage. This option cannot be used with options for shuffling or dropping the captions.": "暫存文字編碼器的輸出。這個選項有助於減少 GPU 記憶體的使用。這個選項不能與打亂或丟棄提示詞的選項一起使用。", + "Cache text encoder outputs": "暫存文字編碼器輸出", + "Disable the half-precision (mixed-precision) VAE. VAE for SDXL seems to produce NaNs in some cases. 
This option is useful to avoid the NaNs.": "停用半精度 (混合精度) VAE。在某些情況下,SDXL 的 VAE 似乎會產生 NaN。這個選項有助於避免 NaN。", + "No half VAE": "不使用半精度 VAE", "The fine-tuning can be done with 24GB GPU memory with the batch size of 1.": "微調可以再使用 1 個批次大小的情況下,在 24GB GPU 記憶體的狀態下完成。", "The GUI allows you to set the training parameters and generate and run the required CLI commands to train the model.": "此 GUI 允許你設定訓練參數,並產生執行模型訓練所需要的 CLI 指令。", "This guide is a resource compilation to facilitate the development of robust LoRA models.": "該指南是一個資源彙整,以促進強大LoRA模型的開發。", - "This section provide Dreambooth tools to help setup your dataset…": "這些選擇幫助設置自己的資料集", - "This section provide LoRA tools to help setup your dataset…": "本節提供 LoRA 工具以幫助您設置資料集...", - "This section provide Various Finetuning guides and information…": "本節提供各種微調指南和訊息", + "This section provide Dreambooth tools to help setup your dataset...": "這些選擇幫助設置自己的資料集", + "This section provide LoRA tools to help setup your dataset...": "本節提供 LoRA 工具以幫助您設置資料集...", + "This section provide Various Finetuning guides and information...": "本節提供各種微調指南和訊息", "This utility allows quick captioning and tagging of images.": "此工具允許快速地為圖像添加標題和標籤。", "This utility allows you to create simple caption files for each image in a folder.": "此工具允許您為資料夾中的每個圖片建立簡單的標籤文件。", "This utility can be used to convert from one stable diffusion model format to another.": "該工具可用於將一個穩定擴散模型格式轉換為另一種格式", @@ -468,8 +485,8 @@ "This utility will use WD14 to caption files for each images in a folder.": "此工具使用 WD14 為資料夾中的每張圖像添加標籤。", "Tips for SDXL training": "SDXL 訓練提示", "Token string": "標記符號", - "Train a custom model using kohya finetune python code": "使用 kohya finetune Python 程式訓練自定義模型", - "Train a custom model using kohya train network LoRA python code…": "使用 kohya LoRA Python 程式訓練自定義模型", + "Train a custom model using kohya finetune python code...": "使用 kohya finetune Python 程式訓練自定義模型", + "Train a custom model using kohya train network LoRA python code...": "使用 kohya LoRA Python 
程式訓練自定義模型", "Train batch size": "訓練批次大小", "Train Network": "訓練網絡", "Train text encoder": "訓練文字編碼器", @@ -490,6 +507,7 @@ "Use gradient checkpointing.": "使用梯度檢查點。", "Use latent files": "使用潛空間檔案", "Use sparse biais": "使用使用稀疏偏差", + "Use onnx": "使用 ONNX", "Users can obtain and/or generate an api key in the their user settings on the website: https://wandb.ai/login": "使用者可以在以下網站的用戶設定中取得,或產生 API 金鑰:https://wandb.ai/login", "V Pred like loss": "V 預測損失", "Values greater than 0 will make the model more img2img focussed. 0 = image only": "大於 0 的數值會使模型更加聚焦在 img2img 上。0 表示僅關注於圖像生成", @@ -501,8 +519,7 @@ "WARNING! The use of this utility on the wrong folder can lead to unexpected folder renaming!!!": "警告!在錯誤的資料夾使用此工具,可能會意外導致資料夾被重新命名!!!", "WD14 Captioning": "WD14 提詞", "Windows Upgrade": "Windows 升级", - "Train a custom model using kohya dreambooth python code…": "使用 kohya dreambooth Python 程式訓練自定義模型", + "Train a custom model using kohya dreambooth python code...": "使用 kohya dreambooth Python 程式訓練自定義模型", "Training comment": "訓練註解", - "Train a TI using kohya textual inversion python code…": "使用 kohya textual inversion Python 程式訓練 TI 模型", - "Train a custom model using kohya finetune python code…": "使用 kohya finetune Python 程式訓練自定義模型" + "Train a TI using kohya textual inversion python code...": "使用 kohya textual inversion Python 程式訓練 TI 模型" } From ffb6430c9fababcce8c7c1f5cb66998be69c170e Mon Sep 17 00:00:00 2001 From: Hina Chen Date: Mon, 18 Mar 2024 10:11:01 +0800 Subject: [PATCH 2/2] Cleanup and bump localization to 23.0.11 --- kohya_gui/blip_caption_gui.py | 2 +- kohya_gui/class_advanced_training.py | 10 +- kohya_gui/extract_lycoris_locon_gui.py | 4 +- kohya_gui/git_caption_gui.py | 6 +- kohya_gui/lora_gui.py | 10 +- kohya_gui/manual_caption_gui.py | 2 +- kohya_gui/merge_lycoris_gui.py | 2 +- kohya_gui/resize_lora_gui.py | 2 +- kohya_gui/wd14_caption_gui.py | 4 +- localizations/zh-TW.json | 1004 +++++++++++------------- 10 files changed, 501 insertions(+), 545 deletions(-) diff 
--git a/kohya_gui/blip_caption_gui.py b/kohya_gui/blip_caption_gui.py index 525c537ea..e35d8add5 100644 --- a/kohya_gui/blip_caption_gui.py +++ b/kohya_gui/blip_caption_gui.py @@ -123,7 +123,7 @@ def list_train_dirs(path): with gr.Row(): caption_file_ext = gr.Textbox( label="Caption file extension", - placeholder="Extension for caption file, e.g., .caption, .txt", + placeholder="Extension for caption file (e.g., .caption, .txt)", value=".txt", interactive=True, ) diff --git a/kohya_gui/class_advanced_training.py b/kohya_gui/class_advanced_training.py index bc4adc278..2a55f35e9 100644 --- a/kohya_gui/class_advanced_training.py +++ b/kohya_gui/class_advanced_training.py @@ -127,7 +127,7 @@ def full_options_update(full_fp16, full_bf16): ], value='75', ) - + with gr.Row(): if training_type == "lora": self.fp8_base = gr.Checkbox( @@ -144,7 +144,7 @@ def full_options_update(full_fp16, full_bf16): value=False, info='Required bitsandbytes >= 0.36.0', ) - + self.full_fp16.change( full_options_update, inputs=[self.full_fp16, self.full_bf16], @@ -249,7 +249,7 @@ def full_options_update(full_fp16, full_bf16): minimum=0, maximum=1, step=0.01, - info='recommended values are 0.05 - 0.15', + info='Recommended values are 0.05 - 0.15', ) self.adaptive_noise_scale = gr.Slider( label='Adaptive noise scale', @@ -266,7 +266,7 @@ def full_options_update(full_fp16, full_bf16): minimum=0, maximum=64, step=1, - info='enable multires noise (recommended values are 6-10)', + info='Enable multires noise (recommended values are 6-10)', ) self.multires_noise_discount = gr.Slider( label='Multires noise discount', @@ -274,7 +274,7 @@ def full_options_update(full_fp16, full_bf16): minimum=0, maximum=1, step=0.01, - info='recommended values are 0.8. For LoRAs with small datasets, 0.1-0.3', + info='Recommended values are 0.8. 
For LoRAs with small datasets, 0.1-0.3', ) self.noise_offset_type.change( noise_offset_type_change, diff --git a/kohya_gui/extract_lycoris_locon_gui.py b/kohya_gui/extract_lycoris_locon_gui.py index feb00ce36..8ed643b9e 100644 --- a/kohya_gui/extract_lycoris_locon_gui.py +++ b/kohya_gui/extract_lycoris_locon_gui.py @@ -110,7 +110,7 @@ def extract_lycoris_locon( # Run the command subprocess.run(run_cmd, shell=True, env=env) - + log.info('Done extracting...') @@ -166,7 +166,7 @@ def list_save_to(path): current_save_dir = path return list(list_files(path, exts=[".safetensors"], all=True)) - with gr.Tab("Extract LyCORIS LoCON"): + with gr.Tab("Extract LyCORIS LoCon"): gr.Markdown( "This utility can extract a LyCORIS LoCon network from a finetuned model." ) diff --git a/kohya_gui/git_caption_gui.py b/kohya_gui/git_caption_gui.py index 268f1f3d3..7f9d1aa4c 100644 --- a/kohya_gui/git_caption_gui.py +++ b/kohya_gui/git_caption_gui.py @@ -104,19 +104,19 @@ def list_train_dirs(path): with gr.Row(): caption_ext = gr.Textbox( label='Caption file extension', - placeholder='Extention for caption file. 
eg: .caption, .txt', + placeholder='Extension for caption file (e.g., .caption, .txt)', value='.txt', interactive=True, ) prefix = gr.Textbox( - label='Prefix to add to BLIP caption', + label='Prefix to add to GIT caption', placeholder='(Optional)', interactive=True, ) postfix = gr.Textbox( - label='Postfix to add to BLIP caption', + label='Postfix to add to GIT caption', placeholder='(Optional)', interactive=True, ) diff --git a/kohya_gui/lora_gui.py b/kohya_gui/lora_gui.py index 25fbdf613..bcf34a569 100644 --- a/kohya_gui/lora_gui.py +++ b/kohya_gui/lora_gui.py @@ -844,8 +844,8 @@ def train_model( return text_encoder_lr_float = float(text_encoder_lr) unet_lr_float = float(unet_lr) - - + + # Determine the training configuration based on learning rate values if text_encoder_lr_float == 0 and unet_lr_float == 0: @@ -1035,7 +1035,7 @@ def lora_tab( def list_presets(path): json_files = [] - + # Insert an empty string at the beginning json_files.insert(0, "none") @@ -1133,7 +1133,7 @@ def list_presets(path): minimum=0, maximum=1, ) - + unet_lr = gr.Number( label="Unet learning rate", value="0.0001", @@ -1199,7 +1199,7 @@ def list_presets(path): decompose_both = gr.Checkbox( value=False, label="LoKr decompose both", - info=" Controls whether both input and output dimensions of the layer's weights are decomposed into smaller matrices for reparameterization.", + info="Controls whether both input and output dimensions of the layer's weights are decomposed into smaller matrices for reparameterization.", visible=False, ) train_on_input = gr.Checkbox( diff --git a/kohya_gui/manual_caption_gui.py b/kohya_gui/manual_caption_gui.py index 778305068..8b677a1af 100644 --- a/kohya_gui/manual_caption_gui.py +++ b/kohya_gui/manual_caption_gui.py @@ -303,7 +303,7 @@ def list_images_dirs(path): load_images_button = gr.Button('Load', elem_id='open_folder') caption_ext = gr.Textbox( label='Caption file extension', - placeholder='Extension for caption file. 
eg: .caption, .txt', + placeholder='Extension for caption file (e.g., .caption, .txt)', value='.txt', interactive=True, ) diff --git a/kohya_gui/merge_lycoris_gui.py b/kohya_gui/merge_lycoris_gui.py index 317f815ec..8e562753b 100644 --- a/kohya_gui/merge_lycoris_gui.py +++ b/kohya_gui/merge_lycoris_gui.py @@ -210,7 +210,7 @@ def list_save_to(path): ) with gr.Row(): - is_sdxl = gr.Checkbox(label='is sdxl', value=False, interactive=True) + is_sdxl = gr.Checkbox(label='is SDXL', value=False, interactive=True) is_v2 = gr.Checkbox(label='is v2', value=False, interactive=True) merge_button = gr.Button('Merge model') diff --git a/kohya_gui/resize_lora_gui.py b/kohya_gui/resize_lora_gui.py index 948fd0b25..6856b8198 100644 --- a/kohya_gui/resize_lora_gui.py +++ b/kohya_gui/resize_lora_gui.py @@ -183,7 +183,7 @@ def list_save_to(path): ) with gr.Row(): - verbose = gr.Checkbox(label='Verbose', value=True) + verbose = gr.Checkbox(label='Verbose logging', value=True) save_precision = gr.Radio( label='Save precision', choices=['fp16', 'bf16', 'float'], diff --git a/kohya_gui/wd14_caption_gui.py b/kohya_gui/wd14_caption_gui.py index 6c5095f75..6b9a94b8b 100644 --- a/kohya_gui/wd14_caption_gui.py +++ b/kohya_gui/wd14_caption_gui.py @@ -132,7 +132,7 @@ def list_train_dirs(path): caption_extension = gr.Textbox( label='Caption file extension', - placeholder='Extention for caption file. 
eg: .caption, .txt', + placeholder='Extension for caption file (e.g., .caption, .txt)', value='.txt', interactive=True, ) @@ -230,7 +230,7 @@ def list_train_dirs(path): character_threshold = gr.Slider( value=0.35, label='Character threshold', - info='useful if you want to train with character', + info='Useful if you want to train with character', minimum=0, maximum=1, step=0.05, diff --git a/localizations/zh-TW.json b/localizations/zh-TW.json index 7c4cfac92..8011bfce8 100644 --- a/localizations/zh-TW.json +++ b/localizations/zh-TW.json @@ -1,525 +1,481 @@ - - { - "-Need to add resources here": "-需要在此添加資料", - "(Experimental, Optional) Since the latent is close to a normal distribution, it may be a good idea to specify a value around 1/10 the noise offset.": " (選填,實驗性功能) 由於潛空間接近常態分布,或許指定一個噪聲偏移約 1/10 的數值是個不錯的作法。", - "(Optional) Add training comment to be included in metadata": " (選填) 在訓練的後設資料加入註解。", - "(Optional) Enforce number of epoch": " (選填) 強制指定一個週期 (Epoch) 數量", - "(Optional) Enforce number of steps": " (選填) 強制指定一個總步數數量", - "(Optional) Save only the specified number of models (old models will be deleted)": " (選填) 僅儲存指定數量的模型 (舊有模型將被刪除) ", - "(Optional) Save only the specified number of states (old models will be deleted)": " (選填) 僅儲存指定數量的訓練資料 (舊有訓練資料將被刪除) ", - "(Optional) Stable Diffusion base model": " (選填) 穩定擴散基礎模型", - "(Optional) Stable Diffusion model": " (選填) 穩定擴散模型", - "(Optional) The model is saved every specified steps": " (選填) 模型會在指定的間隔步數後儲存", - "(Optional)": " (選填) ", - "Optional": "選填", - "Optional. 
Se": "選填", - "(Optional) Directory containing the regularisation images": " (選填) 含有正規化圖片的資料夾", - "Eg: asd": "例如:asd", - "Eg: person": "例如:person", - "Folder containing the concepts folders to balance...": "含有要平衡的概念資料夾的資料夾路徑...", - "Balance dataset": "平衡資料集", - "Clamp Quantile": "夾取分位數", - "Minimum difference": "最小化差異", - "network dim for linear layer in fixed mode": "固定模式下線性層的網路維度", - "network dim for conv layer in fixed mode": "固定模式下卷積層的網路維度", - "Sparsity for sparse bias": "稀疏偏差的稀疏度", - "path for the file to save...": "儲存檔案的路徑...", - "Verify": "驗證", - "Verification output": "驗證輸出", - "Verification error": "驗證錯誤", - "New Rank": "新維度 (Network Rank)", - "New Conv Rank": "新卷積維度 (Conv Rank)", - "Directory containing the images to group": "含有要分組的圖片的資料夾路徑", - "Directory where the grouped images will be stored": "要儲存分組圖片的資料夾路徑", - "Group images": "分組圖片", - "Group Images": "分組圖片", - "Captioning": "標記文字", - "Caption images": "標記圖片", - "(Optional) model id for GIT in Hugging Face": " (選填) Hugging Face 中 GIT 的模型 ID", - "Undesired tags": "不需要的標籤", - "(Optional) Separate `undesired_tags` with comma `(,)` if you want to remove multiple tags, e.g. 
`1girl,solo,smile`.": " (選填) 如果要移除多個標籤,請使用逗號 `(,)` 分隔 `undesired_tags`,例如:`1girl,solo,smile`。", - "Prefix to add to WD14 caption": "要加入到 WD14 標記文字的前綴", - "Postfix to add to WD14 caption": "要加入到 WD14 標記文字的後綴", - "This option appends the tags to the existing tags, instead of replacing them.": "此選項將標籤附加到現有標籤,而不是取代它們。", - "Append TAGs": "附加標籤", - "Replace underscores in filenames with spaces": "將檔案名稱中的底線替換為空格", - "Tag subfolders images as well": "標記子資料夾中的圖片", - "Recursive": "遞迴標記", - "Debug while tagging, it will print your image file with general tags and character tags.": "標記時除錯,它會列印出你的圖片檔案與一般標籤和角色標籤。", - "Verbose logging": "詳細記錄", - "Show frequency of tags for images.": "顯示圖片的標籤頻率。", - "Show tags frequency": "顯示標籤頻率", - "Model": "模型", - "Useful to force model re download when switching to onnx": "切換到 onnx 時,強制重新下載模型", - "Force model re-download": "強制重新下載模型", - "General threshold": "一般閾值", - "Adjust `general_threshold` for pruning tags (less tags, less flexible)": "調整 `general_threshold` 以修剪標籤 (標籤越少,彈性越小)", - "Character threshold": "角色閾值", - "useful if you want to train with character": "如果你想要使用角色訓練,這很有用", - "Max dataloader workers": "最大資料載入工作數", - "Comma separated list of tags": "逗號分隔的標籤列表", - "Load 💾": "讀取 💾", - "Import 📄": "匯入 📄", - "Options": "選項", - "Caption Separator": "標記文字分隔符號", - "VAE batch size": "VAE 批次大小", - "Max grad norm": "最大梯度規範 (Max grad norm)", - "Learning rate Unet": "Unet 學習率", - "Set to 0 to not train the Unet": "設為 0 以不訓練 Unet", - "Learning rate TE": "文字編碼器學習率", - "Set to 0 to not train the Text Encoder": "設為 0 以不訓練文字編碼器", - "Tools": "工具", - "Convert to LCM": "轉換模型到 LCM", - "This utility convert a model to an LCM model.": "此工具將模型轉換為 LCM 模型。", - "Stable Diffusion model to convert to LCM": "要轉換為 LCM 的穩定擴散模型", - "Name of the new LCM model": "新 LCM 模型的名稱", - "Path to the LCM file to create": "要建立的 LCM 檔案的路徑", - "type the configuration file path or use the 'Open' button above to select it...": "輸入設定檔案的路徑,或使用上方的「Open 📂」按鈕來選擇它...", - "Adjusts the scale 
of the rank dropout to maintain the average dropout rate, ensuring more consistent regularization across different layers.": "調整維度 (Rank) 捨棄的比例,以維持平均捨棄率,確保在不同層之間更一致的正規化。", - "Rank Dropout Scale": "維度 (Rank) 捨棄比例", - "Selects trainable layers in a network, but trains normalization layers identically across methods as they lack matrix decomposition.": "選擇網路中可訓練的層,但由於缺乏矩陣分解,因此以相同的方式訓練正規化層。", - "Train Norm": "訓練正規化", - "LyCORIS Preset": "LyCORIS 預設範本", - "Presets": "預設範本", - "Efficiently decompose tensor shapes, resulting in a sequence of convolution layers with varying dimensions and Hadamard product implementation through multiplication of two distinct tensors.": "有效地分解張量形狀,從而產生一系列具有不同維度的卷積層,並通過兩個不同張量的乘法實現 Hadamard 乘積。", - "Use Tucker decomposition": "使用 Tucker 分解", - "Train an additional scalar in front of the weight difference, use a different weight initialization strategy.": "在權重差異前訓練額外的標量,使用不同的權重初始化策略。", - "Use Scalar": "使用標量", - "applies an additional scaling factor to the oft_blocks, allowing for further adjustment of their impact on the model's transformations.": "對 oft_blocks 應用額外的縮放因子,從而進一步調整其對模型轉換的影響。", - "Rescaled OFT": "重新縮放 OFT", - "Constrain OFT": "限制 OFT", - "Limits the norm of the oft_blocks, ensuring that their magnitude does not exceed a specified threshold, thus controlling the extent of the transformation applied.": "限制 oft_blocks 的規範,確保其大小不超過指定的閾值,從而控制應用的轉換程度。", - "LoKr factor": "LoKr 因子", - "Set if we change the information going into the system (True) or the information coming out of it (False).": "選用後會改變進入系統的訓練資料,若不選則會改變輸出系統的訓練資料。", - "iA3 train on input": "iA3 訓練輸入", - "Controls whether both input and output dimensions of the layer's weights are decomposed into smaller matrices for reparameterization.": "控制層權重的輸入和輸出維度是否被分解為較小的矩陣以進行重新參數化。", - "LoKr decompose both": "LoKr 同時分解", - "Strength of the LCM": "LCM 的強度", - "folder where the training configuration files will be saved": "訓練設定檔案將會被儲存的資料夾路徑", - "folder where the training images are 
located": "訓練圖片的資料夾路徑", - "folder where the model will be saved": "模型將會被儲存的資料夾路徑", - "Model type": "模型類型", - "Extract LCM": "提取 LCM", - "Verify LoRA": "驗證 LoRA", - "Path to an existing LoRA network weights to resume training from": "要從中繼續訓練的現有 LoRA 網路權重的路徑", - "Seed": "種子", - "(Optional) eg:1234": " (選填) 例如:1234", - "(Optional) eg: \"milestones=[1,10,30,50]\" \"gamma=0.1\"": " (選填) 例如: \"milestones=[1,10,30,50]\" \"gamma=0.1\"", - "(Optional) eg: relative_step=True scale_parameter=True warmup_init=True": " (選填) 例如:relative_step=True scale_parameter=True warmup_init=True", - "(Optional) For Cosine with restart and polynomial only": " (選填) 只適用於餘弦函數並使用重啟 (cosine_with_restart) 和多項式 (polynomial)", - "Network Rank (Dimension)": "網路維度 (Rank)", - "Network Alpha": "網路 Alpha", - "alpha for LoRA weight scaling": "LoRA 權重縮放的 Alpha 值", - "Convolution Rank (Dimension)": "卷積維度 (Rank)", - "Convolution Alpha": "卷積 Alpha", - "Max Norm Regularization is a technique to stabilize network training by limiting the norm of network weights. It may be effective in suppressing overfitting of LoRA and improving stability when used with other LoRAs. See PR #545 on kohya_ss/sd_scripts repo for details. Recommended setting: 1. Higher is weaker, lower is stronger.": "最大規範正規化是一種穩定網路訓練的技術,通過限制網路權重的規範來實現。當與其他 LoRA 一起使用時,它可能會有效地抑制 LoRA 的過度擬合並提高穩定性。詳細資料請見 kohya_ss/sd_scripts Github 上的 PR#545。建議設置:1.0 越高越弱,越低越強。", - "Is a normal probability dropout at the neuron level. In the case of LoRA, it is applied to the output of down. Recommended range 0.1 to 0.5": "是神經元級的正常概率捨棄。在 LoRA 的情況下,它被應用於 Down Sampler 的輸出。建議範圍 0.1 到 0.5", - "can specify `rank_dropout` to dropout each rank with specified probability. Recommended range 0.1 to 0.3": "可以指定 `rank_dropout` 以指定的概率捨棄每個維度。建議範圍 0.1 到 0.3", - "can specify `module_dropout` to dropout each rank with specified probability. 
Recommended range 0.1 to 0.3": "可以指定 `module_dropout` 以指定的概率捨棄每個維度。建議範圍 0.1 到 0.3", - "Folder where the training folders containing the images are located": "訓練資料夾的資料夾路徑,包含圖片", - "(Optional) Folder where where the regularization folders containing the images are located": " (選填) 正規化資料夾的資料夾路徑,包含圖片", - "Folder to output trained model": "輸出訓練模型的資料夾路徑", - "Optional: enable logging and output TensorBoard log to this folder": "選填:啟用記錄並將 TensorBoard 記錄輸出到此資料夾", - "Pretrained model name or path": "預訓練模型名稱或路徑", - "enter the path to custom model or name of pretrained model": "輸入自訂模型的路徑或預訓練模型的名稱", - "(Name of the model to output)": " (輸出的模型名稱)", - "LoRA type": "LoRA 類型", - "(Optional) path to checkpoint of vae to replace for training": " (選填) 要替換訓練的 VAE checkpoint 的路徑", - "(Optional) Use to provide additional parameters not handled by the GUI. Eg: --some_parameters \"value\"": " (選填) 用於提供 GUI 未處理的額外參數。例如:--some_parameters \"value\"", - "Automates the processing of noise, allowing for faster model fitting, as well as balancing out color issues": "自動處理噪聲,可以更快地擬合模型,同時平衡顏色問題", - "Debiased Estimation loss": "偏差估算損失 (Debiased Estimation loss)", - "(Optional) Override number of epoch. Default: 8": " (選填) 覆蓋週期 (Epoch) 數量。預設:8", - "Weights": "權重", - "Down LR weights": "Down LR 權重", - "Mid LR weights": "Mid LR 權重", - "Up LR weights": "Up LR 權重", - "Blocks LR zero threshold": "區塊 LR 零閾值", - "(Optional) eg: 0,0,0,0,0,0,1,1,1,1,1,1": " (選填) 例如:0,0,0,0,0,0,1,1,1,1,1,1", - "(Optional) eg: 0.5": " (選填) 例如:0.5", - "(Optional) eg: 0.1": " (選填) 例如:0.1", - "Specify the learning rate weight of the down blocks of U-Net.": "指定 U-Net 下區塊的學習率權重。", - "Specify the learning rate weight of the mid block of U-Net.": "指定 U-Net 中區塊的學習率權重。", - "Specify the learning rate weight of the up blocks of U-Net. The same as down_lr_weight.": "指定 U-Net 上區塊的學習率權重。與 down_lr_weight 相同。", - "If the weight is not more than this value, the LoRA module is not created. 
The default is 0.": "如果權重不超過此值,則不會創建 LoRA 模組。預設為 0。", - "Blocks": "區塊", - "Block dims": "區塊維度", - "Block alphas": "區塊 Alphas", - "(Optional) eg: 2,2,2,2,4,4,4,4,6,6,6,6,8,6,6,6,6,4,4,4,4,2,2,2,2": " (選填) 例如:2,2,2,2,4,4,4,4,6,6,6,6,8,6,6,6,6,4,4,4,4,2,2,2,2", - "Specify the dim (rank) of each block. Specify 25 numbers.": "指定每個區塊的維度 (Rank)。指定 25 個數字。", - "Specify the alpha of each block. Specify 25 numbers as with block_dims. If omitted, the value of network_alpha is used.": "指定每個區塊的 Alpha。與區塊維度一樣,指定 25 個數字。如果省略,則使用網路 Alpha 的值。", - "Conv": "卷積", - "Conv dims": "卷積維度 (dims)", - "Conv alphas": "卷積 Alphas", - "Extend LoRA to Conv2d 3x3 and specify the dim (rank) of each block. Specify 25 numbers.": "將 LoRA 擴展到 Conv2d 3x3,並指定每個區塊的維度 (Rank)。指定 25 個數字。", - "Specify the alpha of each block when expanding LoRA to Conv2d 3x3. Specify 25 numbers. If omitted, the value of conv_alpha is used.": "將 LoRA 擴展到 Conv2d 3x3 時,指定每個區塊的 Alpha。指定 25 個數字。如果省略,則使用卷積 Alpha 的值。", - "Weighted captions": "加權標記文字", - "About SDXL training": "關於 SDXL 訓練", - "Adaptive noise scale": "自適應噪聲比例", - "Additional parameters": "額外參數", - "Advanced options": "進階選項", - "Advanced parameters": "進階參數", - "Advanced": "進階", - "ashleykleynhans runpod docker builds": "ashleykleynhans runpod docker 建構", - "Automatically determine the dim(rank) from the weight file.": "從指定的權重檔案自動決定 dim(rank)。", - "Autosave": "自動儲存", - "Basic Captioning": "基本標記", - "Basic": "基本", - "Batch size": "批次大小", - "BLIP Captioning": "BLIP 標記", - "Bucket resolution steps": "分桶解析度間隔", - "Built with Gradio": "使用 Gradio 建構", - "Cache latents to disk": "暫存潛空間資料到硬碟", - "Cache latents": "暫存潛空間資料", - "Caption file extension": "標記檔案副檔名", - "Caption Extension": "標記檔案副檔名", - "(Optional) Extension for caption files. 
default: .caption": " (選填) 標記檔案的副檔名。預設:.caption", - "Caption text": "標記文字", - "caption": "標記", - "Change History": "變更記錄", - "Class prompt": "類 (Class) 提詞", - "Color augmentation": "顏色增強", - "Configuration file": "設定檔", - "constant_with_warmup": "常數並使用預熱 (constant_with_warmup)", - "constant": "常數 (constant)", - "Conv Dimension (Rank)": "卷積維度 (Rank)", - "Conv Dimension": "卷積維度", - "Convert model": "轉換模型", - "Copy info to Folders Tab": "複製資訊到資料夾頁籤", - "cosine_with_restarts": "餘弦函數並使用重啟 (cosine_with_restarts)", - "cosine": "餘弦函數 (cosine)", - "CrossAttention": "交叉注意力", - "DANGER!!! -- Insecure folder renaming -- DANGER!!!": "危險!!! -- 不安全的資料夾重新命名 -- 危險!!!", - "Dataset folder": "資料集資料夾", - "Dataset preparation": "資料集準備", - "Dataset Preparation": "資料集準備", - "Dataset repeats": "資料集重複數", - "Desired LoRA rank": "希望 LoRA 的維度 (Rank)", - "Destination training directory": "訓練結果資料夾", - "Device": "裝置", - "DIM from weights": "從權重讀取 DIM", - "Directory containing the images to caption": "含有需標記的圖片資料夾", - "Directory containing the training images": "訓練的圖片資料夾", - "Directory where formatted training and regularisation folders will be placed": "訓練與正規化資料夾將會被取代", - "Disable CP decomposition": "停用 CP 分解法", - "Do not copy other files in the input folder to the output folder": "不要將輸入資料夾中的其他檔案,複製到輸出資料夾", - "Do not copy other files": "不複製其他檔案", - "Don't upscale bucket resolution": "不要放大分桶解析度", - "Dreambooth/LoRA Dataset balancing": "Dreambooth/LoRA 資料集平衡", - "Dreambooth/LoRA Folder preparation": "Dreambooth/LoRA 準備資料夾", - "Dropout caption every n epochs": "在每 N 個週期 (Epoch) 丟棄標記", - "DyLoRA model": "DyLoRA 模型", - "Dynamic method": "壓縮演算法", - "Dynamic parameter": "壓縮參數", - "e.g., \"by some artist\". Leave empty if you only want to add a prefix or postfix.": "例如,\"由某個藝術家創作\"。如果你只想加入前綴或後綴,請留空白。", - "e.g., \"by some artist\". 
Leave empty if you want to replace with nothing.": "例如,\"由某個藝術家創作\"。如果你想用空值取代,請留空白。", - "Enable buckets": "啟用資料桶", - "Enable for Hugging Face's stabilityai models": "啟用 HuggingFace 的 stabilityai 模型", - "Enter one sample prompt per line to generate multiple samples per cycle. Optional specifiers include: --w (width), --h (height), --d (seed), --l (cfg scale), --s (sampler steps) and --n (negative prompt). To modify sample prompts during training, edit the prompt.txt file in the samples directory.": "每行輸入一個提示詞來生成每個訓練週期的輸出範本。可以選擇指定的參數,包括:--w (寬度) ,--h (高度) ,--d (種子) ,--l (CFG 比例) ,--s (採樣器步驟) 和 --n (負面提示詞) 。如果要在訓練週期中修改提示詞,請修改範本目錄中的 prompt.txt 檔案。", - "Epoch": "週期 (Epoch)", - "Error": "錯誤", - "Example of the optimizer settings for Adafactor with the fixed learning rate:": "固定學習率 Adafactor 優化器的設定範例:", - "Extract DyLoRA": "提取 DyLoRA", - "Extract LoRA model": "提取 LoRA模型", - "Extract LoRA": "提取 LoRA", - "Extract LyCORIS LoCon": "提取 LyCORIS LoCon", - "Extract LyCORIS LoCON": "提取 LyCORIS LoCON", - "FileNotFoundError": "錯誤!檔案找不到", - "Find text": "尋找文字", - "Finetune": "微調", - "Finetuned model": "微調模型", - "Finetuning Resource Guide": "微調資源指南", - "fixed": "固定", - "Flip augmentation": "翻轉增強", - "float16": "float16", - "Folders": "資料夾", - "U-Net and Text Encoder can be trained with fp8 (experimental)": "U-Net 與 Text Encoder 可以使用 fp8 訓練 (實驗性功能)", - "fp8 base training (experimental)": "使用 fp8 基礎訓練 (實驗性功能)", - "Full bf16 training (experimental)": "完整使用 bf16 訓練 (實驗性功能)", - "Full fp16 training (experimental)": "完整使用 fp16 訓練 (實驗性功能)", - "Generate caption files for the grouped images based on their folder name": "根據圖片的資料夾名稱生成標記文字檔案", - "Generate caption metadata": "生成標記文字後設資料", - "Generate Captions": "生成標記文字", - "Generate image buckets metadata": "生成圖像分桶後設資料", - "GIT Captioning": "GIT 標記文字", - "Gradient accumulate steps": "梯度累加步數", - "Gradient checkpointing": "梯度檢查點", - "Group size": "群組大小", - "Guidelines for SDXL Finetuning": "SDXL 微調指南", - "Guides": "指南", - "How to Create a LoRA Part 
1: Dataset Preparation:": "如何建立 LoRA 第 1 部份:資料集準備:", - "If unchecked, tensorboard will be used as the default for logging.": "如果不勾選,Tensorboard 將會使用預設的紀錄方式。", - "If you have valuable resources to add, kindly create a PR on Github.": "如果你有有價值的資源要增加,請在 Github 上建立一個 PR。", - "Ignore Imported Tags Above Word Count": "略過高於字數數量的標記標籤", - "Image folder to caption": "要加入標記的圖片資料夾", - "Image folder": "圖片資料夾", - "Include images in subfolders as well": "包含子資料夾中的圖片", - "Include Subfolders": "包含子資料夾", - "Init word": "初始化標記文字", - "Input folder": "輸入資料夾", - "Install Location": "安裝位置", - "Installation": "安裝", - "Instance prompt": "實例 (Instance) 提示詞", - "is SDXL": "SDXL 模型", - "is v2": "SD2 模型", - "Keep n tokens": "保留 N 個提示詞", - "Launching the GUI on Linux and macOS": "在 Linux/macOS 上啟動 GUI", - "Launching the GUI on Windows": "在 Windows 上啟動 GUI", - "Learning rate": "學習率", - "Load precision": "讀取精度", - "adafactor": "自適應學習 (adafactor)", - "linear": "線性 (linear)", - "Linux and macOS Upgrade": "Linux/macOS 升級", - "Linux and macOS": "Linux/macOS", - "Linux Pre-requirements": "Linux Pre-requirements", - "Load": "載入", - "Loading...": "載入中...", - "Load finetuned model to": "將微調模型讀取至", - "Load Stable Diffusion base model to": "將穩定擴散基礎模型讀取至", - "Local docker build": "Docker 建構", - "Logging folder": "記錄資料夾", - "LoRA model \"A\"": "LoRA 模型 \"A\"", - "LoRA model \"B\"": "LoRA 模型 \"B\"", - "LoRA model \"C\"": "LoRA 模型 \"C\"", - "LoRA model \"D\"": "LoRA 模型 \"D\"", - "LoRA model": "LoRA 模型", - "LoRA network weights": "LoRA 網路權重", - "LoRA": "LoRA", - "LR number of cycles": "學習率週期數", - "LR power": "學習率乘冪", - "LR scheduler extra arguments": "學習率調度器額外參數", - "LR Scheduler": "學習率調度器", - "LR warmup (% of total steps)": "學習率預熱 (總步數的百分比)", - "LyCORIS model": "LyCORIS 模型", - "Macos is not great at the moment.": "目前 MacOS 支援並不是很好。", - "Manual Captioning": "手動標記文字", - "Manual installation": "手動安裝", - "Max bucket resolution": "最大資料儲存桶解析度", - "Max length": "最大長度", - "Max num workers for DataLoader": 
"資料工作載入的最大工作數量", - "Number of processes": "進程數量", - "Number of machines": "機器數量", - "Multi GPU": "多個 GPU", - "GPU IDs": "GPU ID", - "example: 0,1": "例如:0,1", - "Max resolution": "最大解析度", - "Max Timestep": "最大時序步數", - "Max Token Length": "最大標記長度", - "Max train epoch": "最大訓練週期 (Epoch) 數", - "Max train steps": "最大訓練總步數", - "Maximum bucket resolution": "最大資料儲存桶解析度", - "Maximum size in pixel a bucket can be (>= 64)": "最大資料儲存桶解析度可達 (>= 64) ", - "Memory efficient attention": "高效記憶體注意力區塊處理", - "Merge LoRA (SVD)": "合併 LoRA (SVD) ", - "Merge LoRA": "合併 LoRA", - "Merge LyCORIS": "合併 LyCORIS", - "Merge model": "合併模型", - "Merge precision": "合併精度", - "Merge ratio model A": "模型 A 合併比例", - "Merge ratio model B": "模型 B 合併比例", - "Merge ratio model C": "模型 C 合併比例", - "Merge ratio model D": "模型 D 合併比例", - "Min bucket resolution": "最小資料儲存桶解析度", - "Min length": "最小長度", - "Min SNR gamma": "最小 SNR gamma", - "Min Timestep": "最小時序步數", - "Minimum bucket resolution": "最小資料儲存桶解析度", - "Minimum size in pixel a bucket can be (>= 64)": "最小資料儲存桶解析度可達 (>= 64) ", - "Mixed precision": "混合精度", - "Mnimum difference": "最小化差異", - "Mode": "模式", - "Model A merge ratio (eg: 0.5 mean 50%)": "模型 A 合併比率 (例如:0.5 指的是 50%) ", - "Model B merge ratio (eg: 0.5 mean 50%)": "模型 B 合併比率 (例如:0.5 指的是 50%) ", - "Model C merge ratio (eg: 0.5 mean 50%)": "模型 C 合併比率 (例如:0.5 指的是 50%) ", - "Model D merge ratio (eg: 0.5 mean 50%)": "模型 D 合併比率 (例如:0.5 指的是 50%) ", - "Model output folder": "模型輸出資料夾", - "Model output name": "模型輸出資料夾", - "Model Quick Pick": "快速選擇模型", - "Module dropout": "模型捨棄", - "Network Dimension (Rank)": "網路維度 (Rank)", - "Network Dimension": "網路維度", - "Network dropout": "網路捨棄", - "No module called tkinter": "沒有名稱為 tkinter 的模組", - "No token padding": "不做提示詞填充", - "Noise offset type": "噪聲偏移類型", - "Noise offset": "噪聲偏移", - "Number of beams": "beam 的數量", - "Number of CPU threads per core": "每個 CPU 核心的線程數", - "Number of images to group together": "要一起分組的圖片數量", - "Number of updates steps to accumulate before performing a 
backward/update pass": "執行反向/更新傳遞之前,需要累積的更新步驟數", - "Number of workers": "Worker 數量", - "object template": "物件樣版", - "Only for SD v2 models. By scaling the loss according to the time step, the weights of global noise prediction and local noise prediction become the same, and the improvement of details may be expected.": "僅適用於 SD v2 模型。通過根據時序步數的縮放損失,整體的噪聲預測與局部的噪聲預測的權重會變得相同,以此希望能改善細節。", - "only for SDXL": "僅適用於 SDXL", - "Open": "開啟", - "Optimizer extra arguments": "優化器額外參數", - "Optimizer": "優化器", - "Optional: CUDNN 8.6": "可選:CUDNN 8.6", - "Original": "原始", - "Output folder": "輸出資料夾", - "Output": "輸出", - "Overwrite existing captions in folder": "覆蓋資料夾中現有的提示詞", - "Page File Limit": "分頁檔案限制", - "PagedAdamW8bit": "PagedAdamW8bit", - "PagedLion8bit": "PagedLion8bit", - "Parameters": "參數", - "path for the checkpoint file to save...": "儲存 checkpoint 檔案路徑...", - "path for the LoRA file to save...": "儲存 LoRA 檔案路徑...", - "path for the new LoRA file to save...": "儲存新 LoRA 檔案路徑...", - "path to \"last-state\" state folder to resume from": "用來繼續訓練的 \"最後狀態\" 資料夾路徑", - "Path to the DyLoRA model to extract from": "要提取 DyLoRA 模型的路徑", - "Path to the finetuned model to extract": "要提取的微調模型的路徑", - "Path to the LoRA A model": "LoRA A 模型的路徑", - "Path to the LoRA B model": "LoRA B 模型的路徑", - "Path to the LoRA C model": "LoRA C 模型的路徑", - "Path to the LoRA D model": "LoRA D 模型的路徑", - "Path to the LoRA model to verify": "要驗證的 LoRA 模型的路徑", - "Path to the LoRA to resize": "要調整大小的 LoRA 的路徑", - "Path to the LyCORIS model": "LyCORIS 模型的路徑", - "path where to save the extracted LoRA model...": "儲存提取出的 LoRA 模型的路徑...", - "Persistent data loader": "持續資料載入器", - "polynomial": "多項式 (polynomial)", - "Postfix to add to BLIP caption": "添加到 BLIP 提示詞的後綴", - "Postfix to add to caption": "添加到提示詞的後綴", - "Pre-built Runpod template": "預先建構的 Runpod 樣版", - "Prefix to add to BLIP caption": "添加到 BLIP 提示詞的前綴", - "Prefix to add to caption": "添加到提示詞的前綴", - "Prepare training data": "準備訓練資料集", - "Print training command": 
"印出訓練指令", - "Prior loss weight": "正規化驗證損失權重", - "Prodigy": "Prodigy", - "Provide a SD file path IF you want to merge it with LoRA files": "如果你要合併 LoRA 檔案,請提供 SD 檔案資料夾路徑", - "Provide a SD file path that you want to merge with the LyCORIS file": "請提供你想要與 LyCORIS 檔案合併的 SD 檔案資料夾路徑", - "PyTorch 2 seems to use slightly less GPU memory than PyTorch 1.": "PyTorch 2 似乎使用的 GPU 記憶體比 PyTorch 1 略少。", - "Quick Tags": "快速標記", - "Random crop instead of center crop": "使用隨機裁切 (而非中心裁切)", - "Rank dropout": "維度捨棄", - "Rate of caption dropout": "提示詞捨棄比例", - "Recommended value of 0.5 when used": "若使用時,建議使用 0.5", - "Recommended value of 5 when used": "若使用時,建議使用 5", - "recommended values are 0.05 - 0.15": "若使用時,建議使用 0.05 - 0.15", - "Regularisation folder": "正規化資料夾", - "Regularisation images": "正規化圖片", - "Repeats": "重複", - "Replacement text": "取代文字", - "Required bitsandbytes >= 0.36.0": "需要 bitsandbytes >= 0.36.0", - "Resize LoRA": "調整 LoRA 尺寸", - "Resize model": "調整模型大小", - "Resolution (width,height)": "解析度 (寬度, 高度) ", - "Resource Contributions": "資源貢獻者", - "Resume from saved training state": "從儲存的狀態繼續訓練", - "Resume TI training": "恢復 TI 訓練", - "Runpod": "Runpod", - "Sample every n epochs": "每 N 個時期 (Epoch) 進行範本取樣", - "Sample every n steps": "每 N 個步數進行範本取樣", - "Sample image generation during training": "在訓練期間生成取樣圖片", - "Sample prompts": "取樣範本提示詞提示", - "Sample sampler": "取樣範本採樣器", - "Samples": "範本", - "Save dtype": "儲存數據類型", - "Save every N epochs": "每 N 個週期 (Epoch) 儲存", - "Save every N steps": "每 N 個步驟儲存", - "Save last N steps state": "儲存最後 N 個步驟的訓練狀態", - "Save last N steps": "儲存最後 N 個步驟", - "Save precision": "儲存精度", - "Save to": "儲存到", - "Save trained model as": "儲存訓練模型為", - "Save training state": "儲存訓練狀態", - "Save": "儲存", - "Scale v prediction loss": "縮放 v 預測損失 (v prediction loss)", - "Scale weight norms": "縮放權重標準", - "SD Model": "SD 模型", - "SDXL model": "SDXL 模型", - "Set the Max resolution to at least 1024x1024, as this is the standard resolution for SDXL. 
": "最大解析度最少設定為 1024x1024,因為這是 SDXL 的標準解析度。", - "Set the Max resolution to at least 1024x1024, as this is the standard resolution for SDXL.": "最大解析度最少設定為 1024x1024,因為這是 SDXL 的標準解析度。", - "Setup": "設定", - "SGDNesterov": "SGDNesterov", - "SGDNesterov8bit": "SGDNesterov8bit", - "Shuffle caption": "打亂提示詞", - "Source LoRA": "來源 LoRA", - "Source model type": "來源模型類型", - "Source model": "來源模型", - "Sparsity": "稀疏性", - "Stable Diffusion base model": "穩定擴散基礎模型", - "Stable Diffusion original model: ckpt or safetensors file": "穩定擴散原始模型:ckpt 或 safetensors 檔案", - "Start tensorboard": "啟動 TensorBoard", - "Start training": "開始訓練", - "Starting GUI Service": "啟動 GUI 服務", - "Stop tensorboard": "停止 TensorBoard", - "Stop text encoder training (% of total steps)": "停止文字編碼器訓練(總步數的百分比)", - "Stop training": "停止訓練", - "style template": "風格樣版", - "sv_fro": "sv_fro", - "Target model folder": "目標模型資料夾", - "Target model name": "目標模型名稱", - "Target model precision": "目標模型精度", - "Target model type": "目標模型類型", - "Template": "樣版", - "Text Encoder learning rate": "文字編碼器學習率", - "SDXL Specific Parameters": "SDXL 特定參數", - "Cache the outputs of the text encoders. This option is useful to reduce the GPU memory usage. This option cannot be used with options for shuffling or dropping the captions.": "暫存文字編碼器的輸出。這個選項有助於減少 GPU 記憶體的使用。這個選項不能與打亂或丟棄提示詞的選項一起使用。", - "Cache text encoder outputs": "暫存文字編碼器輸出", - "Disable the half-precision (mixed-precision) VAE. VAE for SDXL seems to produce NaNs in some cases. 
This option is useful to avoid the NaNs.": "停用半精度 (混合精度) VAE。在某些情況下,SDXL 的 VAE 似乎會產生 NaN。這個選項有助於避免 NaN。", - "No half VAE": "不使用半精度 VAE", - "The fine-tuning can be done with 24GB GPU memory with the batch size of 1.": "微調可以再使用 1 個批次大小的情況下,在 24GB GPU 記憶體的狀態下完成。", - "The GUI allows you to set the training parameters and generate and run the required CLI commands to train the model.": "此 GUI 允許你設定訓練參數,並產生執行模型訓練所需要的 CLI 指令。", - "This guide is a resource compilation to facilitate the development of robust LoRA models.": "該指南是一個資源彙整,以促進強大LoRA模型的開發。", - "This section provide Dreambooth tools to help setup your dataset...": "這些選擇幫助設置自己的資料集", - "This section provide LoRA tools to help setup your dataset...": "本節提供 LoRA 工具以幫助您設置資料集...", - "This section provide Various Finetuning guides and information...": "本節提供各種微調指南和訊息", - "This utility allows quick captioning and tagging of images.": "此工具允許快速地為圖像添加標題和標籤。", - "This utility allows you to create simple caption files for each image in a folder.": "此工具允許您為資料夾中的每個圖片建立簡單的標籤文件。", - "This utility can be used to convert from one stable diffusion model format to another.": "該工具可用於將一個穩定擴散模型格式轉換為另一種格式", - "This utility can extract a DyLoRA network from a finetuned model.": "該工具可以從微調模型中提取 DyLoRA 網絡。", - "This utility can extract a LoRA network from a finetuned model.": "該工具可以從微調模型中提取 LoRA 網絡。", - "This utility can extract a LyCORIS LoCon network from a finetuned model.": "工具可以從微調模型中提取 LyCORIS LoCon 網絡。", - "This utility can merge a LyCORIS model into a SD checkpoint.": "該工具可以將 LyCORIS 模型合並到 SD 模型中。", - "This utility can merge two LoRA networks together into a new LoRA.": "該工具可以將兩個 LoRA 網絡合並為一個新的 LoRA。", - "This utility can merge up to 4 LoRA together or alternatively merge up to 4 LoRA into a SD checkpoint.": "該工具可以合並多達 4 個LoRA,或者選擇性地將多達 4 個 LoRA 合並到 SD 模型中。", - "This utility can resize a LoRA.": "該工具可以調整 LoRA 的大小。", - "This utility can verify a LoRA network to make sure it is properly trained.": "該工具可以驗證 LoRA 網絡以確保其得到適當的訓練。", - "This 
utility uses BLIP to caption files for each image in a folder.": "此工具使用 BLIP 為資料夾中的每張圖像添加標籤。", - "This utility will create the necessary folder structure for the training images and optional regularization images needed for the kohys_ss Dreambooth/LoRA method to function correctly.": "此工具將為 kohys_ss Dreambooth/LoRA 方法正常運行所需的訓練圖片和正規化圖片(可選)建立必要的資料夾結構。", - "This utility will ensure that each concept folder in the dataset folder is used equally during the training process of the dreambooth machine learning model, regardless of the number of images in each folder. It will do this by renaming the concept folders to indicate the number of times they should be repeated during training.": "此工具將確保在訓練 dreambooth 機器學習模型的過程中,資料集資料夾中的每個概念資料夾都將被平等地使用,無論每個資料夾中有多少圖像。它將通過重命名概念資料夾來指示在訓練期間應重覆使用它們的次數。", - "This utility will group images in a folder based on their aspect ratio.": "此工具將根據它們的縱橫比將文件夾中的圖像分組。", - "This utility will use GIT to caption files for each images in a folder.": "此工具使用 GIT 為資料夾中的每張圖像添加標籤。", - "This utility will use WD14 to caption files for each images in a folder.": "此工具使用 WD14 為資料夾中的每張圖像添加標籤。", - "Tips for SDXL training": "SDXL 訓練提示", - "Token string": "標記符號", - "Train a custom model using kohya finetune python code...": "使用 kohya finetune Python 程式訓練自定義模型", - "Train a custom model using kohya train network LoRA python code...": "使用 kohya LoRA Python 程式訓練自定義模型", - "Train batch size": "訓練批次大小", - "Train Network": "訓練網絡", - "Train text encoder": "訓練文字編碼器", - "Train U-Net only.": "僅訓練 U-Net", - "Training config folder": "訓練設定資料夾", - "Training Image folder": "訓練圖片資料夾", - "Training images": "訓練圖片", - "Training steps per concept per epoch": "每個週期每個概念的訓練步驟", - "Training": "訓練", - "Troubleshooting": "故障排除", - "Tutorials": "教學", - "Unet learning rate": "UNet 學習率", - "UNet linear projection": "UNet 線性投影", - "Upgrading": "升级", - "Use --cache_text_encoder_outputs option and caching latents.": "使用 --cache_text_encoder_outputs 選項來暫存潛空間。", - "Use Adafactor optimizer. 
RMSprop 8bit or Adagrad 8bit may work. AdamW 8bit doesn’t seem to work.": "使用 Adafactor 優化器。 RMSprop 8bit 或 Adagrad 8bit 可能有效。 AdamW 8bit 好像無法運作。", - "Use beam search": "使用 beam 搜尋", - "Use gradient checkpointing.": "使用梯度檢查點。", - "Use latent files": "使用潛空間檔案", - "Use sparse biais": "使用使用稀疏偏差", - "Use onnx": "使用 ONNX", - "Users can obtain and/or generate an api key in the their user settings on the website: https://wandb.ai/login": "使用者可以在以下網站的用戶設定中取得,或產生 API 金鑰:https://wandb.ai/login", - "V Pred like loss": "V 預測損失", - "Values greater than 0 will make the model more img2img focussed. 0 = image only": "大於 0 的數值會使模型更加聚焦在 img2img 上。0 表示僅關注於圖像生成", - "Values lower than 1000 will make the model more img2img focussed. 1000 = noise only": "小於 1000 的數值會使模型更加聚焦在 img2img 上。1000 表示僅使用噪聲生成圖片", - "Vectors": "向量", - "Verbose": "詳細輸出", - "WANDB API Key": "WANDB API 金鑰", - "WANDB Logging": "WANDB 紀錄", - "WARNING! The use of this utility on the wrong folder can lead to unexpected folder renaming!!!": "警告!在錯誤的資料夾使用此工具,可能會意外導致資料夾被重新命名!!!", - "WD14 Captioning": "WD14 提詞", - "Windows Upgrade": "Windows 升级", - "Train a custom model using kohya dreambooth python code...": "使用 kohya dreambooth Python 程式訓練自定義模型", - "Training comment": "訓練註解", - "Train a TI using kohya textual inversion python code...": "使用 kohya textual inversion Python 程式訓練 TI 模型" +{ + "WARNING! 
The use of this utility on the wrong folder can lead to unexpected folder renaming!!!": "警告!在錯誤的資料夾上使用此工具可能導致意外的資料夾重新命名!!!", + "(Experimental, Optional) Since the latent is close to a normal distribution, it may be a good idea to specify a value around 1/10 the noise offset.": " (選填,實驗性功能) 由於潛空間接近常態分布,或許指定一個噪聲偏移約 1/10 的數值是個不錯的作法。", + "(Name of the model to output)": "(要輸出的模型名稱)", + "(Optional) Add training comment to be included in metadata": "(選填) 在訓練的後設資料 (metadata) 加入註解。", + "(Optional) Enforce number of epoch": " (選填) 強制指定一個週期 (Epoch) 數量", + "(Optional) Enforce number of steps": " (選填) 強制指定一個總步數數量", + "(Optional) Extension for caption files. default: .caption": " (選填) 標記檔案的副檔名。預設:.caption", + "(Optional) For Cosine with restart and polynomial only": " (選填) 只適用於餘弦函數並使用重啟 (cosine_with_restart) 和多項式 (polynomial)", + "(Optional) Override number of epoch. Default: 8": " (選填) 覆蓋週期 (Epoch) 數量。預設:8", + "(Optional) Save only the specified number of models (old models will be deleted)": " (選填) 僅儲存指定數量的模型 (舊有模型將被刪除) ", + "(Optional) Save only the specified number of states (old models will be deleted)": " (選填) 僅儲存指定數量的訓練資料 (舊有訓練資料將被刪除) ", + "(Optional) Separate `undesired_tags` with comma `(,)` if you want to remove multiple tags, e.g. `1girl,solo,smile`.": " (選填) 如果要移除多個標籤,請使用逗號 `(,)` 分隔不需要的標籤,例如:`1girl,solo,smile`。", + "(Optional) The model is saved every specified steps": " (選填) 模型會在指定的間隔步數後儲存", + "(Optional) Use to provide additional parameters not handled by the GUI. 
Eg: --some_parameters \"value\"": " (選填) 用於提供 GUI 未提供的額外參數。例如:--some_parameters \"value\"", + "(Optional) eg: \"milestones=[1,10,30,50]\" \"gamma=0.1\"": " (選填) 例如: \"milestones=[1,10,30,50]\" \"gamma=0.1\"", + "(Optional) eg: 0,0,0,0,0,0,1,1,1,1,1,1": " (選填) 例如:0,0,0,0,0,0,1,1,1,1,1,1", + "(Optional) eg: 0.1": " (選填) 例如:0.1", + "(Optional) eg: 0.5": " (選填) 例如:0.5", + "(Optional) eg: 2,2,2,2,4,4,4,4,6,6,6,6,8,6,6,6,6,4,4,4,4,2,2,2,2": " (選填) 例如:2,2,2,2,4,4,4,4,6,6,6,6,8,6,6,6,6,4,4,4,4,2,2,2,2", + "(Optional) eg: relative_step=True scale_parameter=True warmup_init=True": " (選填) 例如:relative_step=True scale_parameter=True warmup_init=True", + "(Optional) eg:1234": " (選填) 例如:1234", + "(Optional) model id for GIT in Hugging Face": " (選填) Hugging Face 中 GIT 的模型 ID", + "(Optional)": "(選填)", + "< Prev": "< 上一個", + "A two-step approach utilizing tensor decomposition and fine-tuning to accelerate convolution layers in large neural networks, resulting in significant CPU speedups with minor accuracy drops.": "一種利用張量分解和微調的兩步方法,以加速大型神經網路中的卷積層,從而實現顯著的 CPU 加速和輕微的精度下降。", + "Adaptive noise scale": "自適應噪聲比例", + "Additional parameters": "額外參數", + "Adjust `general_threshold` for pruning tags (less tags, less flexible)": "調整一般閾值以修剪標籤 (標籤越少,越不靈活)", + "Adjusts the scale of the rank dropout to maintain the average dropout rate, ensuring more consistent regularization across different layers.": "調整維度 (Rank) 丟棄比例的比例,以保持平均丟棄率,確保在不同層之間更一致的正規化。", + "Advanced Configuration": "進階設定", + "Advanced options": "進階選項", + "Advanced parameters": "進階參數", + "Advanced": "進階", + "Appebnd TAGs": "附加標籤", + "Autosave": "自動儲存", + "Automates the processing of noise, allowing for faster model fitting, as well as balancing out color issues": "自動處理噪聲,可以更快地擬合模型,同時平衡顏色問題", + "Automatically determine the dim(rank) from the weight file.": "從權重檔案自動取用維度 DIM(Rank)。", + "BLIP Captioning": "BLIP 標記", + "Balance dataset": "平衡資料集", + "Basic Captioning": "基本標記", + "Batch size": "批次大小", + "Block LR (SDXL)": "區塊學習率", + "Block 
alphas": "區塊 Alphas", + "Block dims": "區塊維度", + "Blocks LR zero threshold": "區塊 LR 零閾值", + "Blocks": "區塊", + "Bucket resolution steps need to be greater than 0": "資料儲存桶解析度步數需要大於 0", + "Bucket resolution steps": "分桶解析度間隔", + "Cache latents to disk": "暫存潛空間資料到硬碟", + "Cache latents": "暫存潛空間資料", + "Cache text encoder outputs": "暫存文本編碼器輸出", + "Cache the outputs of the text encoders. This option is useful to reduce the GPU memory usage. This option cannot be used with options for shuffling or dropping the captions.": "暫存文本編碼器的輸出。此選項有助於減少 GPU 記憶體的使用。此選項不能與打亂或丟棄提示詞 (Shuffle/Dropout caption) 的選項一起使用。", + "Caption Extension": "標記檔案副檔名", + "Caption Separator": "標記文字分隔符號", + "Caption file extension (e.g., .txt)": "標記文字檔案副檔名 (例如:.txt)", + "Caption file extension": "標記檔案副檔名", + "Caption images": "標記圖片", + "Caption metadata filename": "標記文字後設資料檔案名稱", + "Caption text": "標記文字", + "Captioning": "標記文字", + "Captions": "標記文字", + "Character threshold": "角色閾值", + "Clamp Quantile": "夾取分位數 (Clamp Quantile)", + "Class prompt missing...": "缺少類別提示詞...", + "Class prompt": "類別提示詞", + "Clip skip": "Clip skip", + "Color augmentation": "色彩增強 (Color augmentation)", + "Configuration": "設定", + "Comma separated list of tags": "逗號分隔的標籤列表", + "Constrain OFT": "限制 OFT", + "Controls whether both input and output dimensions of the layer's weights are decomposed into smaller matrices for reparameterization.": "控制層權重的輸入和輸出維度是否被分解為更小的矩陣以進行重新參數化。", + "Conv Dimension (Rank)": "卷積維度 (Rank)", + "Conv Dimension": "卷積維度", + "Conv alphas": "卷積 Alphas", + "Conv dims": "卷積維度 (dims)", + "Conv quantile": "卷積分位數 (Conv quantile)", + "Conv ratio": "卷積比率 (Conv ratio)", + "Conv threshold": "卷積閾值 (Conv threshold)", + "Conv": "卷積", + "Convert to LCM": "轉換為 LCM", + "Convert model": "轉換模型", + "Convolution Alpha": "卷積 Alpha", + "Convolution Rank (Dimension)": "卷積維度 (Rank)", + "Copy info to Folders Tab": "複製資訊到資料夾區塊", + "CrossAttention": "交叉注意力", + "DANGER!!! -- Insecure folder renaming -- DANGER!!!": "危險!!! 
-- 不安全的資料夾重新命名 -- 危險!!!", + "DIM from weights": "從權重讀取 DIM", + "Dataset Preparation": "資料集準備", + "Dataset folder (folder containing the concepts folders to balance...)": "資料集資料夾 (含有要平衡的概念資料夾的資料夾路徑...)", + "Dataset repeats": "資料集重複數", + "Debiased Estimation loss": "偏差估算損失 (Debiased Estimation loss)", + "Debug while tagging, it will print your image file with general tags and character tags.": "標記時進行調試,它將打印您的圖片檔案與一般標籤和角色標籤。", + "Desired LoRA rank": "所需的 LoRA 維度 (Rank)", + "Destination training directory (where formatted training and regularisation folders will be placed)": "訓練的目標資料夾 (格式化的訓練和正規化資料夾將被放置的資料夾)", + "Device": "裝置", + "Disable CP decomposition": "禁用 CP 分解 (CP decomposition)", + "Disable the half-precision (mixed-precision) VAE. VAE for SDXL seems to produce NaNs in some cases. This option is useful to avoid the NaNs.": "禁用半精度 (混合精度) VAE。對於 SDXL,VAE 在某些情況下似乎會產生 NaN。此選項有助於避免 NaN。", + "Do not copy other files in the input folder to the output folder": "不複製輸入資料夾中的其他檔案到輸出資料夾", + "Do not copy other files": "不複製其他檔案", + "Don't upscale bucket resolution": "不要放大分桶解析度", + "Down LR weights": "Down LR 權重", + "Dreambooth/LoRA Dataset balancing": "Dreambooth/LoRA 資料集平衡", + "Dreambooth/LoRA Folder preparation": "Dreambooth/LoRA 資料夾準備", + "Dropout caption every n epochs": "在每 N 個週期 (Epoch) 丟棄標記", + "DyLoRA Unit / Block size": "DyLoRA 單元 / 區塊大小", + "DyLoRA model (path to the DyLoRA model to extract from)": "DyLoRA 模型 (要從中提取的 DyLoRA 模型的檔案路徑)", + "Dynamic method": "動態方法", + "Dynamic parameters": "動態參數", + "Efficiently decompose tensor shapes, resulting in a sequence of convolution layers with varying dimensions and Hadamard product implementation through multiplication of two distinct tensors.": "高效地分解張量形狀,從而產生一系列具有不同維度的卷積層,並通過兩個不同張量的乘法實現哈達瑪乘積。", + "Eg: asd": "例如:asd", + "Eg: person": "例如:person", + "Enable buckets": "啟用資料儲存桶", + "Enable multires noise (recommended values are 6-10)": "啟用多解析度噪聲 (建議使用 6-10)", + "Enter one sample prompt per line to generate multiple samples per 
cycle. Optional specifiers include: --w (width), --h (height), --d (seed), --l (cfg scale), --s (sampler steps) and --n (negative prompt). To modify sample prompts during training, edit the prompt.txt file in the samples directory.": "每行輸入一個提示詞來生成每個訓練週期的輸出範本。可以選擇指定的參數,包括:--w (寬度) ,--h (高度) ,--d (種子) ,--l (CFG 比例) ,--s (採樣器步驟) 和 --n (負面提示詞) 。如果要在訓練週期中修改提示詞,請修改範本目錄中的 prompt.txt 檔案。", + "Epoch": "週期 (Epoch)", + "Error": "錯誤", + "Extend LoRA to Conv2d 3x3 and specify the dim (rank) of each block. Specify 25 numbers.": "將 LoRA 擴展到 Conv2d 3x3,並指定每個區塊的維度 (Rank)。指定 25 個數字。", + "Extension for caption file (e.g., .caption, .txt)": "標記檔案的副檔名(例如: .caption, .txt)", + "Extract DyLoRA": "提取 DyLoRA", + "Extract LCM": "提取 LCM", + "Extract LoRA model": "提取 LoRA 模型", + "Extract LoRA": "提取 LoRA", + "Extract LyCORIS LoCon": "提取 LyCORIS LoCon", + "Find text": "尋找文字", + "Finetuned model (path to the finetuned model to extract)": "微調模型 (Finetuned model)", + "Flip augmentation": "翻轉增強 (Flip augmentation)", + "Folders": "資料夾", + "Force model re-download": "強制重新下載模型", + "Full bf16 training (experimental)": "完整使用 bf16 訓練 (實驗性功能)", + "Full bf16": "完整使用 bf16", + "Full fp16 training (experimental)": "完整使用 fp16 訓練 (實驗性功能)", + "GIT Captioning": "GIT 標記文字", + "GPU IDs": "GPU ID", + "General threshold": "一般閾值", + "Generate Captions": "生成標記文字", + "Generate caption files for the grouped images based on their folder name": "根據資料夾名稱為分組的圖片生成標記文字檔案", + "Generate caption metadata": "生成標記文字後設資料", + "Generate image buckets metadata": "生成圖像分桶後設資料", + "Go >": "前往 >", + "Goto page": "前往頁面", + "Gradient accumulate steps": "梯度累加步數 (Gradient accumulate steps)", + "Gradient checkpointing": "梯度檢查點 (Gradient checkpointing)", + "Group Images": "分組圖片", + "Group images": "分組圖片", + "Group size": "分組大小", + "Guides": "指南", + "If the weight is not more than this value, the LoRA module is not created. 
The default is 0.": "如果權重不超過此值,則不會創建 LoRA 模組。預設為 0。", + "If unchecked, tensorboard will be used as the default for logging.": "如果不勾選,Tensorboard 將會使用預設的紀錄方式。", + "Ignore Imported Tags Above Word Count": "忽略上面單詞計數的匯入標籤", + "Image folder (containing training images subfolders)": "圖片資料夾 (包含訓練圖片與子資料夾)", + "Image folder (containing training images)": "圖片資料夾 (含有訓練圖片)", + "Image folder is missing...": "圖片資料夾遺失...", + "Image folder to caption (containing the images to caption)": "要加入標記的圖片資料夾", + "Import": "匯入", + "Include Subfolders": "包含子資料夾", + "Include images in subfolders as well": "也包含子資料夾中的圖片", + "Input captions": "輸入標記文字", + "Input folder (containing the images to group)": "輸入資料夾 (含有要分組的圖片的資料夾路徑)", + "Input folder is missing...": "輸入資料夾遺失...", + "Instance prompt": "實例提示詞", + "Invalid base model file": "無效的基礎模型檔案", + "Invalid model A file": "無效的模型 A 檔案", + "Invalid model file": "無效的模型檔案", + "Is a normal probability dropout at the neuron level. In the case of LoRA, it is applied to the output of down. 
Recommended range 0.1 to 0.5": "是神經元級的正常概率捨棄。在 LoRA 的情況下,它被應用於 Down Sampler 的輸出。建議範圍 0.1 到 0.5", + "Keep n tokens": "保留 N 個提示詞", + "LR Scheduler": "學習率調度器 (LR Scheduler)", + "LR number of cycles": "學習率重啟週期數 (LR number of cycles)", + "LR power": "學習率乘冪 (LR power)", + "LR scheduler extra arguments": "學習率調度器額外參數", + "LR warmup (% of total steps)": "學習率預熱 (LR warmup, 總步數的 %)", + "Latent metadata filename": "潛空間後設資料檔案名稱", + "Learning rate TE": "文本編碼器學習率", + "Learning rate TE1": "文本編碼器 1 學習率", + "Learning rate TE2": "文本編碼器 2 學習率", + "Learning rate Unet": "U-Net 學習率", + "Learning rate": "學習率", + "Limits the norm of the oft_blocks, ensuring that their magnitude does not exceed a specified threshold, thus controlling the extent of the transformation applied.": "限制 oft_blocks 的規範,確保它們的大小不超過指定的閾值,從而控制應用的轉換程度。", + "Linear quantile": "線性分位數 (Linear quantile)", + "Linear ratio": "線性比率 (Linear ratio)", + "Linear threshold": "線性閾值 (Linear threshold)", + "LoKr decompose both": "LoKr 同時分解", + "LoKr factor": "LoKr 因子", + "LoRA model \"A\" (path to the LoRA A model)": "LoRA 模型 \"A\" (LoRA A 模型的檔案路徑)", + "LoRA model \"B\" (path to the LoRA B model)": "LoRA 模型 \"B\" (LoRA B 模型的檔案路徑)", + "LoRA model \"C\" (path to the LoRA C model)": "LoRA 模型 \"C\" (LoRA C 模型的檔案路徑)", + "LoRA model \"D\" (path to the LoRA D model)": "LoRA 模型 \"D\" (LoRA D 模型的檔案路徑)", + "LoRA model (path to the LoRA model to verify)": "LoRA 模型 (要驗證的 LoRA 模型的檔案路徑)", + "LoRA model types": "LoRA 模型類型", + "LoRA network weights": "LoRA 網路權重", + "Load": "載入", + "Load Stable Diffusion base model to": "載入穩定擴散基礎模型到", + "Load finetuned model to": "載入微調模型到", + "Load precision": "讀取精度", + "Load/Save Config file": "讀取/儲存設定檔案", + "Logging folder (Optional. 
to enable logging and output Tensorboard log)": "紀錄資料夾(選填,啟用紀錄和輸出 Tensorboard 紀錄)", + "LyCORIS model (path to the LyCORIS model)": "LyCORIS 模型 (LyCORIS 模型的檔案路徑)", + "Manual Captioning": "手動標記文字", + "Max Norm Regularization is a technique to stabilize network training by limiting the norm of network weights. It may be effective in suppressing overfitting of LoRA and improving stability when used with other LoRAs. See PR #545 on kohya_ss/sd_scripts repo for details. Recommended setting: 1. Higher is weaker, lower is stronger.": "最大規範正規化是一種穩定網路訓練的技術,通過限制網路權重的規範來實現。當與其他 LoRA 一起使用時,它可能會有效地抑制 LoRA 的過度擬合並提高穩定性。詳細資料請見 kohya_ss/sd_scripts Github 上的 PR#545。建議設置:1.0 越高越弱,越低越強。", + "Max Timestep": "最大時序步數", + "Max Token Length": "最大標記數量", + "Max bucket resolution": "最大資料儲存桶解析度", + "Max dataloader workers": "最大資料加載器工作數", + "Max grad norm": "最大梯度規範 (Max grad norm)", + "Max length": "最大長度", + "Max num workers for DataLoader": "資料工作載入的最大工作數量", + "Max resolution": "最大解析度", + "Max train epoch": "最大訓練週期 (Epoch) 數", + "Max train steps": "最大訓練總步數", + "Maximum bucket resolution": "最大資料儲存桶解析度", + "Maximum size in pixel a bucket can be (>= 64)": "最大資料儲存桶解析度可達 (>= 64) ", + "Memory efficient attention": "高效記憶體注意力區塊處理 (Memory efficient attention)", + "Merge LoRA (SVD)": "合併 LoRA (SVD)", + "Merge LoRA": "合併 LoRA", + "Merge LyCORIS": "合併 LyCORIS", + "Merge model": "合併模型", + "Merge precision": "合併精度", + "Merge ratio model A": "合併比例模型 A", + "Merge ratio model B": "合併比例模型 B", + "Merge ratio model C": "合併比例模型 C", + "Merge ratio model D": "合併比例模型 D", + "Mid LR weights": "Mid LR 權重", + "Min SNR gamma": "Min SNR gamma", + "Min Timestep": "最小時序步數", + "Min bucket resolution": "最小資料儲存桶解析度", + "Min length": "最小長度", + "Minimum bucket resolution": "最小資料儲存桶解析度", + "Minimum difference": "最小化差異 (Minimum difference)", + "Minimum size in pixel a bucket can be (>= 64)": "最小資料儲存桶解析度可達 (>= 64) ", + "Mixed precision": "混合精度", + "Mode": "模式", + "Model A merge ratio (eg: 0.5 mean 50%)": "模型 A 合併比例 (例如:0.5 表示 50%)", + 
"Model B merge ratio (eg: 0.5 mean 50%)": "模型 B 合併比例 (例如:0.5 表示 50%)", + "Model C merge ratio (eg: 0.5 mean 50%)": "模型 C 合併比例 (例如:0.5 表示 50%)", + "Model D merge ratio (eg: 0.5 mean 50%)": "模型 D 合併比例 (例如:0.5 表示 50%)", + "Model type": "模型類型", + "Model": "模型", + "Module dropout": "模型捨棄", + "Multi GPU": "多個 GPU", + "Multires noise iterations": "多解析度噪聲迭代", + "Name of the new LCM model": "新 LCM 模型的名稱", + "Network Alpha": "網路 Alpha", + "Network Dimension (Rank)": "網路維度 (Rank)", + "Network Rank (Dimension)": "網路維度 (Rank)", + "Network Dimension": "網路維度", + "Network dropout": "網路捨棄", + "New Conv Rank": "新卷積維度 (Conv Rank)", + "New Rank": "新維度 (Network Rank)", + "Next >": "下一個 >", + "No half VAE": "不使用半精度 VAE", + "No token padding": "不做提示詞填充 (No token padding)", + "No, get me out of here": "不,讓我離開這裡", + "Noise offset need to be a value between 0 and 1": "噪聲偏移需要是 0 到 1 之間的數值", + "Noise offset type": "噪聲偏移類型", + "Noise offset": "噪聲偏移", + "Number of CPU threads per core": "每個 CPU 核心的線程數", + "Number of beams": "beam 的數量", + "Number of images to group together": "要一起分組的圖片數量", + "Number of machines": "機器數量", + "Number of processes": "進程數量", + "Number of updates steps to accumulate before performing a backward/update pass": "執行反向/更新傳遞之前,需要累積的更新步驟數", + "Number of workers": "Worker 數量", + "Only for SD v2 models. By scaling the loss according to the time step, the weights of global noise prediction and local noise prediction become the same, and the improvement of details may be expected.": "僅適用於 SD v2 模型。通過根據時序步數的縮放損失,整體的噪聲預測與局部的噪聲預測的權重會變得相同,以此希望能改善細節。", + "Options": "選項", + "Optimizer extra arguments": "優化器額外參數", + "Optimizer": "優化器 (Optimizer)", + "Optional": "選填", + "Output \"stop text encoder training\" is not yet supported. 
Ignoring": "輸出「停止文本編碼器訓練」尚未支援。忽略", + "Output": "輸出", + "Output folder (where the grouped images will be stored)": "輸出資料夾 (存放分組的圖片)", + "Output folder to output trained model": "輸出資料夾以輸出訓練模型", + "Overwrite existing captions in folder": "覆蓋資料夾中現有的提示詞", + "Page Number": "頁碼", + "Parameters": "參數", + "Path to an existing LoRA network weights to resume training from": "現有 LoRA 檔案路徑,從現有 LoRA 中繼續訓練", + "Persistent data loader": "持續資料載入器 (Persistent data loader)", + "Please input learning rate values.": "請輸入學習率數值。", + "Please input valid Text Encoder learning rate (between 0 and 1)": "請輸入有效的文本編碼器學習率 (在 0 到 1 之間)", + "Please input valid Unet learning rate (between 0 and 1)": "請輸入有效的 U-Net 學習率 (在 0 到 1 之間)", + "Please provide an extension for the caption files.": "請為標記文字檔案提供一個副檔名。", + "Please provide an extension for the caption files...": "請為標記文字檔案提供一個副檔名...", + "Please provide an output folder...": "請提供一個輸出資料夾...", + "Postfix to add to BLIP caption": "要添加到 BLIP 標記文字的後綴", + "Postfix to add to GIT caption": "要加入到 GIT 標記文字的後綴", + "Postfix to add to WD14 caption": "要加入到 WD14 標記文字的後綴", + "Postfix to add to caption": "添加到提示詞的後綴", + "Prefix to add to BLIP caption": "要添加到 BLIP 標記文字的前綴", + "Prefix to add to GIT caption": "要加入到 GIT 標記文字的前綴", + "Prefix to add to WD14 caption": "要加入到 WD14 標記文字的前綴", + "Prefix to add to caption": "添加到提示詞的前綴", + "Prepare training data": "準備訓練資料", + "Presets": "預設範本", + "Print training command": "印出訓練命令", + "Prior loss weight": "正規化驗證損失權重 (Prior loss weight)", + "Provide a SD file path that you want to merge with the LyCORIS file": "提供您想要與 LyCORIS 檔案合併的 SD 檔案路徑", + "Pretrained model name or path": "預訓練模型名稱或路徑", + "Quick Tags": "快速標記", + "Random crop instead of center crop": "使用隨機裁切 (而非中心裁切)", + "Rank Dropout Scale": "維度 (Rank) 丟棄比例", + "Rank dropout": "維度捨棄", + "Rate of caption dropout": "提示詞捨棄比例", + "Recommended value of 0.5 when used": "若使用時,建議使用 0.5", + "Recommended value of 5 when used": "若使用時,建議使用 5", + "Recommended values are 0.05 - 0.15": 
"若使用時,建議使用 0.05 - 0.15", + "Recommended values are 0.8. For LoRAs with small datasets, 0.1-0.3": "建議使用 0.8。對於小數據集的 LoRA,建議使用 0.1-0.3", + "Recursive": "遞迴", + "Regularisation folder (Optional. containing reqularization images)": "正規化資料夾(選填,包含正規化圖片)", + "Regularisation images (Optional. directory containing the regularisation images)": "正規化圖片 (選填,含有正規化圖片的資料夾)", + "Regularisation images are used... Will double the number of steps required...": "使用了正規化圖片... 將使所需的步數加倍...", + "Repeats": "重複次數", + "Replace underscores in filenames with spaces": "將檔案名稱中的底線替換為空格", + "Replacement text": "取代文字", + "Required bitsandbytes >= 0.36.0": "需要 bitsandbytes >= 0.36.0", + "Rescaled OFT": "重新調整 OFT", + "Resize LoRA": "調整 LoRA 大小", + "Resize model": "調整模型", + "Resolution (width,height)": "解析度 (寬度, 高度) ", + "Resume from saved training state (path to \"last-state\" state folder)": "從儲存的狀態繼續訓練(最後一個儲存的狀態的資料夾路徑)", + "Resume TI training (Optional. Path to existing TI embedding file to keep training)": "繼續 TI 訓練(選填,現有 TI 嵌入檔案的路徑以繼續訓練)", + "Token string": "提示詞字串", + "Init word": "初始化單詞", + "Vectors": "向量", + "Template": "範本", + "SD Model (Optional Stable Diffusion base model)": "SD 模型 (選填,穩定擴散基礎模型)", + "SD Model (Optional. 
Stable Diffusion model path, if you want to merge it with LoRA files)": "SD 模型 (選填,穩定擴散模型路徑,如果您想將其與 LoRA 檔案合併)", + "SDXL model": "SDXL 模型", + "Sample every n epochs": "每 N 個週期 (Epoch) 取樣", + "Sample prompts": "取樣提示詞", + "Sample sampler": "取樣取樣器", + "Samples": "範本", + "Save dtype": "儲存 dtype", + "Save every N epochs": "每 N 個週期 (Epoch) 儲存", + "Save every N steps": "每 N 個步驟儲存", + "Save last N steps state": "儲存最後 N 個步驟的訓練狀態", + "Save last N steps": "儲存最後 N 個步驟", + "Save precision": "儲存精度", + "Save to (path for the LoRA file to save...)": "儲存到 (要儲存的 LoRA 檔案的路徑...)", + "Save to (path for the new LoRA file to save...)": "儲存到 (要儲存的新 LoRA 檔案的路徑...)", + "Save to (path for the checkpoint file to save...)": "儲存到 (要儲存的模型檔案的路徑...)", + "Save to (path for the file to save...)": "儲存到 (要儲存的檔案的路徑...)", + "Save to (path where to save the extracted LoRA model...)": "儲存到 (要儲存提取的 LoRA 模型的檔案路徑...)", + "Save trained model as": "儲存訓練模型類型為", + "Save training state": "儲存訓練狀態", + "Scale v prediction loss": "縮放 v 預測損失 (v prediction loss)", + "Scale weight norms": "縮放權重標準", + "Seed": "種子 (Seed)", + "Selects trainable layers in a network, but trains normalization layers identically across methods as they lack matrix decomposition.": "選擇網路中的可訓練層,但由於缺乏矩陣分解,因此在各種方法中都以相同方式訓練規範化層。", + "Set if we change the information going into the system (True) or the information coming out of it (False).": "設定為 True,若我們改變進入系統的資訊,否則由系統輸出則設定為 False。", + "Set to 0 to not train the Text Encoder 1": "設為 0 以不訓練文本編碼器 1", + "Set to 0 to not train the Text Encoder 2": "設為 0 以不訓練文本編碼器 2", + "Set to 0 to not train the Text Encoder": "設為 0 以不訓練文本編碼器", + "Set to 0 to not train the Unet": "設為 0 以不訓練 U-Net", + "Show frequency of tags for images.": "顯示圖片的標籤頻率。", + "Show tags frequency": "顯示標籤頻率", + "Shuffle caption": "打亂提示詞", + "Source LoRA (path to the LoRA to resize)": "來源 LoRA (要調整大小的 LoRA 的檔案路徑)", + "Source model (path to source model folder of file to convert...)": "來源模型 (要轉換的來源模型的檔案路徑...)", + "Source model type": "來源模型類型", + 
"Sparsity for sparse bias": "稀疏偏差的稀疏度", + "Sparsity": "稀疏度", + "Specify the alpha of each block when expanding LoRA to Conv2d 3x3. Specify 25 numbers. If omitted, the value of conv_alpha is used.": "將 LoRA 擴展到 Conv2d 3x3 時,指定每個區塊的 Alpha。指定 25 個數字。如果省略,則使用卷積 Alpha 的值。", + "Specify the alpha of each block. Specify 25 numbers as with block_dims. If omitted, the value of network_alpha is used.": "指定每個區塊的 Alpha。與區塊維度一樣,指定 25 個數字。如果省略,則使用網路 Alpha 的值。", + "Specify the different learning rates for each U-Net block. Specify 23 values separated by commas like 1e-3,1e-3 ... 1e-3": "為每個 U-Net 區塊指定不同的學習率。輸入 23 個以逗號分隔的數值,例如:1e-3,1e-3 ... 1e-3", + "Specify the dim (rank) of each block. Specify 25 numbers.": "指定每個區塊的維度 (Rank)。指定 25 個數字。", + "Specify the learning rate weight of the down blocks of U-Net.": "指定 U-Net 下區塊的學習率權重。", + "Specify the learning rate weight of the mid block of U-Net.": "指定 U-Net 中區塊的學習率權重。", + "Specify the learning rate weight of the up blocks of U-Net. The same as down_lr_weight.": "指定 U-Net 上區塊的學習率權重。與 down_lr_weight 相同。", + "Stable Diffusion base model (original model: ckpt or safetensors file)": "穩定擴散基礎模型 (原始模型: ckpt 或 safetensors 檔案)", + "Stable Diffusion model to convert to LCM": "要轉換為 LCM 的穩定擴散模型", + "Start training": "開始訓練", + "Start tensorboard": "啟動 Tensorboard", + "Stop text encoder training (% of total steps)": "停止文本編碼器訓練 (總步數的百分比)", + "Stop training": "停止訓練", + "Stop tensorboard": "停止 Tensorboard", + "Strength of the LCM": "LCM 的強度", + "Tag subfolders images as well": "標記子資料夾中的圖片", + "Tags": "標籤", + "Target model folder (path to target model folder of file name to create...)": "目標模型 (要創建的目標模型的檔案路徑...)", + "Target model name": "目標模型名稱", + "Target model type": "目標模型類型", + "Target model precision": "目標模型精度", + "Enable for Hugging Face's stabilityai models": "啟用 Hugging Face 的 stabilityai 模型", + "UNet linear projection": "U-Net 線性投影", + "Tensorboard is already running. 
Terminating existing process before starting new one...": "Tensorboard 已經在運行。在啟動新進程之前終止現有進程...", + "Text Encoder learning rate": "文本編碼器學習率", + "The higher the value, the larger the file. Recommended starting value: 0.75": "數值越高,檔案越大。建議的起始數值:0.75", + "The higher the value, the smaller the file. Recommended starting value: 0.65": "數值越高,檔案越小。建議的起始數值:0.65", + "The higher the value, the smaller the file. Recommended starting value: 0.75": "數值越高,檔案越小。建議的起始數值:0.75", + "The provided DyLoRA model is not a file": "提供的 DyLoRA 模型不是檔案", + "The provided base model is not a file": "提供的基礎模型不是檔案", + "The provided finetuned model is not a file": "提供的微調模型不是檔案", + "The provided model A is not a file": "提供的模型 A 不是檔案", + "The provided model B is not a file": "提供的模型 B 不是檔案", + "The provided model C is not a file": "提供的模型 C 不是檔案", + "The provided model D is not a file": "提供的模型 D 不是檔案", + "The provided model is not a file": "提供的模型不是檔案", + "This option appends the tags to the existing tags, instead of replacing them.": "此選項將標籤附加到現有標籤,而不是替換它們。", + "This section provide Various Finetuning guides and information...": "此部分提供各種微調指南和資訊...", + "This section provide various LoRA tools...": "此部分提供各種 LoRA 工具...", + "This section provide Various LoRA guides and information...": "此部分提供各種 LoRA 指南和資訊...", + "This section provide Dreambooth tools to help setup your dataset...": "此部分提供 Dreambooth 工具,以幫助設置您的資料集...", + "This utility allows quick captioning and tagging of images.": "此工具允許快速標記圖片的標記文字和標籤。", + "This utility allows you to create simple caption files for each image in a folder.": "此工具允許您為資料夾中的每個圖片建立簡單的標籤文件。", + "This utility can extract a DyLoRA network from a finetuned model.": "此工具可以從一個微調模型中提取 DyLoRA 網路。", + "This utility can extract a LoRA network from a finetuned model.": "此工具可以從一個微調模型中提取 LoRA 網路。", + "This utility can extract a LyCORIS LoCon network from a finetuned model.": "此工具可以從一個微調模型中提取 LyCORIS LoCon 網路。", + "This utility can merge a LyCORIS model into a SD checkpoint.": "此工具可以將 LyCORIS 
模型合併到一個 SD 模型。", + "This utility can merge two LoRA networks together into a new LoRA.": "此工具可以將兩個 LoRA 網路合併成一個新的 LoRA。", + "This utility can merge up to 4 LoRA together or alternatively merge up to 4 LoRA into a SD checkpoint.": "此工具可以將最多 4 個 LoRA 合併在一起,或者將最多 4 個 LoRA 合併到一個 SD 模型。", + "This utility can resize a LoRA.": "此工具可以調整 LoRA 的大小。", + "This utility can verify a LoRA network to make sure it is properly trained.": "此工具可以驗證 LoRA 網路,以確保它已經得到正確的訓練。", + "This utility convert a model to an LCM model.": "此工具將模型轉換為 LCM 模型。", + "This utility uses BLIP to caption files for each image in a folder.": "此工具使用 BLIP 為資料夾中的每張圖像添加標籤。", + "This utility will create the necessary folder structure for the training images and optional regularization images needed for the kohys_ss Dreambooth/LoRA method to function correctly.": "此工具將為訓練圖片和 kohys_ss Dreambooth/LoRA 方法正確運行所需的正規化圖片創建必要的資料夾結構。", + "This utility will ensure that each concept folder in the dataset folder is used equally during the training process of the dreambooth machine learning model, regardless of the number of images in each folder. 
It will do this by renaming the concept folders to indicate the number of times they should be repeated during training.": "此工具將確保資料集資料夾中的每個概念資料夾在訓練過程中被平等使用,而不管每個資料夾中的圖片數量。它將通過重新命名概念資料夾來指示它們在訓練期間應該重複的次數。", + "This utility will group images in a folder based on their aspect ratio.": "此工具將根據圖片的長寬比將資料夾中的圖片分組。", + "This utility will use GIT to caption files for each images in a folder.": "此工具將使用 GIT 為資料夾中的每個圖片檔案標記文字。", + "This utility will use WD14 to caption files for each images in a folder.": "此工具將使用 WD14 為資料夾中的每個圖片檔案標記文字。", + "Top p": "Top p", + "Train batch size": "訓練批次大小", + "Train Norm": "訓練規範 (Norm)", + "Train a custom model using kohya dreambooth python code...": "使用 kohya Dreambooth Python 程式訓練自定義模型", + "Train a custom model using kohya train network LoRA python code...": "使用 kohya LoRA Python 程式訓練自定義模型", + "Train a custom model using kohya finetune python code...": "使用 kohya 微調 Python 程式訓練自定義模型", + "Train a TI using kohya textual inversion python code...": "使用 kohya 文本反轉 Python 程式訓練 TI", + "Train an additional scalar in front of the weight difference, use a different weight initialization strategy.": "在權重差異前訓練一個額外的標量,使用不同的權重初始化策略。", + "Train config folder (Optional. 
where config files will be saved)": "訓練設定資料夾 (選填,設定檔案將會被儲存的資料夾)", + "Train text encoder": "訓練文本編碼器", + "Trained Model output name": "訓練模型輸出名稱", + "Training comment": "訓練註解", + "Training images (directory containing the training images)": "訓練圖片 (含有訓練圖片的資料夾)", + "Training steps per concept per epoch": "每個週期 (Epoch) 每個概念的訓練步數", + "Training": "訓練", + "U-Net and Text Encoder can be trained with fp8 (experimental)": "U-Net 與文本編碼器可使用 fp8 訓練 (實驗性功能)", + "Undesired tags": "不需要的標籤", + "Unet learning rate": "U-Net 學習率", + "Up LR weights": "Up LR 權重", + "Use CP decomposition": "使用 CP 分解 (CP decomposition)", + "Use Scalar": "使用標量", + "Use Tucker decomposition": "使用 Tucker 分解 (Tucker decomposition)", + "Use beam search": "使用 beam 搜尋", + "Use full path": "使用完整路徑", + "Use latent files": "使用潛空間檔案", + "Use onnx": "使用 ONNX", + "Use sparse biais": "使用稀疏偏差 (sparse bias)", + "Useful if you want to train with character": "如果您想要以角色進行訓練,這會很有用", + "Useful to force model re download when switching to onnx": "在切換到 ONNX 時強制重新下載模型", + "Users can obtain and/or generate an api key in the their user settings on the website: https://wandb.ai/login": "使用者可以在以下網站的用戶設定中取得,或產生 API 金鑰:https://wandb.ai/login", + "V Pred like loss": "V 預測損失 (V Pred like loss)", + "VAE (Optional. path to checkpoint of vae to replace for training)": "VAE (選填,選擇要替換訓練的 VAE checkpoint 的檔案路徑)", + "VAE batch size": "VAE 批次大小", + "Value for the dynamic method selected.": "選擇的動態方法的數值。", + "Values greater than 0 will make the model more img2img focussed. 0 = image only": "大於 0 的數值會使模型更加聚焦在 img2img 上。0 表示僅關注於圖像生成", + "Values lower than 1000 will make the model more img2img focussed. 
1000 = noise only": "小於 1000 的數值會使模型更加聚焦在 img2img 上。1000 表示僅使用噪聲生成圖片", + "Verbose logging": "詳細日誌", + "Verification error": "驗證錯誤", + "Verification output": "驗證輸出", + "Verify LoRA": "驗證 LoRA", + "Verify": "驗證", + "WANDB API Key": "WANDB API 金鑰", + "WANDB Logging": "WANDB 紀錄", + "WD14 Captioning": "WD14 標記文字", + "Weighted captions": "加權標記文字 (Weighted captions)", + "Weights": "權重", + "Yes, I like danger": "是的,我喜歡危險", + "alpha for LoRA weight scaling": "LoRA 權重縮放的 alpha", + "applies an additional scaling factor to the oft_blocks, allowing for further adjustment of their impact on the model's transformations.": "對 oft_blocks 應用額外的縮放因子,從而進一步調整它們對模型轉換的影響。", + "can specify `module_dropout` to dropout each rank with specified probability. Recommended range 0.1 to 0.3": "可以指定 `module_dropout` 以指定的概率捨棄每個維度。建議範圍 0.1 到 0.3", + "can specify `rank_dropout` to dropout each rank with specified probability. Recommended range 0.1 to 0.3": "可以指定 `rank_dropout` 以指定的概率捨棄每個維度。建議範圍 0.1 到 0.3", + "e.g., \"by some artist\". Leave empty if you only want to add a prefix or postfix.": "例如: \"by some artist\"。如果您只想添加前綴或後綴,請留空。", + "e.g., \"by some artist\". Leave empty if you want to replace with nothing.": "例如: \"by some artist\"。如果您想用空值取代,請留空。", + "eg: cat": "例如:cat", + "example: 0,1": "例如:0,1", + "fp8 base training (experimental)": "使用 fp8 基礎訓練 (實驗性功能)", + "iA3 train on input": "iA3 輸入訓練", + "is SDXL": "SDXL 模型", + "is v2": "SD2 模型", + "network dim for linear layer in fixed mode": "固定模式下線性層的網路維度", + "network dim for conv layer in fixed mode": "固定模式下卷積層的網路維度", + "only for SDXL": "僅適用於 SDXL" }