diff --git a/.gitignore b/.gitignore index ccb515218..b77214960 100644 --- a/.gitignore +++ b/.gitignore @@ -19,3 +19,5 @@ test/output test/logs test/*.json test/ft +requirements_tmp_for_setup.txt +0.13.3 diff --git a/Dockerfile b/Dockerfile index a1377f7db..cfa0b5df9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -26,8 +26,8 @@ RUN python3 -m pip install wheel ## RUN python3 -m pip install -v -U git+https://github.com/facebookresearch/xformers.git@main#egg=xformers # Install requirements -COPY requirements_unix.txt setup.py ./ -RUN python3 -m pip install --use-pep517 -r requirements_unix.txt xformers +COPY requirements_linux.txt ./setup/setup.py ./ +RUN python3 -m pip install --use-pep517 -r requirements_linux.txt xformers # Replace pillow with pillow-simd RUN python3 -m pip uninstall -y pillow && \ diff --git a/README.md b/README.md index ef225b284..36f8d9ff2 100644 --- a/README.md +++ b/README.md @@ -71,7 +71,17 @@ These dependencies are taken care of via `setup.sh` in the installation section. ## Installation ### Runpod -Follow the instructions found in this discussion: https://github.com/bmaltais/kohya_ss/discussions/379 +- Select the pytorch 2.0.1 template +- ssh into the runpod + +``` +cd /workspace +git clone https://github.com/bmaltais/kohya_ss.git +git checkout dev2 +./setup.sh -p +``` + +Connect to the public URL displayed ### Docker Docker is supported on Windows and Linux distributions. However this method currently only supports Nvidia GPUs. @@ -102,7 +112,11 @@ If you run on Linux, there is an alternative docker container port with less lim venv support need to be pre-installed. 
Can be done on ubuntu 22.04 with `apt install python3.10-venv` -Make sure to use a version of python >= 3.10.6 and < 3.11.0 +Install the CUDA drivers following the instructions from: `https://developer.nvidia.com/cuda-downloads?target_os=Linux&target_arch=x86_64` + +Use a version of python >= 3.10.6 and < 3.11.0 + +On WSL2, make sure to `export LD_LIBRARY_PATH=/usr/lib/wsl/lib/` or else AdamW8bit will not work. #### Setup @@ -170,11 +184,6 @@ cd kohya_ss .\setup.bat ``` -If this is a 1st install answer No when asked `Do you want to uninstall previous versions of torch and associated files before installing`. - - -Then configure accelerate with the same answers as in the MacOS instructions when prompted. - ### Optional: CUDNN 8.6 This step is optional but can improve the learning speed for NVIDIA 30X0/40X0 owners. It allows for larger training batch size and faster training speed. @@ -196,26 +205,20 @@ Once the commands have completed successfully you should be ready to use the new ## Upgrading The following commands will work from the root directory of the project if you'd prefer to not run scripts. -These commands will work on any OS. -```bash -git pull - -.\venv\Scripts\activate - -pip install --use-pep517 --upgrade -r requirements.txt -``` ### Windows Upgrade When a new release comes out, you can upgrade your repo with the following commands in the root directory: ```powershell -upgrade.bat +git pull +.\setup.bat ``` ### Linux and macOS Upgrade You can cd into the root directory and simply run ```bash +git pull # Refresh and update everything ./setup.sh @@ -279,13 +282,7 @@ You can find the train network solution specific here: [Train network README](tr ## LoRA -Training a LoRA currently uses the `train_network.py` code. You can create a LoRA network by using the all-in-one `gui.cmd` or by running the dedicated LoRA training GUI with: - -``` -.\venv\Scripts\activate - -python lora_gui.py -``` +Training a LoRA currently uses the `train_network.py` code.
You can create a LoRA network by using the all-in-one gui. Once you have created the LoRA network, you can generate images via auto1111 by installing [this extension](https://github.com/kohya-ss/sd-webui-additional-networks). @@ -353,6 +350,13 @@ This will store a backup file with your current locally installed pip packages a ## Change History +* 2023/06/24 (v21.7.12) +- Significantly improved the setup process on all platforms +- Better support for runpod +* 2023/06/23 (v21.7.11) +- This is a significant update to how setup works across different platforms. It might be causing issues... especially for linux env like runpod. If you encounter problems please report them in the issues so I can try to address them. You can revert to the previous release with `git checkout v21.7.10` + +The setup solution is now much more modular and will simplify requirements support across different environments... hoping this will make it easier to run on different OS. * 2023/06/19 (v21.7.10) - Quick fix for linux GUI startup where it would try to install darwin requirements on top of linux. Ugly fix but work. Hopefulle some linux user will improve via a PR.
* 2023/06/18 (v21.7.9) diff --git a/gui.bat b/gui.bat index 23768b525..43b7d926a 100644 --- a/gui.bat +++ b/gui.bat @@ -1,18 +1,26 @@ @echo off + :: Deactivate the virtual environment call .\venv\Scripts\deactivate.bat :: Calling external python program to check for local modules -python .\tools\check_local_modules.py --no_question +python .\setup\check_local_modules.py --no_question :: Activate the virtual environment call .\venv\Scripts\activate.bat set PATH=%PATH%;%~dp0venv\Lib\site-packages\torch\lib :: Validate requirements -python.exe .\tools\validate_requirements.py +python.exe .\setup\validate_requirements.py :: If the exit code is 0, run the kohya_gui.py script with the command-line arguments if %errorlevel% equ 0 ( - python.exe kohya_gui.py %* -) \ No newline at end of file + REM Check if the batch was started via double-click + IF /i "%comspec% /c %~0 " equ "%cmdcmdline:"=%" ( + REM echo This script was started by double clicking. + cmd /k python.exe kohya_gui.py %* + ) ELSE ( + REM echo This script was started from a command prompt. + python.exe kohya_gui.py %* + ) +) diff --git a/gui.ps1 b/gui.ps1 index 8044e9304..2947343db 100644 --- a/gui.ps1 +++ b/gui.ps1 @@ -29,10 +29,10 @@ if ($pipOutput) { $env:PATH += ";$($MyInvocation.MyCommand.Path)\venv\Lib\site-packages\torch\lib" # Debug info about system -# python.exe .\tools\debug_info.py +# python.exe .\setup\debug_info.py # Validate the requirements and store the exit code -python.exe .\tools\validate_requirements.py +python.exe .\setup\validate_requirements.py # If the exit code is 0, read arguments from gui_parameters.txt (if it exists) # and run the kohya_gui.py script with the command-line arguments diff --git a/gui.sh b/gui.sh index 03ed5b00b..a85f91eea 100755 --- a/gui.sh +++ b/gui.sh @@ -1,8 +1,24 @@ #!/usr/bin/env bash +# Checks to see if variable is set and non-empty. 
+# This is defined first, so we can use the function for some default variable values +env_var_exists() { + if [[ -n "${!1}" ]]; then + return 0 + else + return 1 + fi +} + +# Need RUNPOD to have a default value before first access +RUNPOD=false +if env_var_exists RUNPOD_POD_ID || env_var_exists RUNPOD_API_KEY; then + RUNPOD=true +fi + # If it is run with the sudo command, get the complete LD_LIBRARY_PATH environment variable of the system and assign it to the current environment, # because it will be used later. -if [ -n "$SUDO_USER" ] || [ -n "$SUDO_COMMAND" ] ; then +if [ -n "$SUDO_USER" ] || [ -n "$SUDO_COMMAND" ]; then echo "The sudo command resets the non-essential environment variables, we keep the LD_LIBRARY_PATH variable." export LD_LIBRARY_PATH=$(sudo -i printenv LD_LIBRARY_PATH) fi @@ -11,12 +27,42 @@ fi SCRIPT_DIR=$(cd -- "$(dirname -- "$0")" && pwd) # Step into GUI local directory -cd "$SCRIPT_DIR" +cd "$SCRIPT_DIR" || exit 1 + +if [ "$RUNPOD" = false ]; then + # Activate the virtual environment + source "$SCRIPT_DIR/venv/bin/activate" || exit 1 +fi + +# Check if LD_LIBRARY_PATH environment variable exists +if [[ -z "${LD_LIBRARY_PATH}" ]]; then + # Set the ANSI escape sequence for yellow text + YELLOW='\033[0;33m' + # Set the ANSI escape sequence to reset text color + RESET='\033[0m' + + echo -e "${YELLOW}Warning: LD_LIBRARY_PATH environment variable is not set.${RESET}" + echo -e "${YELLOW}Certain functionalities may not work correctly.${RESET}" + echo -e "${YELLOW}Please ensure that the required libraries are properly configured.${RESET}" + echo -e " " +fi -# Activate the virtual environment -source "$SCRIPT_DIR/venv/bin/activate" +# Determine the requirements file based on the system +if [[ "$OSTYPE" == "darwin"* ]]; then + if [[ "$(uname -m)" == "arm64" ]]; then + REQUIREMENTS_FILE="$SCRIPT_DIR/requirements_macos_arm64.txt" + else + REQUIREMENTS_FILE="$SCRIPT_DIR/requirements_macos_amd64.txt" + fi +else + if [ "$RUNPOD" = true ]; then + 
REQUIREMENTS_FILE="$SCRIPT_DIR/requirements_runpod.txt" + else + REQUIREMENTS_FILE="$SCRIPT_DIR/requirements_linux.txt" + fi +fi -# If the requirements are validated, run the kohya_gui.py script with the command-line arguments -if python "$SCRIPT_DIR"/tools/validate_requirements_unix.py -r "$SCRIPT_DIR"/requirements_unix.txt; then +# Validate the requirements and run the script if successful +if python "$SCRIPT_DIR/setup/validate_requirements.py" -r "$REQUIREMENTS_FILE"; then python "$SCRIPT_DIR/kohya_gui.py" "$@" fi diff --git a/library/convert_model_gui.py b/library/convert_model_gui.py index 2f3b94c1c..739a02ef7 100644 --- a/library/convert_model_gui.py +++ b/library/convert_model_gui.py @@ -174,6 +174,10 @@ def gradio_convert_model_tab(headless=False): gr.Markdown( 'This utility can be used to convert from one stable diffusion model format to another.' ) + + model_ext = gr.Textbox(value='*.safetensors *.ckpt', visible=False) + model_ext_name = gr.Textbox(value='Model types', visible=False) + with gr.Row(): source_model_input = gr.Textbox( label='Source model', @@ -198,7 +202,7 @@ def gradio_convert_model_tab(headless=False): ) button_source_model_file.click( get_file_path, - inputs=[source_model_input], + inputs=[source_model_input, model_ext, model_ext_name], outputs=source_model_input, show_progress=False, ) diff --git a/library/custom_logging.py b/library/custom_logging.py index f50aa1801..ee7e5e208 100644 --- a/library/custom_logging.py +++ b/library/custom_logging.py @@ -1,6 +1,7 @@ import os import logging import time +import sys from rich.theme import Theme from rich.logging import RichHandler @@ -23,7 +24,10 @@ def setup_logging(clean=False, debug=False): except: pass - logging.basicConfig(level=logging.DEBUG, format='%(asctime)s | %(levelname)s | %(pathname)s | %(message)s', filename='setup.log', filemode='a', encoding='utf-8', force=True) + if sys.version_info >= (3, 9): + logging.basicConfig(level=logging.DEBUG, format='%(asctime)s | %(levelname)s |
%(pathname)s | %(message)s', filename='setup.log', filemode='a', encoding='utf-8', force=True) + else: + logging.basicConfig(level=logging.DEBUG, format='%(asctime)s | %(levelname)s | %(pathname)s | %(message)s', filename='setup.log', filemode='a', force=True) console = Console(log_time=True, log_time_format='%H:%M:%S-%f', theme=Theme({ "traceback.border": "black", diff --git a/lora_gui.py b/lora_gui.py index bccaf31fe..888433d91 100644 --- a/lora_gui.py +++ b/lora_gui.py @@ -1293,7 +1293,15 @@ def update_LoRA_settings(LoRA_type): 'decompose_both': ({'LyCORIS/LoKr'}, gr.Slider), 'train_on_input': ({'LyCORIS/iA3'}, gr.Slider), 'scale_weight_norms': ( - {'Kohya DyLoRA', 'Kohya LoCon'}, + { + 'LoCon', + 'Kohya DyLoRA', + 'Kohya LoCon', + 'LyCORIS/DyLoRA', + 'LyCORIS/LoHa', + 'LyCORIS/LoCon', + 'LyCORIS/LoKr', + }, gr.Slider, ), 'network_dropout': ( diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..83201610c --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,19 @@ +[build-system] +requires = ["poetry-core>=1.0.0"] +build-backend = "poetry.core.masonry.api" + +[tool.poetry] +name = "library" +version = "1.0.3" +description = "Libraries required to run kohya_ss GUI" +authors = ["Bernard Maltais "] +license = "Apache-2.0" # Apache Software License + +[[tool.poetry.source]] +name = "library" +path = "library" + +[tool.poetry.dependencies] +python = ">=3.9,<3.11" + +[tool.poetry.dev-dependencies] diff --git a/requirements_Ubuntu_20.04.txt b/requirements.txt similarity index 51% rename from requirements_Ubuntu_20.04.txt rename to requirements.txt index dd5fc461e..7dddb9253 100644 --- a/requirements_Ubuntu_20.04.txt +++ b/requirements.txt @@ -1,17 +1,13 @@ -accelerate==0.19.0 albumentations==1.3.0 altair==4.2.2 -bitsandbytes==0.35.0 dadaptation==3.1 diffusers[torch]==0.10.2 easygui==0.98.3 einops==0.6.0 fairscale==0.4.13 ftfy==6.1.1 -gradio==3.23.0; sys_platform == 'darwin' -gradio==3.32.0; sys_platform != 'darwin' -huggingface-hub==0.13.3; 
sys_platform == 'darwin' -huggingface-hub==0.13.3; sys_platform != 'darwin' +gradio==3.33.1 +huggingface-hub>=0.13.3 lion-pytorch==0.0.6 lycoris_lora==0.1.6 opencv-python==4.7.0.68 @@ -19,9 +15,6 @@ prodigyopt==1.0 pytorch-lightning==1.9.0 rich==13.4.1 safetensors==0.2.6 -tensorboard==2.10.1 ; sys_platform != 'darwin' -tensorboard==2.12.1 ; sys_platform == 'darwin' -tensorflow==2.10.1; sys_platform != 'darwin' timm==0.6.12 tk==0.1.0 toml==0.10.2 @@ -29,4 +22,4 @@ transformers==4.26.0 voluptuous==0.13.1 wandb==0.15.0 # for kohya_ss library -. +-e . # no_verify leave this to specify not checking this a verification stage diff --git a/requirements_linux.txt b/requirements_linux.txt new file mode 100644 index 000000000..9eec87e95 --- /dev/null +++ b/requirements_linux.txt @@ -0,0 +1,4 @@ +torch==2.0.1+cu118 torchvision==0.15.2+cu118 --extra-index-url https://download.pytorch.org/whl/cu118 # no_verify leave this to specify not checking this a verification stage +xformers==0.0.20 bitsandbytes==0.39.1 +accelerate==0.19.0 tensorboard==2.12.1 tensorflow==2.12.0 +-r requirements.txt diff --git a/requirements_macos_amd64.txt b/requirements_macos_amd64.txt new file mode 100644 index 000000000..6aff7e405 --- /dev/null +++ b/requirements_macos_amd64.txt @@ -0,0 +1,4 @@ +torch==2.0.0 torchvision==0.15.1 -f https://download.pytorch.org/whl/cpu/torch_stable.html +xformers bitsandbytes==0.35.0 +accelerate==0.19.0 tensorflow-macos tensorboard==2.12.1 +-r requirements.txt diff --git a/requirements_macos_arm64.txt b/requirements_macos_arm64.txt new file mode 100644 index 000000000..d1bc2b3a3 --- /dev/null +++ b/requirements_macos_arm64.txt @@ -0,0 +1,4 @@ +torch==2.0.0 torchvision==0.15.1 -f https://download.pytorch.org/whl/cpu/torch_stable.html +xformers bitsandbytes==0.35.0 +accelerate==0.19.0 tensorflow-metal tensorboard==2.12.1 +-r requirements.txt diff --git a/requirements_runpod.txt b/requirements_runpod.txt new file mode 100644 index 000000000..5d333484f --- /dev/null +++ 
b/requirements_runpod.txt @@ -0,0 +1,5 @@ +torch==2.0.1+cu117 torchvision==0.15.2+cu117 --extra-index-url https://download.pytorch.org/whl/cu117 # no_verify leave this to specify not checking this a verification stage +xformers==0.0.20 bitsandbytes==0.39.1 +accelerate==0.19.0 tensorboard==2.12.1 tensorflow==2.12.0 +tensorrt +-r requirements.txt diff --git a/requirements_unix.txt b/requirements_unix.txt deleted file mode 100644 index fd10892ab..000000000 --- a/requirements_unix.txt +++ /dev/null @@ -1,31 +0,0 @@ -accelerate==0.15.0 -albumentations==1.3.0 -altair==4.2.2 -bitsandbytes==0.35.0 -dadaptation==3.1 -diffusers[torch]==0.10.2 -easygui==0.98.3 -einops==0.6.0 -fairscale==0.4.13 -ftfy==6.1.1 -gradio==3.23.0; sys_platform == 'darwin' -gradio==3.32.0; sys_platform != 'darwin' -huggingface-hub==0.13.0; sys_platform == 'darwin' -huggingface-hub==0.13.3; sys_platform != 'darwin' -lion-pytorch==0.0.6 -lycoris_lora==0.1.6 -opencv-python==4.7.0.68 -pytorch-lightning==1.9.0 -rich==13.4.1 -safetensors==0.2.6 -tensorboard==2.10.1 ; sys_platform != 'darwin' -tensorboard==2.12.1 ; sys_platform == 'darwin' -tensorflow==2.10.1; sys_platform != 'darwin' -timm==0.6.12 -tk==0.1.0 -toml==0.10.2 -transformers==4.26.0 -voluptuous==0.13.1 -wandb==0.15.0 -# for kohya_ss library -. 
\ No newline at end of file diff --git a/requirements_windows_torch1.txt b/requirements_windows_torch1.txt index 2c38150c1..68c6de1cb 100644 --- a/requirements_windows_torch1.txt +++ b/requirements_windows_torch1.txt @@ -1,29 +1,32 @@ -accelerate==0.15.0 -albumentations==1.3.0 -altair==4.2.2 -bitsandbytes==0.35.0 -dadaptation==3.1 -diffusers[torch]==0.10.2 -easygui==0.98.3 -einops==0.6.0 -fairscale==0.4.13 -ftfy==6.1.1 -gradio==3.32.0 -huggingface-hub==0.13.3 -lion-pytorch==0.0.6 -lycoris_lora==0.1.6 -opencv-python==4.7.0.68 -prodigyopt==1.0 -pytorch-lightning==1.9.0 -rich==13.4.1 -safetensors==0.2.6 -tensorboard==2.10.1 -tensorflow==2.10.1 -timm==0.6.12 -tk==0.1.0 -toml==0.10.2 -transformers==4.26.0 -voluptuous==0.13.1 -wandb==0.15.0 -# for kohya_ss library -. +torch==1.12.1+cu116 torchvision==0.13.1+cu116 --index-url https://download.pytorch.org/whl/cu116 # no_verify +https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl -U -I --no-deps # no_verify +bitsandbytes-windows +accelerate==0.15.0 tensorboard==2.10.1 tensorflow==2.10.1 +-r requirements.txt + +# albumentations==1.3.0 +# altair==4.2.2 +# bitsandbytes==0.35.0 +# dadaptation==3.1 +# diffusers[torch]==0.10.2 +# easygui==0.98.3 +# einops==0.6.0 +# fairscale==0.4.13 +# ftfy==6.1.1 +# gradio==3.32.0 +# huggingface-hub==0.13.3 +# lion-pytorch==0.0.6 +# lycoris_lora==0.1.6 +# opencv-python==4.7.0.68 +# prodigyopt==1.0 +# pytorch-lightning==1.9.0 +# rich==13.4.1 +# safetensors==0.2.6 +# timm==0.6.12 +# tk==0.1.0 +# toml==0.10.2 +# transformers==4.26.0 +# voluptuous==0.13.1 +# wandb==0.15.0 +# # for kohya_ss library +# . 
diff --git a/requirements_windows_torch2.txt b/requirements_windows_torch2.txt index 1ae250593..58d9a8a77 100644 --- a/requirements_windows_torch2.txt +++ b/requirements_windows_torch2.txt @@ -1,30 +1,32 @@ -accelerate==0.19.0 -albumentations==1.3.0 -altair==4.2.2 -bitsandbytes==0.35.0 -dadaptation==3.1 -diffusers[torch]==0.10.2 -easygui==0.98.3 -einops==0.6.0 -fairscale==0.4.13 -ftfy==6.1.1 -gradio==3.33.1 -huggingface-hub==0.15.1 -lion-pytorch==0.0.6 -lycoris_lora==0.1.6 -opencv-python==4.7.0.68 -prodigyopt==1.0 -pytorch-lightning==1.9.0 -rich==13.4.1 -safetensors==0.2.6 -tensorboard==2.12.3 -tensorflow==2.12.0 -timm==0.6.12 -tk==0.1.0 -toml==0.10.2 -transformers==4.26.0 -voluptuous==0.13.1 -wandb==0.15.0 -xformers==0.0.20 -# for kohya_ss library -. +torch==2.0.1+cu118 torchvision==0.15.2+cu118 --index-url https://download.pytorch.org/whl/cu118 # no_verify +xformers==0.0.20 bitsandbytes-windows +accelerate==0.19.0 tensorboard==2.12.3 tensorflow==2.12.0 +-r requirements.txt + +# albumentations==1.3.0 +# altair==4.2.2 +# bitsandbytes==0.35.0 +# dadaptation==3.1 +# diffusers[torch]==0.10.2 +# easygui==0.98.3 +# einops==0.6.0 +# fairscale==0.4.13 +# ftfy==6.1.1 +# gradio==3.33.1 +# huggingface-hub==0.15.1 +# lion-pytorch==0.0.6 +# lycoris_lora==0.1.6 +# opencv-python==4.7.0.68 +# prodigyopt==1.0 +# pytorch-lightning==1.9.0 +# rich==13.4.1 +# safetensors==0.2.6 +# timm==0.6.12 +# tk==0.1.0 +# toml==0.10.2 +# transformers==4.26.0 +# voluptuous==0.13.1 +# wandb==0.15.0 +# xformers==0.0.20 +# # for kohya_ss library +# . 
diff --git a/setup.bat b/setup.bat index 95d069e59..dc68cbd34 100644 --- a/setup.bat +++ b/setup.bat @@ -20,11 +20,18 @@ mkdir ".\logs\setup" > nul 2>&1 call .\venv\Scripts\deactivate.bat :: Calling external python program to check for local modules -python .\tools\check_local_modules.py +python .\setup\check_local_modules.py call .\venv\Scripts\activate.bat -python .\tools\setup_windows.py +REM Check if the batch was started via double-click +IF /i "%comspec% /c %~0 " equ "%cmdcmdline:"=%" ( + REM echo This script was started by double clicking. + cmd /k python .\setup\setup_windows.py +) ELSE ( + REM echo This script was started from a command prompt. + python .\setup\setup_windows.py +) :: Deactivate the virtual environment call .\venv\Scripts\deactivate.bat \ No newline at end of file diff --git a/setup.ps1 b/setup.ps1 index 71c5d9245..df5f92e68 100644 --- a/setup.ps1 +++ b/setup.ps1 @@ -17,11 +17,11 @@ $null = New-Item -ItemType Directory -Force -Path ".\logs\setup" & .\venv\Scripts\deactivate.bat # Calling external python program to check for local modules -& .\venv\Scripts\python.exe .\tools\check_local_modules.py +& .\venv\Scripts\python.exe .\setup\check_local_modules.py & .\venv\Scripts\activate.bat -& .\venv\Scripts\python.exe .\tools\setup_windows.py +& .\venv\Scripts\python.exe .\setup\setup_windows.py # Deactivate the virtual environment & .\venv\Scripts\deactivate.bat diff --git a/setup.py b/setup.py deleted file mode 100644 index 8aa7c075d..000000000 --- a/setup.py +++ /dev/null @@ -1,10 +0,0 @@ -from setuptools import setup, find_packages -import subprocess -import os -import sys - -# Call the create_user_files.py script -script_path = os.path.join("tools", "create_user_files.py") -subprocess.run([sys.executable, script_path]) - -setup(name="library", version="1.0.3", packages=find_packages()) diff --git a/setup.sh b/setup.sh index 7f1a850c8..e8bd61117 100755 --- a/setup.sh +++ b/setup.sh @@ -3,8 +3,6 @@ # This file will be the host environment 
setup file for all operating systems other than base Windows. # Set the required package versions here. -# They will be appended to the requirements_unix.txt file in the installation directory. -TENSORFLOW_VERSION="2.12.0" TENSORFLOW_MACOS_VERSION="2.12.0" TENSORFLOW_METAL_VERSION="0.8.0" @@ -58,7 +56,7 @@ SCRIPT_DIR="$(cd -- $(dirname -- "$0") && pwd)" # Variables defined before the getopts loop, so we have sane default values. # Default installation locations based on OS and environment -if [[ "$OSTYPE" == "linux-gnu"* ]]; then +if [[ "$OSTYPE" == "lin"* ]]; then if [ "$RUNPOD" = true ]; then DIR="/workspace/kohya_ss" elif [ -d "$SCRIPT_DIR/.git" ]; then @@ -90,7 +88,7 @@ GIT_REPO="https://github.com/bmaltais/kohya_ss.git" INTERACTIVE=false PUBLIC=false SKIP_SPACE_CHECK=false -SKIP_GIT_UPDATE=false +SKIP_GIT_UPDATE=true SKIP_GUI=false while getopts ":vb:d:g:inprus-:" opt; do @@ -100,6 +98,7 @@ while getopts ":vb:d:g:inprus-:" opt; do OPTARG="${OPTARG#$opt}" # extract long option argument (may be empty) OPTARG="${OPTARG#=}" # if long option argument, remove assigning `=` fi + case $opt in b | branch) BRANCH="$OPTARG" ;; d | dir) DIR="$OPTARG" ;; @@ -194,28 +193,34 @@ size_available() { # The expected usage is create_symlinks symlink target_file create_symlinks() { + local symlink="$1" + local target_file="$2" + echo "Checking symlinks now." - # Next line checks for valid symlink - if [ -L "$1" ]; then + + # Check if the symlink exists + if [ -L "$symlink" ]; then # Check if the linked file exists and points to the expected file - if [ -e "$1" ] && [ "$(readlink "$1")" == "$2" ]; then - echo "$(basename "$1") symlink looks fine. Skipping." + if [ -e "$symlink" ] && [ "$(readlink "$symlink")" == "$target_file" ]; then + echo "$(basename "$symlink") symlink looks fine. Skipping." else - if [ -f "$2" ]; then - echo "Broken symlink detected. Recreating $(basename "$1")." - rm "$1" && - ln -s "$2" "$1" + if [ -f "$target_file" ]; then + echo "Broken symlink detected. 
Recreating $(basename "$symlink")." + rm "$symlink" && ln -s "$target_file" "$symlink" else - echo "$2 does not exist. Nothing to link." + echo "$target_file does not exist. Nothing to link." fi fi else - echo "Linking $(basename "$1")." - ln -s "$2" "$1" + echo "Linking $(basename "$symlink")." + ln -s "$target_file" "$symlink" fi } + install_python_dependencies() { + local TEMP_REQUIREMENTS_FILE + # Switch to local virtual env echo "Switching to virtual Python environment." if ! inDocker; then @@ -234,63 +239,51 @@ install_python_dependencies() { fi # Updating pip if there is one - echo "Checking for pip updates before Python operations." - pip install --upgrade pip >&3 + # echo "Checking for pip updates before Python operations." + # pip install --upgrade pip + + # echo "Installing python dependencies. This could take a few minutes as it downloads files." + # echo "If this operation ever runs too long, you can rerun this script in verbose mode to check." - echo "Installing python dependencies. This could take a few minutes as it downloads files." - echo "If this operation ever runs too long, you can rerun this script in verbose mode to check." 
case "$OSTYPE" in - "linux-gnu"*) pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 \ - --extra-index-url https://download.pytorch.org/whl/cu118 >&3 && - pip install -U -I xformers==0.0.20 >&3 ;; - "darwin"*) pip install torch==2.0.0 torchvision==0.15.1 \ - -f https://download.pytorch.org/whl/cpu/torch_stable.html >&3 ;; - "cygwin") - : - ;; - "msys") - : - ;; + "lin"*) + if [ "$RUNPOD" = true ]; then + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_runpod.txt + else + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_linux.txt + fi + ;; + "darwin"*) + if [[ "$(uname -m)" == "arm64" ]]; then + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_macos_arm64.txt + else + python "$SCRIPT_DIR/setup/setup_linux.py" --platform-requirements-file=requirements_macos_amd64.txt + fi + ;; esac - if [ "$RUNPOD" = true ]; then - echo "Installing tenssort." - pip install tensorrt >&3 - fi - # DEBUG ONLY (Update this version number to whatever PyCharm recommends) # pip install pydevd-pycharm~=223.8836.43 - #This will copy our requirements_unix.txt file out and make the khoya_ss lib a dynamic location then cleanup. - local TEMP_REQUIREMENTS_FILE="$DIR/requirements_tmp_for_setup.txt" - echo "Copying $DIR/requirements_unix.txt to $TEMP_REQUIREMENTS_FILE" >&3 - echo "Replacing the . for lib to our DIR variable in $TEMP_REQUIREMENTS_FILE." >&3 - awk -v dir="$DIR" '/#.*kohya_ss.*library/{print; getline; sub(/^\.$/, dir)}1' "$DIR/requirements_unix.txt" >"$TEMP_REQUIREMENTS_FILE" - - # This will check if macOS is running then determine if M1+ or Intel CPU. - # It will append the appropriate packages to the requirements_unix.txt file. - # Other OSs won't be affected and the version variables are at the top of this file. 
- if [[ "$(uname)" == "Darwin" ]]; then - # Check if the processor is Apple Silicon (arm64) - if [[ "$(uname -m)" == "arm64" ]]; then - echo "tensorflow-macos==$TENSORFLOW_MACOS_VERSION" >>"$TEMP_REQUIREMENTS_FILE" - echo "tensorflow-metal==$TENSORFLOW_METAL_VERSION" >>"$TEMP_REQUIREMENTS_FILE" - # Check if the processor is Intel (x86_64) - elif [[ "$(uname -m)" == "x86_64" ]]; then - echo "tensorflow==$TENSORFLOW_VERSION" >>"$TEMP_REQUIREMENTS_FILE" - fi - fi - - if [ $VERBOSITY == 2 ]; then - python -m pip install --quiet --use-pep517 --upgrade -r "$TEMP_REQUIREMENTS_FILE" >&3 - else - python -m pip install --use-pep517 --upgrade -r "$TEMP_REQUIREMENTS_FILE" >&3 - fi - - echo "Removing the temp requirements file." - if [ -f "$TEMP_REQUIREMENTS_FILE" ]; then - rm -f "$TEMP_REQUIREMENTS_FILE" - fi + # Create a temporary requirements file + # TEMP_REQUIREMENTS_FILE=$(mktemp) + + # if [[ "$OSTYPE" == "darwin"* ]]; then + # echo "Copying $DIR/requirements_macos.txt to $TEMP_REQUIREMENTS_FILE" >&3 + # echo "Replacing the . for lib to our DIR variable in $TEMP_REQUIREMENTS_FILE." >&3 + # awk -v dir="$DIR" '/#.*kohya_ss.*library/{print; getline; sub(/^\.$/, dir)}1' "$DIR/requirements_macos.txt" >"$TEMP_REQUIREMENTS_FILE" + # else + # echo "Copying $DIR/requirements_linux.txt to $TEMP_REQUIREMENTS_FILE" >&3 + # echo "Replacing the . for lib to our DIR variable in $TEMP_REQUIREMENTS_FILE." >&3 + # awk -v dir="$DIR" '/#.*kohya_ss.*library/{print; getline; sub(/^\.$/, dir)}1' "$DIR/requirements_linux.txt" >"$TEMP_REQUIREMENTS_FILE" + # fi + + # # Install the Python dependencies from the temporary requirements file + # if [ $VERBOSITY == 2 ]; then + # python -m pip install --quiet --upgrade -r "$TEMP_REQUIREMENTS_FILE" + # else + # python -m pip install --upgrade -r "$TEMP_REQUIREMENTS_FILE" + # fi if [ -n "$VIRTUAL_ENV" ] && ! 
inDocker; then if command -v deactivate >/dev/null; then @@ -302,6 +295,7 @@ install_python_dependencies() { fi } + # Attempt to non-interactively install a default accelerate config file unless specified otherwise. # Documentation for order of precedence locations for configuration file for automated installation: # https://huggingface.co/docs/accelerate/basic_tutorials/launch#custom-configurations @@ -414,7 +408,7 @@ update_kohya_ss() { } # Start OS-specific detection and work -if [[ "$OSTYPE" == "linux-gnu"* ]]; then +if [[ "$OSTYPE" == "lin"* ]]; then # Check if root or sudo root=false if [ "$EUID" = 0 ]; then @@ -482,48 +476,66 @@ if [[ "$OSTYPE" == "linux-gnu"* ]]; then echo "Raw detected distro string: $distro" >&4 echo "Raw detected distro family string: $family" >&4 - echo "Installing Python TK if not found on the system." if "$distro" | grep -qi "Ubuntu" || "$family" | grep -qi "Ubuntu"; then echo "Ubuntu detected." if [ $(dpkg-query -W -f='${Status}' python3-tk 2>/dev/null | grep -c "ok installed") = 0 ]; then - if [ "$root" = true ]; then - apt update -y >&3 && apt install -y python3-tk >&3 - else - echo "This script needs to be run as root or via sudo to install packages." + # if [ "$root" = true ]; then + echo "This script needs YOU to install the missing python3-tk packages. Please install with:" + echo " " + if [ "$RUNPOD" = true ]; then + apt update -y && apt install -y python3-tk + else + echo "sudo apt update -y && sudo apt install -y python3-tk" + fi exit 1 - fi + # else + # echo "This script needs to be run as root or via sudo to install packages." + # exit 1 + # fi else - echo "Python TK found! Skipping install!" + echo "Python TK found..." fi elif "$distro" | grep -Eqi "Fedora|CentOS|Redhat"; then echo "Redhat or Redhat base detected." if ! rpm -qa | grep -qi python3-tkinter; then - if [ "$root" = true ]; then - dnf install python3-tkinter -y >&3 - else - echo "This script needs to be run as root or via sudo to install packages."
+ # if [ "$root" = true ]; then + echo "This script needs you to install the missing python3-tk packages. Please install with:" + echo "sudo dnf install python3-tkinter -y" exit 1 - fi + # else + # echo "This script needs to be run as root or via sudo to install packages." + # exit 1 + # fi + else + echo "Python TK found..." fi elif "$distro" | grep -Eqi "arch" || "$family" | grep -qi "arch"; then echo "Arch Linux or Arch base detected." if ! pacman -Qi tk >/dev/null; then - if [ "$root" = true ]; then - pacman --noconfirm -S tk >&3 - else - echo "This script needs to be run as root or via sudo to install packages." + # if [ "$root" = true ]; then + echo "This script needs you to install the missing python3-tk packages. Please install with:" + echo "pacman --noconfirm -S tk" exit 1 - fi + # else + # echo "This script needs to be run as root or via sudo to install packages." + # exit 1 + # fi + else + echo "Python TK found..." fi elif "$distro" | grep -Eqi "opensuse" || "$family" | grep -qi "opensuse"; then echo "OpenSUSE detected." if ! rpm -qa | grep -qi python-tk; then - if [ "$root" = true ]; then - zypper install -y python-tk >&3 - else - echo "This script needs to be run as root or via sudo to install packages." + # if [ "$root" = true ]; then + echo "This script needs you to install the missing python3-tk packages. Please install with:" + echo "zypper install -y python-tk" exit 1 - fi + # else + # echo "This script needs to be run as root or via sudo to install packages." + # exit 1 + # fi + else + echo "Python TK found..." fi elif [ "$distro" = "None" ] || [ "$family" = "None" ]; then if [ "$distro" = "None" ]; then @@ -555,22 +567,22 @@ if [[ "$OSTYPE" == "linux-gnu"* ]]; then libnvinfer_target="$VENV_DIR/lib/python3.10/site-packages/tensorrt/libnvinfer.so.8" libcudart_target="$VENV_DIR/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/libcudart.so.12" - echo "Checking symlinks now."
- create_symlinks "$libnvinfer_plugin_symlink" "$libnvinfer_plugin_target" - create_symlinks "$libnvinfer_symlink" "$libnvinfer_target" - create_symlinks "$libcudart_symlink" "$libcudart_target" + # echo "Checking symlinks now." + # create_symlinks "$libnvinfer_plugin_symlink" "$libnvinfer_plugin_target" + # create_symlinks "$libnvinfer_symlink" "$libnvinfer_target" + # create_symlinks "$libcudart_symlink" "$libcudart_target" - if [ -d "${VENV_DIR}/lib/python3.10/site-packages/tensorrt/" ]; then - export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${VENV_DIR}/lib/python3.10/site-packages/tensorrt/" - else - echo "${VENV_DIR}/lib/python3.10/site-packages/tensorrt/ not found; not linking library." - fi + # if [ -d "${VENV_DIR}/lib/python3.10/site-packages/tensorrt/" ]; then + # export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${VENV_DIR}/lib/python3.10/site-packages/tensorrt/" + # else + # echo "${VENV_DIR}/lib/python3.10/site-packages/tensorrt/ not found; not linking library." + # fi - if [ -d "${VENV_DIR}/lib/python3.10/site-packages/tensorrt/" ]; then - export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${VENV_DIR}/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/" - else - echo "${VENV_DIR}/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/ not found; not linking library." - fi + # if [ -d "${VENV_DIR}/lib/python3.10/site-packages/tensorrt/" ]; then + # export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${VENV_DIR}/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/" + # else + # echo "${VENV_DIR}/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/ not found; not linking library." + # fi configure_accelerate @@ -578,19 +590,19 @@ if [[ "$OSTYPE" == "linux-gnu"* ]]; then if [ "$SKIP_GUI" = false ]; then if command -v bash >/dev/null; then if [ "$PUBLIC" = false ]; then - bash "$DIR"/gui.sh + bash "$DIR"/gui.sh --headless exit 0 else - bash "$DIR"/gui.sh --share + bash "$DIR"/gui.sh --headless --share exit 0 fi else # This shouldn't happen, but we're going to try to help. 
if [ "$PUBLIC" = false ]; then
-                sh "$DIR"/gui.sh
+                sh "$DIR"/gui.sh --headless
                exit 0
            else
-                sh "$DIR"/gui.sh --share
+                sh "$DIR"/gui.sh --headless --share
                exit 0
            fi
        fi
diff --git a/tools/check_local_modules.py b/setup/check_local_modules.py
similarity index 100%
rename from tools/check_local_modules.py
rename to setup/check_local_modules.py
diff --git a/tools/create_user_files.py b/setup/create_user_files.py
similarity index 100%
rename from tools/create_user_files.py
rename to setup/create_user_files.py
diff --git a/tools/debug_info.py b/setup/debug_info.py
similarity index 100%
rename from tools/debug_info.py
rename to setup/debug_info.py
diff --git a/tools/setup_windows.py b/setup/setup_common.py
similarity index 60%
rename from tools/setup_windows.py
rename to setup/setup_common.py
index 6f00cd2ae..e6f036558 100644
--- a/tools/setup_windows.py
+++ b/setup/setup_common.py
@@ -1,5 +1,6 @@
 import subprocess
 import os
+import re
 import sys
 import filecmp
 import logging
@@ -12,10 +13,6 @@
 errors = 0  # Define the 'errors' variable before using it
 log = logging.getLogger('sd')

-# ANSI escape code for yellow color
-YELLOW = '\033[93m'
-RESET_COLOR = '\033[0m'
-
 # setup console and file logging
 def setup_logging(clean=False):
     #
@@ -91,7 +88,7 @@
     log.addHandler(rh)


-def configure_accelerate():
+def configure_accelerate(run_accelerate=False):
     #
     # This function was taken and adapted from code written by jstayco
     #
@@ -112,9 +109,12 @@ def env_var_exists(var_name):
     )

     if not os.path.exists(source_accelerate_config_file):
-        log.info(
-            f'Could not find the accelerate configuration file in {source_accelerate_config_file}. Please configure accelerate manually by runningthe option in the menu.'
-        )
+        if run_accelerate:
+            run_cmd('accelerate config')
+        else:
+            log.warning(
+                f'Could not find the accelerate configuration file in {source_accelerate_config_file}. Please configure accelerate manually by running the option in the menu.'
+ ) log.debug( f'Source accelerate config location: {source_accelerate_config_file}' @@ -158,13 +158,23 @@ def env_var_exists(var_name): shutil.copyfile( source_accelerate_config_file, target_config_location ) - log.debug( + log.info( f'Copied accelerate config file to: {target_config_location}' ) + else: + if run_accelerate: + run_cmd('accelerate config') + else: + log.warning( + 'Could not automatically configure accelerate. Please manually configure accelerate with the option in the menu or with: accelerate config.' + ) else: - log.info( - 'Could not place the accelerate configuration file. Please configure manually with: accelerate config.' - ) + if run_accelerate: + run_cmd('accelerate config') + else: + log.warning( + 'Could not automatically configure accelerate. Please manually configure accelerate with the option in the menu or with: accelerate config.' + ) def check_torch(): @@ -254,34 +264,13 @@ def git(arg: str, folder: str = None, ignore: bool = False): errors += 1 log.error(f'Error running git: {folder} / {arg}') if 'or stash them' in txt: - log.error(f'Local changes detected: check log for details: {log_file}') + log.error(f'Local changes detected: check log for details...') log.debug(f'Git output: {txt}') return txt -def cudann_install(): - cudnn_src = os.path.join( - os.path.dirname(os.path.realpath(__file__)), '..\cudnn_windows' - ) - cudnn_dest = os.path.join(sysconfig.get_paths()['purelib'], 'torch', 'lib') - - log.info(f'Checking for CUDNN files in {cudnn_dest}...') - if os.path.exists(cudnn_src): - if os.path.exists(cudnn_dest): - # check for different files - filecmp.clear_cache() - for file in os.listdir(cudnn_src): - src_file = os.path.join(cudnn_src, file) - dest_file = os.path.join(cudnn_dest, file) - # if dest file exists, check if it's different - if os.path.exists(dest_file): - shutil.copy2(src_file, cudnn_dest) - log.info('Copied CUDNN 8.6 files to destination') - else: - log.error(f'Installation Failed: "{cudnn_src}" could not be 
found. ') - def pip(arg: str, ignore: bool = False, quiet: bool = False): - arg = arg.replace('>=', '==') + # arg = arg.replace('>=', '==') if not quiet: log.info(f'Installing package: {arg.replace("install", "").replace("--upgrade", "").replace("--no-deps", "").replace("--force", "").replace(" ", " ").strip()}') log.debug(f"Running pip: {arg}") @@ -302,8 +291,11 @@ def installed(package, friendly: str = None): # # This function was adapted from code written by vladimandic: https://github.com/vladmandic/automatic/commits/master # + + # Remove brackets and their contents from the line using regular expressions + # e.g., diffusers[torch]==0.10.2 becomes diffusers==0.10.2 + package = re.sub(r'\[.*?\]', '', package) - ok = True try: if friendly: pkgs = friendly.split() @@ -315,38 +307,40 @@ def installed(package, friendly: str = None): ] pkgs = [ p.split('/')[-1] for p in pkgs - ] # get only package name if installing from url + ] # get only package name if installing from URL + for pkg in pkgs: if '>=' in pkg: - p = pkg.split('>=') + pkg_name, pkg_version = [x.strip() for x in pkg.split('>=')] elif '==' in pkg: - p = pkg.split('==') + pkg_name, pkg_version = [x.strip() for x in pkg.split('==')] else: - p = [pkg] - spec = pkg_resources.working_set.by_key.get( - p[0], None - ) # more reliable than importlib + pkg_name, pkg_version = pkg.strip(), None + + spec = pkg_resources.working_set.by_key.get(pkg_name, None) if spec is None: - spec = pkg_resources.working_set.by_key.get( - p[0].lower(), None - ) # check name variations + spec = pkg_resources.working_set.by_key.get(pkg_name.lower(), None) if spec is None: - spec = pkg_resources.working_set.by_key.get( - p[0].replace('_', '-'), None - ) # check name variations - ok = ok and spec is not None - if ok: - version = pkg_resources.get_distribution(p[0]).version - log.debug(f'Package version found: {p[0]} {version}') - if len(p) > 1: - ok = ok and version == p[1] + spec = 
pkg_resources.working_set.by_key.get(pkg_name.replace('_', '-'), None)
+
+            if spec is not None:
+                version = pkg_resources.get_distribution(pkg_name).version
+                log.debug(f'Package version found: {pkg_name} {version}')
+
+                if pkg_version is not None:
+                    if '>=' in pkg:
+                        ok = version >= pkg_version
+                    else:
+                        ok = version == pkg_version
+
                 if not ok:
-                    log.warning(
-                        f'Package wrong version: {p[0]} {version} required {p[1]}'
-                    )
+                    log.warning(f'Package wrong version: {pkg_name} {version} required {pkg_version}')
+                    return False
             else:
-                log.debug(f'Package version not found: {p[0]}')
-    return ok
+                log.debug(f'Package version not found: {pkg_name}')
+                return False
+
+        return True
     except ModuleNotFoundError:
         log.debug(f'Package not installed: {pkgs}')
         return False
@@ -362,6 +356,9 @@ def install(
     ignore: bool = False,
     reinstall: bool = False,
 ):
+    # Remove anything after '#' in the package variable
+    package = package.split('#')[0].strip()
+
     if reinstall:
         global quick_allowed   # pylint: disable=global-statement
         quick_allowed = False
@@ -369,6 +366,51 @@ def install(
     pip(f'install --upgrade {package}', ignore=ignore)

+
+def process_requirements_line(line):
+    # Remove brackets and their contents from the line using regular expressions
+    # e.g., diffusers[torch]==0.10.2 becomes diffusers==0.10.2
+    package_name = re.sub(r'\[.*?\]', '', line)
+    install(line, package_name)
+
+
+def install_requirements(requirements_file, check_no_verify_flag=False):
+    if check_no_verify_flag:
+        log.info(f'Verifying modules installation status from {requirements_file}...')
+    else:
+        log.info(f'Installing modules from {requirements_file}...')
+    with open(requirements_file, 'r', encoding='utf8') as f:
+        # Read lines from the requirements file, strip whitespace, and filter out empty lines, comments, and lines starting with '.'
+ if check_no_verify_flag: + lines = [ + line.strip() + for line in f.readlines() + if line.strip() != '' + and not line.startswith('#') + and line is not None + and 'no_verify' not in line + ] + else: + lines = [ + line.strip() + for line in f.readlines() + if line.strip() != '' + and not line.startswith('#') + and line is not None + ] + + # Iterate over each line and install the requirements + for line in lines: + # Check if the line starts with '-r' to include another requirements file + if line.startswith('-r'): + # Get the path to the included requirements file + included_file = line[2:].strip() + # Expand the included requirements file recursively + install_requirements(included_file, check_no_verify_flag=check_no_verify_flag) + else: + process_requirements_line(line) + + def ensure_base_requirements(): try: import rich # pylint: disable=unused-import @@ -378,7 +420,7 @@ def ensure_base_requirements(): def run_cmd(run_cmd): try: - subprocess.run(run_cmd, check=True) + subprocess.run(run_cmd, shell=True, check=False, env=os.environ) except subprocess.CalledProcessError as e: print(f'Error occurred while running command: {run_cmd}') print(f'Error: {e}') @@ -417,24 +459,6 @@ def delete_file(file_path): os.remove(file_path) -def install_requirements(requirements_file): - # - # This function was adapted from code written by vladimandic: https://github.com/vladmandic/automatic/commits/master - # - - log.info('Verifying requirements') - with open(requirements_file, 'r', encoding='utf8') as f: - lines = [ - line.strip() - for line in f.readlines() - if line.strip() != '' - and not line.startswith('#') - and line is not None - ] - for line in lines: - install(line) - - def write_to_file(file_path, content): try: with open(file_path, 'w') as file: @@ -444,114 +468,6 @@ def write_to_file(file_path, content): print(f'Error: {e}') -def sync_bits_and_bytes_files(): - import filecmp - - """ - Check for "different" bitsandbytes Files and copy only if necessary. 
- This function is specific for Windows OS. - """ - - # Only execute on Windows - if os.name != 'nt': - print('This function is only applicable to Windows OS.') - return - - try: - log.info(f'Copying bitsandbytes files...') - # Define source and destination directories - source_dir = os.path.join(os.getcwd(), 'bitsandbytes_windows') - - dest_dir_base = os.path.join( - sysconfig.get_paths()['purelib'], 'bitsandbytes' - ) - - # Clear file comparison cache - filecmp.clear_cache() - - # Iterate over each file in source directory - for file in os.listdir(source_dir): - source_file_path = os.path.join(source_dir, file) - - # Decide the destination directory based on file name - if file in ('main.py', 'paths.py'): - dest_dir = os.path.join(dest_dir_base, 'cuda_setup') - else: - dest_dir = dest_dir_base - - dest_file_path = os.path.join(dest_dir, file) - - # Compare the source file with the destination file - if os.path.exists(dest_file_path) and filecmp.cmp( - source_file_path, dest_file_path - ): - log.debug( - f'Skipping {source_file_path} as it already exists in {dest_dir}' - ) - else: - # Copy file from source to destination, maintaining original file's metadata - log.debug(f'Copy {source_file_path} to {dest_dir}') - shutil.copy2(source_file_path, dest_dir) - - except FileNotFoundError as fnf_error: - log.error(f'File not found error: {fnf_error}') - except PermissionError as perm_error: - log.error(f'Permission error: {perm_error}') - except Exception as e: - log.error(f'An unexpected error occurred: {e}') - - -def install_kohya_ss_torch1(): - check_repo_version() - check_python() - - # Upgrade pip if needed - install('--upgrade pip') - - if check_torch() == 2: - input( - f'{YELLOW}\nTorch 2 is already installed in the venv. 
To install Torch 1 delete the venv and re-run setup.bat\n\nHit any key to acknowledge.{RESET_COLOR}' - ) - return - - install( - 'torch==1.12.1+cu116 torchvision==0.13.1+cu116 --index-url https://download.pytorch.org/whl/cu116', - 'torch torchvision' - ) - install( - 'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl -U -I --no-deps', - 'xformers-0.0.14' - ) - install_requirements('requirements_windows_torch1.txt') - sync_bits_and_bytes_files() - configure_accelerate() - # run_cmd(f'accelerate config') - - -def install_kohya_ss_torch2(): - check_repo_version() - check_python() - - # Upgrade pip if needed - install('--upgrade pip') - - if check_torch() == 1: - input( - f'{YELLOW}\nTorch 1 is already installed in the venv. To install Torch 2 delete the venv and re-run setup.bat\n\nHit any key to acknowledge.{RESET_COLOR}' - ) - return - - install( - 'torch==2.0.1+cu118 torchvision==0.15.2+cu118 --index-url https://download.pytorch.org/whl/cu118', - 'torch torchvision' - ) - install_requirements('requirements_windows_torch2.txt') - # install('https://huggingface.co/r4ziel/xformers_pre_built/resolve/main/triton-2.0.0-cp310-cp310-win_amd64.whl', 'triton', reinstall=reinstall) - sync_bits_and_bytes_files() - configure_accelerate() - # run_cmd(f'accelerate config') - - def clear_screen(): # Check the current operating system to execute the correct clear screen command if os.name == 'nt': # If the operating system is Windows @@ -559,52 +475,3 @@ def clear_screen(): else: # If the operating system is Linux or Mac os.system('clear') - -def main_menu(): - clear_screen() - while True: - print('\nKohya_ss GUI setup menu:\n') - print('1. Install kohya_ss gui') - print('2. Install cudann files') - print('3. Manually configure accelerate') - print('4. Start Kohya_ss GUI in browser') - print('5. Quit') - - choice = input('\nEnter your choice: ') - print('') - - if choice == '1': - while True: - print('1. 
Torch 1') - print('2. Torch 2') - print('3. Cancel') - choice_torch = input('\nEnter your choice: ') - print('') - - if choice_torch == '1': - install_kohya_ss_torch1() - break - elif choice_torch == '2': - install_kohya_ss_torch2() - break - elif choice_torch == '3': - break - else: - print('Invalid choice. Please enter a number between 1-3.') - elif choice == '2': - cudann_install() - elif choice == '3': - run_cmd('accelerate config') - elif choice == '4': - subprocess.Popen('start cmd /c .\gui.bat --inbrowser', shell=True) - elif choice == '5': - print('Quitting the program.') - break - else: - print('Invalid choice. Please enter a number between 1-5.') - - -if __name__ == '__main__': - ensure_base_requirements() - setup_logging() - main_menu() diff --git a/setup/setup_linux.py b/setup/setup_linux.py new file mode 100644 index 000000000..c324305b4 --- /dev/null +++ b/setup/setup_linux.py @@ -0,0 +1,38 @@ +import argparse +import logging +import setup_common + +errors = 0 # Define the 'errors' variable before using it +log = logging.getLogger('sd') + +# ANSI escape code for yellow color +YELLOW = '\033[93m' +RESET_COLOR = '\033[0m' + + +def install_kohya_ss(platform_requirements_file): + setup_common.check_repo_version() + setup_common.check_python() + + # Upgrade pip if needed + setup_common.install('--upgrade pip') + setup_common.install_requirements(platform_requirements_file, check_no_verify_flag=False) + setup_common.configure_accelerate(run_accelerate=False) + # run_cmd(f'accelerate config') + + +def main_menu(platform_requirements_file): + log.info("Installing python dependencies. 
This could take a few minutes as it downloads files.") + log.info("If this operation ever runs too long, you can rerun this script in verbose mode to check.") + install_kohya_ss(platform_requirements_file) + + +if __name__ == '__main__': + setup_common.ensure_base_requirements() + setup_common.setup_logging() + + parser = argparse.ArgumentParser() + parser.add_argument('--platform-requirements-file', dest='platform_requirements_file', default='requirements_linux.txt', help='Path to the platform-specific requirements file') + args = parser.parse_args() + + main_menu(args.platform_requirements_file) diff --git a/setup/setup_windows.py b/setup/setup_windows.py new file mode 100644 index 000000000..39197df0a --- /dev/null +++ b/setup/setup_windows.py @@ -0,0 +1,202 @@ +import subprocess +import os +import filecmp +import logging +import shutil +import sysconfig +import setup_common + +errors = 0 # Define the 'errors' variable before using it +log = logging.getLogger('sd') + +# ANSI escape code for yellow color +YELLOW = '\033[93m' +RESET_COLOR = '\033[0m' + + +def cudann_install(): + cudnn_src = os.path.join( + os.path.dirname(os.path.realpath(__file__)), '..\cudnn_windows' + ) + cudnn_dest = os.path.join(sysconfig.get_paths()['purelib'], 'torch', 'lib') + + log.info(f'Checking for CUDNN files in {cudnn_dest}...') + if os.path.exists(cudnn_src): + if os.path.exists(cudnn_dest): + # check for different files + filecmp.clear_cache() + for file in os.listdir(cudnn_src): + src_file = os.path.join(cudnn_src, file) + dest_file = os.path.join(cudnn_dest, file) + # if dest file exists, check if it's different + if os.path.exists(dest_file): + if not filecmp.cmp(src_file, dest_file, shallow=False): + shutil.copy2(src_file, cudnn_dest) + else: + shutil.copy2(src_file, cudnn_dest) + log.info('Copied CUDNN 8.6 files to destination') + else: + log.warning(f'Destination directory {cudnn_dest} does not exist') + else: + log.error(f'Installation Failed: "{cudnn_src}" could not be 
found.') + + +def sync_bits_and_bytes_files(): + import filecmp + + """ + Check for "different" bitsandbytes Files and copy only if necessary. + This function is specific for Windows OS. + """ + + # Only execute on Windows + if os.name != 'nt': + print('This function is only applicable to Windows OS.') + return + + try: + log.info(f'Copying bitsandbytes files...') + # Define source and destination directories + source_dir = os.path.join(os.getcwd(), 'bitsandbytes_windows') + + dest_dir_base = os.path.join( + sysconfig.get_paths()['purelib'], 'bitsandbytes' + ) + + # Clear file comparison cache + filecmp.clear_cache() + + # Iterate over each file in source directory + for file in os.listdir(source_dir): + source_file_path = os.path.join(source_dir, file) + + # Decide the destination directory based on file name + if file in ('main.py', 'paths.py'): + dest_dir = os.path.join(dest_dir_base, 'cuda_setup') + else: + dest_dir = dest_dir_base + + dest_file_path = os.path.join(dest_dir, file) + + # Compare the source file with the destination file + if os.path.exists(dest_file_path) and filecmp.cmp( + source_file_path, dest_file_path + ): + log.debug( + f'Skipping {source_file_path} as it already exists in {dest_dir}' + ) + else: + # Copy file from source to destination, maintaining original file's metadata + log.debug(f'Copy {source_file_path} to {dest_dir}') + shutil.copy2(source_file_path, dest_dir) + + except FileNotFoundError as fnf_error: + log.error(f'File not found error: {fnf_error}') + except PermissionError as perm_error: + log.error(f'Permission error: {perm_error}') + except Exception as e: + log.error(f'An unexpected error occurred: {e}') + + +def install_kohya_ss_torch1(): + setup_common.check_repo_version() + setup_common.check_python() + + # Upgrade pip if needed + setup_common.install('--upgrade pip') + + if setup_common.check_torch() == 2: + input( + f'{YELLOW}\nTorch 2 is already installed in the venv. 
To install Torch 1 delete the venv and re-run setup.bat\n\nHit enter to continue...{RESET_COLOR}' + ) + return + + # setup_common.install( + # 'torch==1.12.1+cu116 torchvision==0.13.1+cu116 --index-url https://download.pytorch.org/whl/cu116', + # 'torch torchvision' + # ) + # setup_common.install( + # 'https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl -U -I --no-deps', + # 'xformers-0.0.14' + # ) + setup_common.install_requirements('requirements_windows_torch1.txt', check_no_verify_flag=False) + # sync_bits_and_bytes_files() + setup_common.configure_accelerate(run_accelerate=True) + # run_cmd(f'accelerate config') + + +def install_kohya_ss_torch2(): + setup_common.check_repo_version() + setup_common.check_python() + + # Upgrade pip if needed + setup_common.install('--upgrade pip') + + if setup_common.check_torch() == 1: + input( + f'{YELLOW}\nTorch 1 is already installed in the venv. To install Torch 2 delete the venv and re-run setup.bat\n\nHit any key to acknowledge.{RESET_COLOR}' + ) + return + + # setup_common.install( + # 'torch==2.0.1+cu118 torchvision==0.15.2+cu118 --index-url https://download.pytorch.org/whl/cu118', + # 'torch torchvision' + # ) + setup_common.install_requirements('requirements_windows_torch2.txt', check_no_verify_flag=False) + # install('https://huggingface.co/r4ziel/xformers_pre_built/resolve/main/triton-2.0.0-cp310-cp310-win_amd64.whl', 'triton', reinstall=reinstall) + # sync_bits_and_bytes_files() + setup_common.configure_accelerate(run_accelerate=True) + # run_cmd(f'accelerate config') + + +def main_menu(): + setup_common.clear_screen() + while True: + print('\nKohya_ss GUI setup menu:\n') + print('1. Install kohya_ss gui') + print('2. (Optional) Install cudann files') + print('3. (Optional) Install bitsandbytes-windows') + print('4. (Optional) Manually configure accelerate') + print('5. (Optional) Start Kohya_ss GUI in browser') + print('6. 
Quit')
+
+        choice = input('\nEnter your choice: ')
+        print('')
+
+        if choice == '1':
+            while True:
+                print('1. Torch 1')
+                print('2. Torch 2')
+                print('3. Cancel')
+                choice_torch = input('\nEnter your choice: ')
+                print('')
+
+                if choice_torch == '1':
+                    install_kohya_ss_torch1()
+                    break
+                elif choice_torch == '2':
+                    install_kohya_ss_torch2()
+                    break
+                elif choice_torch == '3':
+                    break
+                else:
+                    print('Invalid choice. Please enter a number between 1-3.')
+        elif choice == '2':
+            cudann_install()
+        elif choice == '3':
+            setup_common.install('--upgrade bitsandbytes-windows', reinstall=True)
+        elif choice == '4':
+            setup_common.run_cmd('accelerate config')
+        elif choice == '5':
+            subprocess.Popen('start cmd /k .\gui.bat --inbrowser', shell=True) # /k keep the terminal open on quit. /c would close the terminal instead
+        elif choice == '6':
+            print('Quitting the program.')
+            break
+        else:
+            print('Invalid choice. Please enter a number between 1-6.')
+
+
+if __name__ == '__main__':
+    setup_common.ensure_base_requirements()
+    setup_common.setup_logging()
+    main_menu()
diff --git a/tools/update_bitsandbytes.py b/setup/update_bitsandbytes.py
similarity index 100%
rename from tools/update_bitsandbytes.py
rename to setup/update_bitsandbytes.py
diff --git a/tools/validate_requirements_unix.py b/setup/validate_requirements.py
similarity index 81%
rename from tools/validate_requirements_unix.py
rename to setup/validate_requirements.py
index c09f91b5a..73e94eb65 100644
--- a/tools/validate_requirements_unix.py
+++ b/setup/validate_requirements.py
@@ -3,15 +3,14 @@
 import sys
 import shutil
 import argparse
-import subprocess
-from setup_windows import check_repo_version
+import setup_common

 # Get the absolute path of the current file's directory (Kohua_SS project directory)
 project_directory = os.path.dirname(os.path.abspath(__file__))

-# Check if the "tools" directory is present in the project_directory
-if "tools" in project_directory:
-    # If the "tools" directory is present, move 
one level up to the parent directory +# Check if the "setup" directory is present in the project_directory +if "setup" in project_directory: + # If the "setup" directory is present, move one level up to the parent directory project_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Add the project directory to the beginning of the Python search path @@ -22,7 +21,6 @@ # Set up logging log = setup_logging() - def check_torch(): # Check for nVidia toolkit or AMD toolkit if shutil.which('nvidia-smi') is not None or os.path.exists( @@ -73,13 +71,8 @@ def check_torch(): sys.exit(1) -def install_requirements(requirements_file): - log.info('Verifying requirements') - subprocess.run(f'"{sys.executable}" -m pip install -U -r "{requirements_file}"', shell=True, check=False, env=os.environ) - - def main(): - check_repo_version() + setup_common.check_repo_version() # Parse command line arguments parser = argparse.ArgumentParser( description='Validate that requirements are satisfied.' @@ -93,7 +86,15 @@ def main(): parser.add_argument('--debug', action='store_true', help='Debug on') args = parser.parse_args() - install_requirements(args.requirements) + torch_ver = check_torch() + + if args.requirements: + setup_common.install_requirements(args.requirements, check_no_verify_flag=True) + else: + if torch_ver == 1: + setup_common.install_requirements('requirements_windows_torch1.txt', check_no_verify_flag=True) + else: + setup_common.install_requirements('requirements_windows_torch2.txt', check_no_verify_flag=True) if __name__ == '__main__': diff --git a/setup_legacy.sh b/setup_legacy.sh new file mode 100755 index 000000000..a95939959 --- /dev/null +++ b/setup_legacy.sh @@ -0,0 +1,661 @@ +#!/usr/bin/env bash + +# This file will be the host environment setup file for all operating systems other than base Windows. + +# Set the required package versions here. 
+TENSORFLOW_MACOS_VERSION="2.12.0" +TENSORFLOW_METAL_VERSION="0.8.0" + +display_help() { + cat <&2" #Don't change anything higher than the maximum verbosity allowed. +done + +for v in $( #From the verbosity level one higher than requested, through the maximum; + seq $((VERBOSITY + 1)) $MAXVERBOSITY +); do + (("$v" > "2")) && eval exec "$v>/dev/null" #Redirect these to bitbucket, provided that they don't match stdout and stderr. +done + +# Example of how to use the verbosity levels. +# printf "%s\n" "This message is seen at verbosity level 1 and above." >&3 +# printf "%s\n" "This message is seen at verbosity level 2 and above." >&4 +# printf "%s\n" "This message is seen at verbosity level 3 and above." >&5 + +# Debug variable dump at max verbosity +echo "BRANCH: $BRANCH +DIR: $DIR +GIT_REPO: $GIT_REPO +INTERACTIVE: $INTERACTIVE +PUBLIC: $PUBLIC +RUNPOD: $RUNPOD +SKIP_SPACE_CHECK: $SKIP_SPACE_CHECK +VERBOSITY: $VERBOSITY +Script directory is ${SCRIPT_DIR}." >&5 + +# This must be set after the getopts loop to account for $DIR changes. +PARENT_DIR="$(dirname "${DIR}")" +VENV_DIR="$DIR/venv" + +if [ -w "$PARENT_DIR" ] && [ ! -d "$DIR" ]; then + echo "Creating install folder ${DIR}." + mkdir "$DIR" +fi + +if [ ! -w "$DIR" ]; then + echo "We cannot write to ${DIR}." + echo "Please ensure the install directory is accurate and you have the correct permissions." + exit 1 +fi + +# Shared functions +# This checks for free space on the installation drive and returns that in Gb. +size_available() { + local folder + if [ -d "$DIR" ]; then + folder="$DIR" + elif [ -d "$PARENT_DIR" ]; then + folder="$PARENT_DIR" + elif [ -d "$(echo "$DIR" | cut -d "/" -f2)" ]; then + folder="$(echo "$DIR" | cut -d "/" -f2)" + else + echo "We are assuming a root drive install for space-checking purposes." 
+ folder='/' + fi + + local FREESPACEINKB + FREESPACEINKB="$(df -Pk "$folder" | sed 1d | grep -v used | awk '{ print $4 "\t" }')" + echo "Detected available space in Kb: $FREESPACEINKB" >&5 + local FREESPACEINGB + FREESPACEINGB=$((FREESPACEINKB / 1024 / 1024)) + echo "$FREESPACEINGB" +} + +# The expected usage is create_symlinks symlink target_file +create_symlinks() { + local symlink="$1" + local target_file="$2" + + echo "Checking symlinks now." + + # Check if the symlink exists + if [ -L "$symlink" ]; then + # Check if the linked file exists and points to the expected file + if [ -e "$symlink" ] && [ "$(readlink "$symlink")" == "$target_file" ]; then + echo "$(basename "$symlink") symlink looks fine. Skipping." + else + if [ -f "$target_file" ]; then + echo "Broken symlink detected. Recreating $(basename "$symlink")." + rm "$symlink" && ln -s "$target_file" "$symlink" + else + echo "$target_file does not exist. Nothing to link." + fi + fi + else + echo "Linking $(basename "$symlink")." + ln -s "$target_file" "$symlink" + fi +} + + +install_python_dependencies() { + local TEMP_REQUIREMENTS_FILE + + # Switch to local virtual env + echo "Switching to virtual Python environment." + if ! inDocker; then + if command -v python3.10 >/dev/null; then + python3.10 -m venv "$DIR/venv" + elif command -v python3 >/dev/null; then + python3 -m venv "$DIR/venv" + else + echo "Valid python3 or python3.10 binary not found." + echo "Cannot proceed with the python steps." + return 1 + fi + + # Activate the virtual environment + source "$DIR/venv/bin/activate" + fi + + # Updating pip if there is one + echo "Checking for pip updates before Python operations." + pip install --upgrade pip + + echo "Installing python dependencies. This could take a few minutes as it downloads files." + echo "If this operation ever runs too long, you can rerun this script in verbose mode to check." 
+
+    case "$OSTYPE" in
+        "linux-gnu"*)
+            pip install torch==2.0.1+cu118 torchvision==0.15.2+cu118 \
+                --extra-index-url https://download.pytorch.org/whl/cu118
+            pip install --upgrade xformers==0.0.20
+            ;;
+        "darwin"*)
+            pip install torch==2.0.0 torchvision==0.15.1 \
+                -f https://download.pytorch.org/whl/cpu/torch_stable.html
+            # Check if the processor is Apple Silicon (arm64)
+            if [[ "$(uname -m)" == "arm64" ]]; then
+                pip install tensorflow-metal=="$TENSORFLOW_METAL_VERSION"
+            else
+                pip install tensorflow-macos=="$TENSORFLOW_MACOS_VERSION"
+            fi
+            ;;
+    esac
+
+    if [ "$RUNPOD" = true ]; then
+        echo "Installing tensorrt."
+        pip install tensorrt
+    fi
+
+    # DEBUG ONLY (Update this version number to whatever PyCharm recommends)
+    # pip install pydevd-pycharm~=223.8836.43
+
+    # Create a temporary requirements file
+    TEMP_REQUIREMENTS_FILE=$(mktemp)
+
+    if [[ "$OSTYPE" == "darwin"* ]]; then
+        echo "Copying $DIR/requirements_macos.txt to $TEMP_REQUIREMENTS_FILE" >&3
+        echo "Replacing the . for lib to our DIR variable in $TEMP_REQUIREMENTS_FILE." >&3
+        awk -v dir="$DIR" '/#.*kohya_ss.*library/{print; getline; sub(/^\.$/, dir)}1' "$DIR/requirements_macos.txt" >"$TEMP_REQUIREMENTS_FILE"
+    else
+        echo "Copying $DIR/requirements_linux.txt to $TEMP_REQUIREMENTS_FILE" >&3
+        echo "Replacing the . for lib to our DIR variable in $TEMP_REQUIREMENTS_FILE." >&3
+        awk -v dir="$DIR" '/#.*kohya_ss.*library/{print; getline; sub(/^\.$/, dir)}1' "$DIR/requirements_linux.txt" >"$TEMP_REQUIREMENTS_FILE"
+    fi
+
+    # Install the Python dependencies from the temporary requirements file
+    if [ $VERBOSITY == 2 ]; then
+        python -m pip install --quiet --upgrade -r "$TEMP_REQUIREMENTS_FILE"
+    else
+        python -m pip install --upgrade -r "$TEMP_REQUIREMENTS_FILE"
+    fi
+
+    if [ -n "$VIRTUAL_ENV" ] && ! inDocker; then
+        if command -v deactivate >/dev/null; then
+            echo "Exiting Python virtual environment."
+            deactivate
+        else
+            echo "deactivate command not found. 
Could still be in the Python virtual environment." + fi + fi +} + + +# Attempt to non-interactively install a default accelerate config file unless specified otherwise. +# Documentation for order of precedence locations for configuration file for automated installation: +# https://huggingface.co/docs/accelerate/basic_tutorials/launch#custom-configurations +configure_accelerate() { + echo "Source accelerate config location: $DIR/config_files/accelerate/default_config.yaml" >&3 + if [ "$INTERACTIVE" = true ]; then + accelerate config + else + if env_var_exists HF_HOME; then + if [ ! -f "$HF_HOME/accelerate/default_config.yaml" ]; then + mkdir -p "$HF_HOME/accelerate/" && + echo "Target accelerate config location: $HF_HOME/accelerate/default_config.yaml" >&3 + cp "$DIR/config_files/accelerate/default_config.yaml" "$HF_HOME/accelerate/default_config.yaml" && + echo "Copied accelerate config file to: $HF_HOME/accelerate/default_config.yaml" + fi + elif env_var_exists XDG_CACHE_HOME; then + if [ ! -f "$XDG_CACHE_HOME/huggingface/accelerate" ]; then + mkdir -p "$XDG_CACHE_HOME/huggingface/accelerate" && + echo "Target accelerate config location: $XDG_CACHE_HOME/accelerate/default_config.yaml" >&3 + cp "$DIR/config_files/accelerate/default_config.yaml" "$XDG_CACHE_HOME/huggingface/accelerate/default_config.yaml" && + echo "Copied accelerate config file to: $XDG_CACHE_HOME/huggingface/accelerate/default_config.yaml" + fi + elif env_var_exists HOME; then + if [ ! -f "$HOME/.cache/huggingface/accelerate" ]; then + mkdir -p "$HOME/.cache/huggingface/accelerate" && + echo "Target accelerate config location: $HOME/accelerate/default_config.yaml" >&3 + cp "$DIR/config_files/accelerate/default_config.yaml" "$HOME/.cache/huggingface/accelerate/default_config.yaml" && + echo "Copying accelerate config file to: $HOME/.cache/huggingface/accelerate/default_config.yaml" + fi + else + echo "Could not place the accelerate configuration file. Please configure manually." 
+ sleep 2 + accelerate config + fi + fi +} + +# Offer a warning and opportunity to cancel the installation if < 10Gb of Free Space detected +check_storage_space() { + if [ "$SKIP_SPACE_CHECK" = false ]; then + if [ "$(size_available)" -lt 10 ]; then + echo "You have less than 10Gb of free space. This installation may fail." + MSGTIMEOUT=10 # In seconds + MESSAGE="Continuing in..." + echo "Press control-c to cancel the installation." + for ((i = MSGTIMEOUT; i >= 0; i--)); do + printf "\r${MESSAGE} %ss. " "${i}" + sleep 1 + done + fi + fi +} + +isContainerOrPod() { + local cgroup=/proc/1/cgroup + test -f $cgroup && (grep -qE ':cpuset:/(docker|kubepods)' $cgroup || grep -q ':/docker/' $cgroup) +} + +isDockerBuildkit() { + local cgroup=/proc/1/cgroup + test -f $cgroup && grep -q ':cpuset:/docker/buildkit' $cgroup +} + +isDockerContainer() { + [ -e /.dockerenv ] +} + +inDocker() { + if isContainerOrPod || isDockerBuildkit || isDockerContainer; then + return 0 + else + return 1 + fi +} + +# These are the git operations that will run to update or clone the repo +update_kohya_ss() { + if [ "$SKIP_GIT_UPDATE" = false ]; then + if command -v git >/dev/null; then + # First, we make sure there are no changes that need to be made in git, so no work is lost. + if [ "$(git -C "$DIR" status --porcelain=v1 2>/dev/null | wc -l)" -gt 0 ] && + echo "These files need to be committed or discarded: " >&4 && + git -C "$DIR" status >&4; then + echo "There are changes that need to be committed or discarded in the repo in $DIR." + echo "Commit those changes or run this script with -n to skip git operations entirely." + exit 1 + fi + + echo "Attempting to clone $GIT_REPO." + if [ ! -d "$DIR/.git" ]; then + echo "Cloning and switching to $GIT_REPO:$BRANCH" >&4 + git -C "$PARENT_DIR" clone -b "$BRANCH" "$GIT_REPO" "$(basename "$DIR")" >&3 + git -C "$DIR" switch "$BRANCH" >&4 + else + echo "git repo detected. Attempting to update repository instead." 
+ echo "Updating: $GIT_REPO" + git -C "$DIR" pull "$GIT_REPO" "$BRANCH" >&3 + if ! git -C "$DIR" switch "$BRANCH" >&4; then + echo "Branch $BRANCH did not exist. Creating it." >&4 + git -C "$DIR" switch -c "$BRANCH" >&4 + fi + fi + else + echo "You need to install git." + echo "Rerun this after installing git or run this script with -n to skip the git operations." + fi + else + echo "Skipping git operations." + fi +} + +# Start OS-specific detection and work +if [[ "$OSTYPE" == "linux-gnu"* ]]; then + # Check if root or sudo + root=false + if [ "$EUID" = 0 ]; then + root=true + elif command -v id >/dev/null && [ "$(id -u)" = 0 ]; then + root=true + elif [ "$UID" = 0 ]; then + root=true + fi + + get_distro_name() { + local line + if [ -f /etc/os-release ]; then + # We search for the line starting with ID= + # Then we remove the ID= prefix to get the name itself + line="$(grep -Ei '^ID=' /etc/os-release)" + echo "Raw detected os-release distro line: $line" >&5 + line=${line##*=} + echo "$line" + return 0 + elif command -v python >/dev/null; then + line="$(python -mplatform)" + echo "$line" + return 0 + elif command -v python3 >/dev/null; then + line="$(python3 -mplatform)" + echo "$line" + return 0 + else + line="None" + echo "$line" + return 1 + fi + } + + # We search for the line starting with ID_LIKE= + # Then we remove the ID_LIKE= prefix to get the name itself + # This is the "type" of distro. For example, Ubuntu returns "debian". 
+ get_distro_family() { + local line + if [ -f /etc/os-release ]; then + if grep -Eiq '^ID_LIKE=' /etc/os-release >/dev/null; then + line="$(grep -Ei '^ID_LIKE=' /etc/os-release)" + echo "Raw detected os-release distro family line: $line" >&5 + line=${line##*=} + echo "$line" + return 0 + else + line="None" + echo "$line" + return 1 + fi + else + line="None" + echo "$line" + return 1 + fi + } + + check_storage_space + update_kohya_ss + + distro=$(get_distro_name) + family=$(get_distro_family) + echo "Raw detected distro string: $distro" >&4 + echo "Raw detected distro family string: $family" >&4 + + if echo "$distro" | grep -qi "Ubuntu" || echo "$family" | grep -qi "Ubuntu"; then + echo "Ubuntu detected." + if [ $(dpkg-query -W -f='${Status}' python3-tk 2>/dev/null | grep -c "ok installed") = 0 ]; then + # if [ "$root" = true ]; then + echo "This script needs you to install the missing python3-tk packages. Please install with:" + echo " " + echo "sudo apt update -y && sudo apt install -y python3-tk" + exit 1 + # else + # echo "This script needs to be run as root or via sudo to install packages." + # exit 1 + # fi + else + echo "Python TK found..." + fi + elif echo "$distro" | grep -Eqi "Fedora|CentOS|Redhat"; then + echo "Redhat or Redhat base detected." + if ! rpm -qa | grep -qi python3-tkinter; then + # if [ "$root" = true ]; then + echo "This script needs you to install the missing python3-tk packages. Please install with:\n\n" + echo "sudo dnf install python3-tkinter -y >&3" + exit 1 + # else + # echo "This script needs to be run as root or via sudo to install packages." + # exit 1 + # fi + else + echo "Python TK found..." + fi + elif echo "$distro" | grep -Eqi "arch" || echo "$family" | grep -qi "arch"; then + echo "Arch Linux or Arch base detected." + if ! pacman -Qi tk >/dev/null; then + # if [ "$root" = true ]; then + echo "This script needs you to install the missing python3-tk packages. 
Please install with:\n\n" + echo "pacman --noconfirm -S tk >&3" + exit 1 + # else + # echo "This script needs to be run as root or via sudo to install packages." + # exit 1 + # fi + else + echo "Python TK found..." + fi + elif echo "$distro" | grep -Eqi "opensuse" || echo "$family" | grep -qi "opensuse"; then + echo "OpenSUSE detected." + if ! rpm -qa | grep -qi python-tk; then + # if [ "$root" = true ]; then + echo "This script needs you to install the missing python3-tk packages. Please install with:\n\n" + echo "zypper install -y python-tk >&3" + exit 1 + # else + # echo "This script needs to be run as root or via sudo to install packages." + # exit 1 + # fi + else + echo "Python TK found..." + fi + elif [ "$distro" = "None" ] || [ "$family" = "None" ]; then + if [ "$distro" = "None" ]; then + echo "We could not detect your distribution of Linux. Please file a bug report on github with the contents of your /etc/os-release file." + fi + + if [ "$family" = "None" ]; then + echo "We could not detect the family of your Linux distribution. Please file a bug report on github with the contents of your /etc/os-release file." + fi + fi + + install_python_dependencies + + # We need just a little bit more setup for non-interactive environments + if [ "$RUNPOD" = true ]; then + if inDocker; then + # We get the site-packages from python itself, then cut the string, so no other code changes required. 
+ VENV_DIR=$(python -c "import site; print(site.getsitepackages()[0])") + VENV_DIR="${VENV_DIR%/lib/python3.10/site-packages}" + fi + + # Symlink paths + libnvinfer_plugin_symlink="$VENV_DIR/lib/python3.10/site-packages/tensorrt/libnvinfer_plugin.so.7" + libnvinfer_symlink="$VENV_DIR/lib/python3.10/site-packages/tensorrt/libnvinfer.so.7" + libcudart_symlink="$VENV_DIR/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/libcudart.so.11.0" + + # Target file paths + libnvinfer_plugin_target="$VENV_DIR/lib/python3.10/site-packages/tensorrt/libnvinfer_plugin.so.8" + libnvinfer_target="$VENV_DIR/lib/python3.10/site-packages/tensorrt/libnvinfer.so.8" + libcudart_target="$VENV_DIR/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/libcudart.so.12" + + echo "Checking symlinks now." + create_symlinks "$libnvinfer_plugin_symlink" "$libnvinfer_plugin_target" + create_symlinks "$libnvinfer_symlink" "$libnvinfer_target" + create_symlinks "$libcudart_symlink" "$libcudart_target" + + if [ -d "${VENV_DIR}/lib/python3.10/site-packages/tensorrt/" ]; then + export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${VENV_DIR}/lib/python3.10/site-packages/tensorrt/" + else + echo "${VENV_DIR}/lib/python3.10/site-packages/tensorrt/ not found; not linking library." + fi + + if [ -d "${VENV_DIR}/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/" ]; then + export LD_LIBRARY_PATH="${LD_LIBRARY_PATH}:${VENV_DIR}/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/" + else + echo "${VENV_DIR}/lib/python3.10/site-packages/nvidia/cuda_runtime/lib/ not found; not linking library." + fi + + configure_accelerate + + # This is a non-interactive environment, so just directly call gui.sh after all setup steps are complete. + if [ "$SKIP_GUI" = false ]; then + if command -v bash >/dev/null; then + if [ "$PUBLIC" = false ]; then + bash "$DIR"/gui.sh + exit 0 + else + bash "$DIR"/gui.sh --share + exit 0 + fi + else + # This shouldn't happen, but we're going to try to help. 
+ if [ "$PUBLIC" = false ]; then + sh "$DIR"/gui.sh + exit 0 + else + sh "$DIR"/gui.sh --share + exit 0 + fi + fi + fi + fi + + echo -e "Setup finished! Run \e[0;92m./gui.sh\e[0m to start." + echo "Please note if you'd like to expose your public server you need to run ./gui.sh --share" +elif [[ "$OSTYPE" == "darwin"* ]]; then + # The initial setup script to prep the environment on macOS + # xformers has been omitted as that is for Nvidia GPUs only + + if ! command -v brew >/dev/null; then + echo "Please install homebrew first. This is a requirement for the remaining setup." + echo "You can find that here: https://brew.sh" + #shellcheck disable=SC2016 + echo 'The "brew" command should be in $PATH to be detected.' + exit 1 + fi + + check_storage_space + + # Install base python packages + echo "Installing Python 3.10 if not found." + if ! brew ls --versions python@3.10 >/dev/null; then + echo "Installing Python 3.10." + brew install python@3.10 >&3 + else + echo "Python 3.10 found!" + fi + echo "Installing Python-TK 3.10 if not found." + if ! brew ls --versions python-tk@3.10 >/dev/null; then + echo "Installing Python TK 3.10." + brew install python-tk@3.10 >&3 + else + echo "Python Tkinter 3.10 found!" + fi + + update_kohya_ss + + if ! install_python_dependencies; then + echo "You may need to install Python. The command for this is brew install python@3.10." + fi + + configure_accelerate + echo -e "Setup finished! Run ./gui.sh to start." +elif [[ "$OSTYPE" == "cygwin" ]]; then + # Cygwin is a standalone suite of Linux utilities on Windows + echo "This hasn't been validated on cygwin yet." +elif [[ "$OSTYPE" == "msys" ]]; then + # MinGW has the msys environment which is a standalone suite of Linux utilities on Windows + # "git bash" on Windows may also be detected as msys. + echo "This hasn't been validated in msys (mingw) on Windows yet." 
+fi diff --git a/test/config/dreambooth-AdamW.json b/test/config/dreambooth-AdamW.json new file mode 100644 index 000000000..1f41f7d36 --- /dev/null +++ b/test/config/dreambooth-AdamW.json @@ -0,0 +1,73 @@ +{ + "adaptive_noise_scale": 0, + "additional_parameters": "", + "bucket_no_upscale": true, + "bucket_reso_steps": 64, + "cache_latents": true, + "cache_latents_to_disk": false, + "caption_dropout_every_n_epochs": 0.0, + "caption_dropout_rate": 0.05, + "caption_extension": "", + "clip_skip": 2, + "color_aug": false, + "enable_bucket": true, + "epoch": 1, + "flip_aug": false, + "full_fp16": false, + "gradient_accumulation_steps": 1.0, + "gradient_checkpointing": false, + "keep_tokens": "0", + "learning_rate": 5e-05, + "logging_dir": "./test/logs", + "lr_scheduler": "constant", + "lr_warmup": 0, + "max_data_loader_n_workers": "0", + "max_resolution": "512,512", + "max_token_length": "75", + "max_train_epochs": "", + "mem_eff_attn": false, + "min_snr_gamma": 0, + "mixed_precision": "bf16", + "model_list": "runwayml/stable-diffusion-v1-5", + "multires_noise_discount": 0, + "multires_noise_iterations": 0, + "no_token_padding": false, + "noise_offset": "0.05", + "noise_offset_type": "Original", + "num_cpu_threads_per_process": 2, + "optimizer": "AdamW", + "optimizer_args": "", + "output_dir": "./test/output", + "output_name": "db-AdamW", + "persistent_data_loader_workers": false, + "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5", + "prior_loss_weight": 1.0, + "random_crop": false, + "reg_data_dir": "", + "resume": "", + "sample_every_n_epochs": 0, + "sample_every_n_steps": 25, + "sample_prompts": "a painting of a gas mask , by darius kawasaki", + "sample_sampler": "euler_a", + "save_every_n_epochs": 1, + "save_every_n_steps": 0, + "save_last_n_steps": 0, + "save_last_n_steps_state": 0, + "save_model_as": "safetensors", + "save_precision": "fp16", + "save_state": false, + "scale_v_pred_loss_like_noise_pred": false, + "seed": "1234", + 
"shuffle_caption": false, + "stop_text_encoder_training": 0, + "train_batch_size": 4, + "train_data_dir": "./test/img", + "use_wandb": false, + "v2": false, + "v_parameterization": false, + "vae": "", + "vae_batch_size": 0, + "wandb_api_key": "", + "weighted_captions": false, + "xformers": true +} \ No newline at end of file diff --git a/test/config/dreambooth-AdamW8bit.json b/test/config/dreambooth-AdamW8bit.json new file mode 100644 index 000000000..09865620f --- /dev/null +++ b/test/config/dreambooth-AdamW8bit.json @@ -0,0 +1,73 @@ +{ + "adaptive_noise_scale": 0, + "additional_parameters": "", + "bucket_no_upscale": true, + "bucket_reso_steps": 64, + "cache_latents": true, + "cache_latents_to_disk": false, + "caption_dropout_every_n_epochs": 0.0, + "caption_dropout_rate": 0.05, + "caption_extension": "", + "clip_skip": 2, + "color_aug": false, + "enable_bucket": true, + "epoch": 1, + "flip_aug": false, + "full_fp16": false, + "gradient_accumulation_steps": 1.0, + "gradient_checkpointing": false, + "keep_tokens": "0", + "learning_rate": 5e-05, + "logging_dir": "./test/logs", + "lr_scheduler": "constant", + "lr_warmup": 0, + "max_data_loader_n_workers": "0", + "max_resolution": "512,512", + "max_token_length": "75", + "max_train_epochs": "", + "mem_eff_attn": false, + "min_snr_gamma": 0, + "mixed_precision": "bf16", + "model_list": "runwayml/stable-diffusion-v1-5", + "multires_noise_discount": 0, + "multires_noise_iterations": 0, + "no_token_padding": false, + "noise_offset": "0.05", + "noise_offset_type": "Original", + "num_cpu_threads_per_process": 2, + "optimizer": "AdamW8bit", + "optimizer_args": "", + "output_dir": "./test/output", + "output_name": "db-AdamW8bit", + "persistent_data_loader_workers": false, + "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5", + "prior_loss_weight": 1.0, + "random_crop": false, + "reg_data_dir": "", + "resume": "", + "sample_every_n_epochs": 0, + "sample_every_n_steps": 25, + "sample_prompts": "a painting of a 
gas mask , by darius kawasaki", + "sample_sampler": "euler_a", + "save_every_n_epochs": 1, + "save_every_n_steps": 0, + "save_last_n_steps": 0, + "save_last_n_steps_state": 0, + "save_model_as": "safetensors", + "save_precision": "fp16", + "save_state": false, + "scale_v_pred_loss_like_noise_pred": false, + "seed": "1234", + "shuffle_caption": false, + "stop_text_encoder_training": 0, + "train_batch_size": 4, + "train_data_dir": "./test/img", + "use_wandb": false, + "v2": false, + "v_parameterization": false, + "vae": "", + "vae_batch_size": 0, + "wandb_api_key": "", + "weighted_captions": false, + "xformers": true +} \ No newline at end of file diff --git a/test/config/dreambooth-DAdaptAdam.json b/test/config/dreambooth-DAdaptAdam.json new file mode 100644 index 000000000..de04bef1d --- /dev/null +++ b/test/config/dreambooth-DAdaptAdam.json @@ -0,0 +1,73 @@ +{ + "adaptive_noise_scale": 0, + "additional_parameters": "", + "bucket_no_upscale": true, + "bucket_reso_steps": 1, + "cache_latents": true, + "cache_latents_to_disk": false, + "caption_dropout_every_n_epochs": 0.0, + "caption_dropout_rate": 0, + "caption_extension": "", + "clip_skip": 2, + "color_aug": false, + "enable_bucket": true, + "epoch": 1, + "flip_aug": false, + "full_fp16": false, + "gradient_accumulation_steps": 1.0, + "gradient_checkpointing": false, + "keep_tokens": "0", + "learning_rate": 1.0, + "logging_dir": "./test/logs", + "lr_scheduler": "cosine", + "lr_warmup": 0, + "max_data_loader_n_workers": "0", + "max_resolution": "512,512", + "max_token_length": "75", + "max_train_epochs": "", + "mem_eff_attn": false, + "min_snr_gamma": 0, + "mixed_precision": "bf16", + "model_list": "runwayml/stable-diffusion-v1-5", + "multires_noise_discount": 0.2, + "multires_noise_iterations": 8, + "no_token_padding": false, + "noise_offset": "0.05", + "noise_offset_type": "Multires", + "num_cpu_threads_per_process": 2, + "optimizer": "DAdaptAdam", + "optimizer_args": "decouple=True weight_decay=0.6 
betas=0.9,0.99 use_bias_correction=True", + "output_dir": "./test/output", + "output_name": "db-DAdaptAdam", + "persistent_data_loader_workers": false, + "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5", + "prior_loss_weight": 1.0, + "random_crop": false, + "reg_data_dir": "", + "resume": "", + "sample_every_n_epochs": 0, + "sample_every_n_steps": 25, + "sample_prompts": "a painting of a gas mask , by darius kawasaki", + "sample_sampler": "euler_a", + "save_every_n_epochs": 1, + "save_every_n_steps": 0, + "save_last_n_steps": 0, + "save_last_n_steps_state": 0, + "save_model_as": "safetensors", + "save_precision": "fp16", + "save_state": false, + "scale_v_pred_loss_like_noise_pred": false, + "seed": "1234", + "shuffle_caption": false, + "stop_text_encoder_training": 0, + "train_batch_size": 1, + "train_data_dir": "./test/img", + "use_wandb": false, + "v2": false, + "v_parameterization": false, + "vae": "", + "vae_batch_size": 0, + "wandb_api_key": "", + "weighted_captions": false, + "xformers": true +} \ No newline at end of file diff --git a/test/config/dreambooth-Prodigy.json b/test/config/dreambooth-Prodigy.json new file mode 100644 index 000000000..0bbc337ae --- /dev/null +++ b/test/config/dreambooth-Prodigy.json @@ -0,0 +1,73 @@ +{ + "adaptive_noise_scale": 0, + "additional_parameters": "", + "bucket_no_upscale": true, + "bucket_reso_steps": 1, + "cache_latents": true, + "cache_latents_to_disk": false, + "caption_dropout_every_n_epochs": 0.0, + "caption_dropout_rate": 0, + "caption_extension": "", + "clip_skip": 2, + "color_aug": false, + "enable_bucket": true, + "epoch": 1, + "flip_aug": false, + "full_fp16": false, + "gradient_accumulation_steps": 1.0, + "gradient_checkpointing": false, + "keep_tokens": "0", + "learning_rate": 1.0, + "logging_dir": "./test/logs", + "lr_scheduler": "cosine", + "lr_warmup": 0, + "max_data_loader_n_workers": "0", + "max_resolution": "512,512", + "max_token_length": "75", + "max_train_epochs": "", + 
"mem_eff_attn": false, + "min_snr_gamma": 0, + "mixed_precision": "bf16", + "model_list": "runwayml/stable-diffusion-v1-5", + "multires_noise_discount": 0.2, + "multires_noise_iterations": 8, + "no_token_padding": false, + "noise_offset": "0.05", + "noise_offset_type": "Multires", + "num_cpu_threads_per_process": 2, + "optimizer": "Prodigy", + "optimizer_args": "decouple=True weight_decay=0.6 betas=0.9,0.99 use_bias_correction=True", + "output_dir": "./test/output", + "output_name": "db-Prodigy", + "persistent_data_loader_workers": false, + "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5", + "prior_loss_weight": 1.0, + "random_crop": false, + "reg_data_dir": "", + "resume": "", + "sample_every_n_epochs": 0, + "sample_every_n_steps": 25, + "sample_prompts": "a painting of a gas mask , by darius kawasaki", + "sample_sampler": "euler_a", + "save_every_n_epochs": 1, + "save_every_n_steps": 0, + "save_last_n_steps": 0, + "save_last_n_steps_state": 0, + "save_model_as": "safetensors", + "save_precision": "fp16", + "save_state": false, + "scale_v_pred_loss_like_noise_pred": false, + "seed": "1234", + "shuffle_caption": false, + "stop_text_encoder_training": 0, + "train_batch_size": 1, + "train_data_dir": "./test/img", + "use_wandb": false, + "v2": false, + "v_parameterization": false, + "vae": "", + "vae_batch_size": 0, + "wandb_api_key": "", + "weighted_captions": false, + "xformers": true +} \ No newline at end of file diff --git a/test/config/dreambooth.json b/test/config/dreambooth.json new file mode 100644 index 000000000..5fae2f9d1 --- /dev/null +++ b/test/config/dreambooth.json @@ -0,0 +1,73 @@ +{ + "adaptive_noise_scale": 0, + "additional_parameters": "", + "bucket_no_upscale": true, + "bucket_reso_steps": 64, + "cache_latents": true, + "cache_latents_to_disk": false, + "caption_dropout_every_n_epochs": 0.0, + "caption_dropout_rate": 0.05, + "caption_extension": "", + "clip_skip": 2, + "color_aug": false, + "enable_bucket": true, + "epoch": 1, + 
"flip_aug": false, + "full_fp16": false, + "gradient_accumulation_steps": 1.0, + "gradient_checkpointing": false, + "keep_tokens": "0", + "learning_rate": 1.0, + "logging_dir": "./test/logs", + "lr_scheduler": "constant", + "lr_warmup": 0, + "max_data_loader_n_workers": "0", + "max_resolution": "512,512", + "max_token_length": "75", + "max_train_epochs": "", + "mem_eff_attn": false, + "min_snr_gamma": 0, + "mixed_precision": "bf16", + "model_list": "runwayml/stable-diffusion-v1-5", + "multires_noise_discount": 0, + "multires_noise_iterations": 0, + "no_token_padding": false, + "noise_offset": "0.05", + "noise_offset_type": "Original", + "num_cpu_threads_per_process": 2, + "optimizer": "DAdaptation", + "optimizer_args": "", + "output_dir": "./test/output", + "output_name": "db", + "persistent_data_loader_workers": false, + "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5", + "prior_loss_weight": 1.0, + "random_crop": false, + "reg_data_dir": "", + "resume": "", + "sample_every_n_epochs": 0, + "sample_every_n_steps": 25, + "sample_prompts": "a painting of a gas mask , by darius kawasaki", + "sample_sampler": "euler_a", + "save_every_n_epochs": 1, + "save_every_n_steps": 0, + "save_last_n_steps": 0, + "save_last_n_steps_state": 0, + "save_model_as": "safetensors", + "save_precision": "fp16", + "save_state": false, + "scale_v_pred_loss_like_noise_pred": false, + "seed": "1234", + "shuffle_caption": false, + "stop_text_encoder_training": 0, + "train_batch_size": 4, + "train_data_dir": "./test/img", + "use_wandb": false, + "v2": false, + "v_parameterization": false, + "vae": "", + "vae_batch_size": 0, + "wandb_api_key": "", + "weighted_captions": false, + "xformers": true +} \ No newline at end of file diff --git a/test/config/locon-AdamW.json b/test/config/locon-AdamW.json new file mode 100644 index 000000000..7703464b7 --- /dev/null +++ b/test/config/locon-AdamW.json @@ -0,0 +1,101 @@ +{ + "LoRA_type": "Kohya LoCon", + "adaptive_noise_scale": 0, + 
"additional_parameters": "", + "block_alphas": "", + "block_dims": "", + "block_lr_zero_threshold": "", + "bucket_no_upscale": true, + "bucket_reso_steps": 64, + "cache_latents": true, + "cache_latents_to_disk": false, + "caption_dropout_every_n_epochs": 0.0, + "caption_dropout_rate": 0.05, + "caption_extension": "", + "clip_skip": 2, + "color_aug": false, + "conv_alpha": 8, + "conv_alphas": "", + "conv_dim": 16, + "conv_dims": "", + "decompose_both": false, + "dim_from_weights": false, + "down_lr_weight": "", + "enable_bucket": true, + "epoch": 1, + "factor": -1, + "flip_aug": false, + "full_fp16": false, + "gradient_accumulation_steps": 1, + "gradient_checkpointing": false, + "keep_tokens": "0", + "learning_rate": 0.0005, + "logging_dir": "./test/logs", + "lora_network_weights": "", + "lr_scheduler": "constant", + "lr_scheduler_num_cycles": "", + "lr_scheduler_power": "", + "lr_warmup": 0, + "max_data_loader_n_workers": "0", + "max_resolution": "512,512", + "max_token_length": "75", + "max_train_epochs": "", + "mem_eff_attn": false, + "mid_lr_weight": "", + "min_snr_gamma": 0, + "mixed_precision": "bf16", + "model_list": "runwayml/stable-diffusion-v1-5", + "module_dropout": 0.1, + "multires_noise_discount": 0, + "multires_noise_iterations": 0, + "network_alpha": 8, + "network_dim": 16, + "network_dropout": 0.1, + "no_token_padding": false, + "noise_offset": "0.05", + "noise_offset_type": "Original", + "num_cpu_threads_per_process": 2, + "optimizer": "AdamW", + "optimizer_args": "", + "output_dir": "./test/output", + "output_name": "locon-AdamW", + "persistent_data_loader_workers": false, + "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5", + "prior_loss_weight": 1.0, + "random_crop": false, + "rank_dropout": 0.1, + "reg_data_dir": "", + "resume": "", + "sample_every_n_epochs": 0, + "sample_every_n_steps": 25, + "sample_prompts": "a painting of a gas mask , by darius kawasaki", + "sample_sampler": "euler_a", + "save_every_n_epochs": 1, + 
"save_every_n_steps": 0, + "save_last_n_steps": 0, + "save_last_n_steps_state": 0, + "save_model_as": "safetensors", + "save_precision": "fp16", + "save_state": false, + "scale_v_pred_loss_like_noise_pred": false, + "scale_weight_norms": 1, + "seed": "1234", + "shuffle_caption": false, + "stop_text_encoder_training": 0, + "text_encoder_lr": 0.0001, + "train_batch_size": 4, + "train_data_dir": "./test/img", + "train_on_input": false, + "training_comment": "", + "unet_lr": 0.0001, + "unit": 1, + "up_lr_weight": "", + "use_cp": false, + "use_wandb": false, + "v2": false, + "v_parameterization": false, + "vae_batch_size": 0, + "wandb_api_key": "", + "weighted_captions": false, + "xformers": true +} \ No newline at end of file diff --git a/test/config/locon-AdamW8bit.json b/test/config/locon-AdamW8bit.json new file mode 100644 index 000000000..1c1666ef3 --- /dev/null +++ b/test/config/locon-AdamW8bit.json @@ -0,0 +1,101 @@ +{ + "LoRA_type": "Kohya LoCon", + "adaptive_noise_scale": 0, + "additional_parameters": "", + "block_alphas": "", + "block_dims": "", + "block_lr_zero_threshold": "", + "bucket_no_upscale": true, + "bucket_reso_steps": 64, + "cache_latents": true, + "cache_latents_to_disk": false, + "caption_dropout_every_n_epochs": 0.0, + "caption_dropout_rate": 0.05, + "caption_extension": "", + "clip_skip": 2, + "color_aug": false, + "conv_alpha": 8, + "conv_alphas": "", + "conv_dim": 16, + "conv_dims": "", + "decompose_both": false, + "dim_from_weights": false, + "down_lr_weight": "", + "enable_bucket": true, + "epoch": 1, + "factor": -1, + "flip_aug": false, + "full_fp16": false, + "gradient_accumulation_steps": 1, + "gradient_checkpointing": false, + "keep_tokens": "0", + "learning_rate": 0.0005, + "logging_dir": "./test/logs", + "lora_network_weights": "", + "lr_scheduler": "constant", + "lr_scheduler_num_cycles": "", + "lr_scheduler_power": "", + "lr_warmup": 0, + "max_data_loader_n_workers": "0", + "max_resolution": "512,512", + "max_token_length": "75", + 
"max_train_epochs": "", + "mem_eff_attn": false, + "mid_lr_weight": "", + "min_snr_gamma": 0, + "mixed_precision": "bf16", + "model_list": "runwayml/stable-diffusion-v1-5", + "module_dropout": 0.1, + "multires_noise_discount": 0, + "multires_noise_iterations": 0, + "network_alpha": 8, + "network_dim": 16, + "network_dropout": 0.1, + "no_token_padding": false, + "noise_offset": "0.05", + "noise_offset_type": "Original", + "num_cpu_threads_per_process": 2, + "optimizer": "AdamW8bit", + "optimizer_args": "", + "output_dir": "./test/output", + "output_name": "locon-AdamW8bit", + "persistent_data_loader_workers": false, + "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5", + "prior_loss_weight": 1.0, + "random_crop": false, + "rank_dropout": 0.1, + "reg_data_dir": "", + "resume": "", + "sample_every_n_epochs": 0, + "sample_every_n_steps": 25, + "sample_prompts": "a painting of a gas mask , by darius kawasaki", + "sample_sampler": "euler_a", + "save_every_n_epochs": 1, + "save_every_n_steps": 0, + "save_last_n_steps": 0, + "save_last_n_steps_state": 0, + "save_model_as": "safetensors", + "save_precision": "fp16", + "save_state": false, + "scale_v_pred_loss_like_noise_pred": false, + "scale_weight_norms": 1, + "seed": "1234", + "shuffle_caption": false, + "stop_text_encoder_training": 0, + "text_encoder_lr": 0.0001, + "train_batch_size": 4, + "train_data_dir": "./test/img", + "train_on_input": false, + "training_comment": "", + "unet_lr": 0.0001, + "unit": 1, + "up_lr_weight": "", + "use_cp": false, + "use_wandb": false, + "v2": false, + "v_parameterization": false, + "vae_batch_size": 0, + "wandb_api_key": "", + "weighted_captions": false, + "xformers": true +} \ No newline at end of file diff --git a/test/config/locon-Prodigy.json b/test/config/locon-Prodigy.json new file mode 100644 index 000000000..cf88d2520 --- /dev/null +++ b/test/config/locon-Prodigy.json @@ -0,0 +1,101 @@ +{ + "LoRA_type": "Kohya LoCon", + "adaptive_noise_scale": 0, + 
"additional_parameters": "", + "block_alphas": "", + "block_dims": "", + "block_lr_zero_threshold": "", + "bucket_no_upscale": true, + "bucket_reso_steps": 1, + "cache_latents": true, + "cache_latents_to_disk": false, + "caption_dropout_every_n_epochs": 0.0, + "caption_dropout_rate": 0, + "caption_extension": "", + "clip_skip": 2, + "color_aug": false, + "conv_alpha": 8, + "conv_alphas": "", + "conv_dim": 16, + "conv_dims": "", + "decompose_both": false, + "dim_from_weights": false, + "down_lr_weight": "", + "enable_bucket": true, + "epoch": 1, + "factor": -1, + "flip_aug": false, + "full_fp16": false, + "gradient_accumulation_steps": 1, + "gradient_checkpointing": false, + "keep_tokens": "0", + "learning_rate": 1.0, + "logging_dir": "./test/logs", + "lora_network_weights": "", + "lr_scheduler": "cosine", + "lr_scheduler_num_cycles": "", + "lr_scheduler_power": "", + "lr_warmup": 0, + "max_data_loader_n_workers": "0", + "max_resolution": "512,512", + "max_token_length": "75", + "max_train_epochs": "", + "mem_eff_attn": false, + "mid_lr_weight": "", + "min_snr_gamma": 10, + "mixed_precision": "bf16", + "model_list": "runwayml/stable-diffusion-v1-5", + "module_dropout": 0.1, + "multires_noise_discount": 0.2, + "multires_noise_iterations": 8, + "network_alpha": 8, + "network_dim": 16, + "network_dropout": 0.1, + "no_token_padding": false, + "noise_offset": "0.05", + "noise_offset_type": "Multires", + "num_cpu_threads_per_process": 2, + "optimizer": "Prodigy", + "optimizer_args": "decouple=True weight_decay=0.6 betas=0.9,0.99 use_bias_correction=True", + "output_dir": "./test/output", + "output_name": "locon-Prodigy", + "persistent_data_loader_workers": false, + "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5", + "prior_loss_weight": 1.0, + "random_crop": false, + "rank_dropout": 0.1, + "reg_data_dir": "", + "resume": "", + "sample_every_n_epochs": 0, + "sample_every_n_steps": 25, + "sample_prompts": "a painting of a gas mask , by darius kawasaki", + 
"sample_sampler": "euler_a", + "save_every_n_epochs": 1, + "save_every_n_steps": 0, + "save_last_n_steps": 0, + "save_last_n_steps_state": 0, + "save_model_as": "safetensors", + "save_precision": "fp16", + "save_state": false, + "scale_v_pred_loss_like_noise_pred": false, + "scale_weight_norms": 1, + "seed": "1234", + "shuffle_caption": false, + "stop_text_encoder_training": 0, + "text_encoder_lr": 1.0, + "train_batch_size": 4, + "train_data_dir": "./test/img", + "train_on_input": false, + "training_comment": "", + "unet_lr": 1.0, + "unit": 1, + "up_lr_weight": "", + "use_cp": true, + "use_wandb": false, + "v2": false, + "v_parameterization": false, + "vae_batch_size": 0, + "wandb_api_key": "", + "weighted_captions": false, + "xformers": true +} \ No newline at end of file diff --git a/test/config/loha-Prodigy.json b/test/config/loha-Prodigy.json new file mode 100644 index 000000000..41f30bfaf --- /dev/null +++ b/test/config/loha-Prodigy.json @@ -0,0 +1,101 @@ +{ + "LoRA_type": "LyCORIS/LoHa", + "adaptive_noise_scale": 0, + "additional_parameters": "", + "block_alphas": "", + "block_dims": "", + "block_lr_zero_threshold": "", + "bucket_no_upscale": true, + "bucket_reso_steps": 1, + "cache_latents": true, + "cache_latents_to_disk": false, + "caption_dropout_every_n_epochs": 0.0, + "caption_dropout_rate": 0, + "caption_extension": "", + "clip_skip": 2, + "color_aug": false, + "conv_alpha": 8, + "conv_alphas": "", + "conv_dim": 16, + "conv_dims": "", + "decompose_both": false, + "dim_from_weights": false, + "down_lr_weight": "", + "enable_bucket": true, + "epoch": 1, + "factor": -1, + "flip_aug": false, + "full_fp16": false, + "gradient_accumulation_steps": 1, + "gradient_checkpointing": false, + "keep_tokens": "0", + "learning_rate": 1.0, + "logging_dir": "./test/logs", + "lora_network_weights": "", + "lr_scheduler": "cosine", + "lr_scheduler_num_cycles": "", + "lr_scheduler_power": "", + "lr_warmup": 0, + "max_data_loader_n_workers": "0", + "max_resolution": 
"512,512", + "max_token_length": "75", + "max_train_epochs": "", + "mem_eff_attn": false, + "mid_lr_weight": "", + "min_snr_gamma": 10, + "mixed_precision": "bf16", + "model_list": "runwayml/stable-diffusion-v1-5", + "module_dropout": 0.1, + "multires_noise_discount": 0.2, + "multires_noise_iterations": 8, + "network_alpha": 8, + "network_dim": 16, + "network_dropout": 0.1, + "no_token_padding": false, + "noise_offset": "0.05", + "noise_offset_type": "Multires", + "num_cpu_threads_per_process": 2, + "optimizer": "Prodigy", + "optimizer_args": "decouple=True weight_decay=0.6 betas=0.9,0.99 use_bias_correction=True", + "output_dir": "./test/output", + "output_name": "loha-Prodigy", + "persistent_data_loader_workers": false, + "pretrained_model_name_or_path": "runwayml/stable-diffusion-v1-5", + "prior_loss_weight": 1.0, + "random_crop": false, + "rank_dropout": 0.1, + "reg_data_dir": "", + "resume": "", + "sample_every_n_epochs": 0, + "sample_every_n_steps": 25, + "sample_prompts": "a painting of a gas mask , by darius kawasaki", + "sample_sampler": "euler_a", + "save_every_n_epochs": 1, + "save_every_n_steps": 0, + "save_last_n_steps": 0, + "save_last_n_steps_state": 0, + "save_model_as": "safetensors", + "save_precision": "fp16", + "save_state": false, + "scale_v_pred_loss_like_noise_pred": false, + "scale_weight_norms": 1, + "seed": "1234", + "shuffle_caption": false, + "stop_text_encoder_training": 0, + "text_encoder_lr": 1.0, + "train_batch_size": 4, + "train_data_dir": "./test/img", + "train_on_input": false, + "training_comment": "", + "unet_lr": 1.0, + "unit": 1, + "up_lr_weight": "", + "use_cp": true, + "use_wandb": false, + "v2": false, + "v_parameterization": false, + "vae_batch_size": 0, + "wandb_api_key": "", + "weighted_captions": false, + "xformers": true +} \ No newline at end of file diff --git a/tools/cudann_1.8_install.py b/tools/cudann_1.8_install.py deleted file mode 100644 index 04f96fd52..000000000 --- a/tools/cudann_1.8_install.py +++ /dev/null 
@@ -1,26 +0,0 @@ -import filecmp -import os -import shutil -import sys -import sysconfig - -# Check for "different" B&B Files and copy only if necessary -if os.name == "nt": - python = sys.executable - cudnn_src = os.path.join(os.path.dirname(os.path.realpath(__file__)), "..\cudnn_windows") - cudnn_dest = os.path.join(sysconfig.get_paths()["purelib"], "torch", "lib") - - print(f"Checking for CUDNN files in {cudnn_dest}") - if os.path.exists(cudnn_src): - if os.path.exists(cudnn_dest): - # check for different files - filecmp.clear_cache() - for file in os.listdir(cudnn_src): - src_file = os.path.join(cudnn_src, file) - dest_file = os.path.join(cudnn_dest, file) - #if dest file exists, check if it's different - if os.path.exists(dest_file): - shutil.copy2(src_file, cudnn_dest) - print("Copied CUDNN 8.6 files to destination") - else: - print(f"Installation Failed: \"{cudnn_src}\" could not be found. ") diff --git a/tools/group_images.py b/tools/group_images.py index 6ee4a8d6a..8ca1f18af 100644 --- a/tools/group_images.py +++ b/tools/group_images.py @@ -20,7 +20,7 @@ def __init__(self, input_folder, output_folder, group_size, include_subfolders, self.pad = pad self.caption = caption self.caption_ext = caption_ext - self.image_extensions = ('.png', '.jpg', '.jpeg', '.gif', '.webp') + self.image_extensions = ('.png', '.jpg', '.jpeg', '.gif', '.webp', '.tiff') def get_image_paths(self): images = [] diff --git a/tools/validate_requirements.py b/tools/validate_requirements.py deleted file mode 100644 index 4117119d5..000000000 --- a/tools/validate_requirements.py +++ /dev/null @@ -1,122 +0,0 @@ -import os -import re -import sys -import shutil -import argparse -from setup_windows import install, check_repo_version - -# Get the absolute path of the current file's directory (Kohua_SS project directory) -project_directory = os.path.dirname(os.path.abspath(__file__)) - -# Check if the "tools" directory is present in the project_directory -if "tools" in project_directory: - # If 
the "tools" directory is present, move one level up to the parent directory - project_directory = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - -# Add the project directory to the beginning of the Python search path -sys.path.insert(0, project_directory) - -from library.custom_logging import setup_logging - -# Set up logging -log = setup_logging() - - -def check_torch(): - # Check for nVidia toolkit or AMD toolkit - if shutil.which('nvidia-smi') is not None or os.path.exists( - os.path.join( - os.environ.get('SystemRoot') or r'C:\Windows', - 'System32', - 'nvidia-smi.exe', - ) - ): - log.info('nVidia toolkit detected') - elif shutil.which('rocminfo') is not None or os.path.exists( - '/opt/rocm/bin/rocminfo' - ): - log.info('AMD toolkit detected') - else: - log.info('Using CPU-only Torch') - - try: - import torch - - log.info(f'Torch {torch.__version__}') - - # Check if CUDA is available - if not torch.cuda.is_available(): - log.warning('Torch reports CUDA not available') - else: - if torch.version.cuda: - # Log nVidia CUDA and cuDNN versions - log.info( - f'Torch backend: nVidia CUDA {torch.version.cuda} cuDNN {torch.backends.cudnn.version() if torch.backends.cudnn.is_available() else "N/A"}' - ) - elif torch.version.hip: - # Log AMD ROCm HIP version - log.info(f'Torch backend: AMD ROCm HIP {torch.version.hip}') - else: - log.warning('Unknown Torch backend') - - # Log information about detected GPUs - for device in [ - torch.cuda.device(i) for i in range(torch.cuda.device_count()) - ]: - log.info( - f'Torch detected GPU: {torch.cuda.get_device_name(device)} VRAM {round(torch.cuda.get_device_properties(device).total_memory / 1024 / 1024)} Arch {torch.cuda.get_device_capability(device)} Cores {torch.cuda.get_device_properties(device).multi_processor_count}' - ) - return int(torch.__version__[0]) - except Exception as e: - log.error(f'Could not load torch: {e}') - sys.exit(1) - - -def install_requirements(requirements_file): - log.info('Verifying 
requirements') - with open(requirements_file, 'r', encoding='utf8') as f: - # Read lines from the requirements file, strip whitespace, and filter out empty lines, comments, and lines starting with '.' - lines = [ - line.strip() - for line in f.readlines() - if line.strip() != '' - and not line.startswith('#') - and line is not None - and not line.startswith('.') - ] - - # Iterate over each line and install the requirements - for line in lines: - # Remove brackets and their contents from the line using regular expressions - # eg diffusers[torch]==0.10.2 becomes diffusers==0.10.2 - package_name = re.sub(r'\[.*?\]', '', line) - install(line, package_name) - - -def main(): - check_repo_version() - # Parse command line arguments - parser = argparse.ArgumentParser( - description='Validate that requirements are satisfied.' - ) - parser.add_argument( - '-r', - '--requirements', - type=str, - help='Path to the requirements file.', - ) - parser.add_argument('--debug', action='store_true', help='Debug on') - args = parser.parse_args() - - if not args.requirements: - # Check Torch - if check_torch() == 1: - install_requirements('requirements_windows_torch1.txt') - else: - install_requirements('requirements_windows_torch2.txt') - else: - install_requirements(args.requirements) - - -if __name__ == '__main__': - main() diff --git a/upgrade.bat b/upgrade.bat index 0fdf50360..3dd329145 100644 --- a/upgrade.bat +++ b/upgrade.bat @@ -13,4 +13,4 @@ git pull call .\venv\Scripts\activate.bat :: Validate requirements -python.exe .\tools\validate_requirements.py +python.exe .\setup\validate_requirements.py diff --git a/upgrade.sh b/upgrade.sh deleted file mode 100644 index 6c2f39f41..000000000 --- a/upgrade.sh +++ /dev/null @@ -1,16 +0,0 @@ -#!/bin/bash - -# Check if there are any changes that need to be committed -if git status --short | grep -q "^[^ ?][^?]*"; then - echo "There are changes that need to be committed. Please stash or undo your changes before running this script." 
- exit 1 -fi - -# Pull the latest changes from the remote repository -git pull - -# Activate the virtual environment -source venv/bin/activate - -# Upgrade the required packages -pip install --use-pep517 --upgrade -r requirements_unix.txt