diff --git a/README.md b/README.md
index 20e9ba52..8f51a72b 100644
--- a/README.md
+++ b/README.md
@@ -17,6 +17,7 @@ https://invoke-ai.github.io/invoke-training/
   - DreamBooth LoRA
   - Textual Inversion
 - Stable Diffusion XL
+  - Full finetuning
   - LoRA
   - DreamBooth LoRA
   - Textual Inversion
diff --git a/docs/tutorials/stable_diffusion/robocats_finetune_sdxl.md b/docs/tutorials/stable_diffusion/robocats_finetune_sdxl.md
index de529262..fa8287be 100644
--- a/docs/tutorials/stable_diffusion/robocats_finetune_sdxl.md
+++ b/docs/tutorials/stable_diffusion/robocats_finetune_sdxl.md
@@ -124,7 +124,7 @@ python src/invoke_training/scripts/_experimental/lora_extraction/extract_lora_fr
     --lora-rank 32
 ```
 
-## 5 - Import into InvokeAI
+## 6 - Import into InvokeAI
 
 If you haven't already, setup [InvokeAI](https://github.com/invoke-ai/InvokeAI) by following its documentation.
 
@@ -132,7 +132,7 @@ Import your finetuned diffusers model or your extracted LoRA from the 'Models' t
 
 Congratulations, you can now use your new robocat model! 🎉
 
-## 6 - Comparison: Finetune vs. LoRA Extraction
+## 7 - Comparison: Finetune vs. LoRA Extraction
 
 As noted earlier, the LoRA extraction process is lossy for a number of reasons. (There are some significant improvements coming to the LoRA extraction process soon to improve the fidelity of the extracted LoRA model.)
diff --git a/src/invoke_training/pipelines/stable_diffusion_xl/finetune/train.py b/src/invoke_training/pipelines/stable_diffusion_xl/finetune/train.py
index b6e4e04d..7370f1b2 100644
--- a/src/invoke_training/pipelines/stable_diffusion_xl/finetune/train.py
+++ b/src/invoke_training/pipelines/stable_diffusion_xl/finetune/train.py
@@ -131,6 +131,7 @@ def train(config: SdxlFinetuneConfig, callbacks: list[PipelineCallbacks] | None
 
     logger.info("Loading models.")
     tokenizer_1, tokenizer_2, noise_scheduler, text_encoder_1, text_encoder_2, vae, unet = load_models_sdxl(
+        logger=logger,
         model_name_or_path=config.model,
         hf_variant=config.hf_variant,
         vae_model=config.vae_model,
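For context on the last hunk: the `train()` call site now forwards its `logger` into `load_models_sdxl`. Below is a minimal, hypothetical sketch of what such a logger-aware SDXL loader could look like, not the actual invoke-training implementation. Only the parameter names and the returned tuple order are taken from the hunk above; the `load_models_sdxl_sketch` name, the Hugging Face loading calls, and the subfolder layout are illustrative assumptions.

```python
import logging

from diffusers import AutoencoderKL, DDPMScheduler, UNet2DConditionModel
from transformers import CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer


def load_models_sdxl_sketch(
    logger: logging.Logger,
    model_name_or_path: str,
    hf_variant: str | None = None,
    vae_model: str | None = None,
):
    """Hypothetical stand-in for load_models_sdxl that logs progress via the caller's logger."""
    logger.info(f"Loading SDXL components from '{model_name_or_path}' (variant={hf_variant}).")

    tokenizer_1 = CLIPTokenizer.from_pretrained(model_name_or_path, subfolder="tokenizer")
    tokenizer_2 = CLIPTokenizer.from_pretrained(model_name_or_path, subfolder="tokenizer_2")
    noise_scheduler = DDPMScheduler.from_pretrained(model_name_or_path, subfolder="scheduler")
    text_encoder_1 = CLIPTextModel.from_pretrained(
        model_name_or_path, subfolder="text_encoder", variant=hf_variant
    )
    text_encoder_2 = CLIPTextModelWithProjection.from_pretrained(
        model_name_or_path, subfolder="text_encoder_2", variant=hf_variant
    )

    # If a standalone VAE was configured, prefer it over the one bundled with the base model.
    if vae_model is not None:
        logger.info(f"Overriding VAE with '{vae_model}'.")
        vae = AutoencoderKL.from_pretrained(vae_model)
    else:
        vae = AutoencoderKL.from_pretrained(model_name_or_path, subfolder="vae", variant=hf_variant)

    unet = UNet2DConditionModel.from_pretrained(model_name_or_path, subfolder="unet", variant=hf_variant)

    # Return order matches the unpacking shown in the train.py hunk above.
    return tokenizer_1, tokenizer_2, noise_scheduler, text_encoder_1, text_encoder_2, vae, unet
```

At the call site shown in the diff, passing `logger=logger` lets the loader report progress through the pipeline's existing logger rather than creating its own.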