Commit b512eb6
feat(T2I): add Black Forest Labs Flux 1 support (#147)
This commit adds support for the [Black Forest Labs FLUX.1 Schnell](https://huggingface.co/black-forest-labs/FLUX.1-schnell) model to the T2I pipeline. Note that this model requires a GPU with more than 33 GB of VRAM.
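For orientation, here is a minimal usage sketch (not part of this commit) of how FLUX.1-schnell is typically driven through diffusers' `FluxPipeline`. The prompt is borrowed from the test script below; the sampling parameters (`num_inference_steps=4`, `guidance_scale=0.0`, `max_sequence_length=256`) are values commonly recommended for the Schnell variant, not values taken from this diff:

```python
import torch
from diffusers import FluxPipeline  # available in diffusers >= 0.30.0

# Load in bfloat16, mirroring the pipeline change in this commit, to roughly
# halve the weight memory compared to float32.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
).to("cuda")

image = pipe(
    "A photo of a cat.",
    num_inference_steps=4,   # Schnell is distilled for very few steps.
    guidance_scale=0.0,      # classifier-free guidance is disabled for Schnell.
    max_sequence_length=256,
).images[0]
image.save("flux_schnell_cat.png")  # hypothetical output path
```

The `elif` branch added to `runner/app/pipelines/text_to_image.py` below performs essentially this loading step inside the existing T2I pipeline class.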
rickstaa authored Aug 7, 2024
1 parent d2df29a commit b512eb6
Showing 3 changed files with 21 additions and 2 deletions.
9 changes: 8 additions & 1 deletion runner/app/pipelines/text_to_image.py
@@ -11,7 +11,7 @@
                                 is_turbo_model, split_prompt)
from diffusers import (AutoPipelineForText2Image, EulerDiscreteScheduler,
                       StableDiffusion3Pipeline, StableDiffusionXLPipeline,
-                      UNet2DConditionModel)
+                      UNet2DConditionModel, FluxPipeline)
from diffusers.models import AutoencoderKL
from huggingface_hub import file_download, hf_hub_download
from safetensors.torch import load_file
@@ -25,6 +25,7 @@ class ModelName(Enum):
    SDXL_LIGHTNING = "ByteDance/SDXL-Lightning"
    SD3_MEDIUM = "stabilityai/stable-diffusion-3-medium-diffusers"
    REALISTIC_VISION_V6 = "SG161222/Realistic_Vision_V6.0_B1_noVAE"
+    FLUX_1_SCHNELL = "black-forest-labs/FLUX.1-schnell"

    @classmethod
    def list(cls):
@@ -110,6 +111,12 @@ def __init__(self, model_id: str):
            self.ldm = StableDiffusion3Pipeline.from_pretrained(model_id, **kwargs).to(
                torch_device
            )
+        elif ModelName.FLUX_1_SCHNELL.value in model_id:
+            # Decrease precision to prevent OOM errors.
+            kwargs["torch_dtype"] = torch.bfloat16
+            self.ldm = FluxPipeline.from_pretrained(model_id, **kwargs).to(
+                torch_device
+            )
        else:
            self.ldm = AutoPipelineForText2Image.from_pretrained(model_id, **kwargs).to(
                torch_device
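As a rough sanity check on the >33 GB VRAM note in the commit message, here is a sketch (hypothetical, not part of this commit) that walks the loaded pipeline's components and estimates weight memory at 2 bytes per parameter in bfloat16; it relies only on diffusers' generic `DiffusionPipeline.components` mapping:

```python
import torch
from diffusers import FluxPipeline

# Load on CPU in bfloat16 just to inspect sizes; no GPU is needed for this.
pipe = FluxPipeline.from_pretrained(
    "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
)

total = 0
for name, component in pipe.components.items():
    if isinstance(component, torch.nn.Module):  # skip tokenizers, scheduler, ...
        n = sum(p.numel() for p in component.parameters())
        total += n
        print(f"{name}: {n / 1e9:.2f}B parameters")

print(f"total: {total / 1e9:.2f}B parameters, "
      f"~{total * 2 / 1e9:.0f} GB of weights in bfloat16")
```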
2 changes: 1 addition & 1 deletion runner/requirements.txt
@@ -1,4 +1,4 @@
-diffusers==0.29.2
+diffusers==0.30.0
accelerate==0.30.1
transformers==4.41.1
fastapi==0.111.0
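Related to the `diffusers` bump above: `FluxPipeline` only exists in diffusers 0.30.0 and later, so a defensive version check (hypothetical, not in this commit) could fail fast with a clearer message on older installs:

```python
import importlib.metadata

from packaging import version  # packaging is available in most HF environments

installed = importlib.metadata.version("diffusers")
if version.parse(installed) < version.parse("0.30.0"):
    raise RuntimeError(
        f"diffusers {installed} is too old for FLUX.1 support; "
        "install diffusers>=0.30.0"
    )

from diffusers import FluxPipeline  # noqa: E402  (imported after the check)
```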
12 changes: 12 additions & 0 deletions runner/test_prompts.py
@@ -0,0 +1,12 @@
+from app.pipelines.utils.utils import split_prompt
+
+if __name__ == "__main__":
+    input_prompt = "A photo of a cat.|"
+    test = split_prompt(input_prompt)
+
+    input_prompt2 = ""
+    test2 = split_prompt(input_prompt2)
+
+    input_prompt3 = "A photo of a cat.|A photo of a dog.|A photo of a bird."
+    test3 = split_prompt(input_prompt3)
+