Upgrade Gradio #273

Merged: 1 commit, merged on Mar 3, 2023
6 changes: 4 additions & 2 deletions library/common_gui.py
Original file line number Diff line number Diff line change
Expand Up @@ -355,6 +355,7 @@ def gradio_source_model():
pretrained_model_name_or_path = gr.Textbox(
label='Pretrained model name or path',
placeholder='enter the path to custom model or name of pretrained model',
+ value='runwayml/stable-diffusion-v1-5'
)
pretrained_model_name_or_path_file = gr.Button(
document_symbol, elem_id='open_folder_small'
@@ -373,7 +374,7 @@ def gradio_source_model():
outputs=pretrained_model_name_or_path,
)
model_list = gr.Dropdown(
- label='(Optional) Model Quick Pick',
+ label='Model Quick Pick',
choices=[
'custom',
'stabilityai/stable-diffusion-2-1-base',
@@ -383,6 +384,7 @@
'runwayml/stable-diffusion-v1-5',
'CompVis/stable-diffusion-v1-4',
],
+ value='runwayml/stable-diffusion-v1-5'
)
save_model_as = gr.Dropdown(
label='Save trained model as',
@@ -397,7 +399,7 @@
)

with gr.Row():
- v2 = gr.Checkbox(label='v2', value=True)
+ v2 = gr.Checkbox(label='v2', value=False)
v_parameterization = gr.Checkbox(
label='v_parameterization', value=False
)
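Note (not part of the PR): these common_gui.py changes rely on Gradio using the value= argument as a component's initial state, so the model picker now starts on runwayml/stable-diffusion-v1-5 and the v2 checkbox starts unchecked. A minimal, self-contained sketch of that behavior, with illustrative choices:

```python
# Minimal sketch, not from the repository: value= sets the initial state
# shown in the UI before the user interacts with the component.
import gradio as gr

with gr.Blocks() as demo:
    model_list = gr.Dropdown(
        label='Model Quick Pick',
        choices=['custom', 'runwayml/stable-diffusion-v1-5'],
        value='runwayml/stable-diffusion-v1-5',  # initial selection
    )
    v2 = gr.Checkbox(label='v2', value=False)  # unchecked by default

if __name__ == '__main__':
    demo.launch()
```
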
lora_gui.py (70 changes: 48 additions & 22 deletions)

@@ -123,7 +123,7 @@ def save_configuration(
caption_dropout_rate,
optimizer,
optimizer_args,noise_offset,
- locon=0, conv_dim=0, conv_alpha=0,
+ LoRA_type='Standard', conv_dim=0, conv_alpha=0,
):
# Get list of function parameters and values
parameters = list(locals().items())
@@ -231,7 +231,7 @@ def open_configuration(
caption_dropout_rate,
optimizer,
optimizer_args,noise_offset,
- locon=0, conv_dim=0, conv_alpha=0,
+ LoRA_type='Standard', conv_dim=0, conv_alpha=0,
):
# Get list of function parameters and values
parameters = list(locals().items())
@@ -256,6 +256,12 @@ def open_configuration(
if not key in ['file_path']:
values.append(my_data.get(key, value))

+ # Show the LoCon-specific parameters only when LoRA_type is 'LoCon'
+ if my_data.get('LoRA_type', 'Standard') == 'LoCon':
+ values.append(gr.Group.update(visible=True))
+ else:
+ values.append(gr.Group.update(visible=False))
+
return tuple(values)
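Note (not part of the PR): the appended gr.Group.update(visible=...) only takes effect because the button wiring near the end of this file adds LoCon_group to the outputs list; Gradio matches each returned value to an output component by position. A hedged, minimal sketch of that contract, with illustrative names:

```python
# Minimal sketch, not from the repository: a handler can return an update
# object as an extra value; it is applied to whichever component sits at the
# same position in the event's outputs list.
import gradio as gr

def open_configuration(lora_type):
    values = [lora_type]  # regular output value(s)
    values.append(gr.Group.update(visible=lora_type == 'LoCon'))  # extra update
    return tuple(values)

with gr.Blocks() as demo:
    lora_type = gr.Dropdown(['Standard', 'LoCon'], value='Standard', label='LoRA type')
    with gr.Group(visible=False) as locon_group:
        gr.Slider(1, 512, value=1, step=1, label='conv_dim')
    gr.Button('Open config').click(
        open_configuration,
        inputs=[lora_type],
        outputs=[lora_type] + [locon_group],  # one output per returned value
    )
```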


@@ -319,7 +325,7 @@ def train_model(
caption_dropout_rate,
optimizer,
optimizer_args,noise_offset,
- locon, conv_dim, conv_alpha,
+ LoRA_type, conv_dim, conv_alpha,
):
if pretrained_model_name_or_path == '':
msgbox('Source model information is missing')
@@ -455,7 +461,7 @@ def train_model(
run_cmd += f' --save_model_as={save_model_as}'
if not float(prior_loss_weight) == 1.0:
run_cmd += f' --prior_loss_weight={prior_loss_weight}'
- if locon:
+ if LoRA_type == 'LoCon':
getlocon(os.path.exists(os.path.join(path_of_this_folder, 'locon')))
run_cmd += f' --network_module=locon.locon.locon_kohya'
run_cmd += f' --network_args "conv_dim={conv_dim}" "conv_alpha={conv_alpha}"'
@@ -634,6 +640,14 @@ def lora_tab(
)
with gr.Tab('Training parameters'):
with gr.Row():
+ LoRA_type = gr.Dropdown(
+ label='LoRA type',
+ choices=[
+ 'Standard',
+ 'LoCon',
+ ],
+ value='Standard'
+ )
lora_network_weights = gr.Textbox(
label='LoRA network weights',
placeholder='(Optional) Path to existing LoRA network weights to resume training',
@@ -666,6 +680,7 @@ def lora_tab(
lr_scheduler_value='cosine',
lr_warmup_value='10',
)
+
with gr.Row():
text_encoder_lr = gr.Textbox(
label='Text Encoder learning rate',
@@ -693,23 +708,17 @@ def lora_tab(
step=1,
interactive=True,
)
- with gr.Row():
- max_resolution = gr.Textbox(
- label='Max resolution',
- value='512,512',
- placeholder='512,512',
- )
- stop_text_encoder_training = gr.Slider(
- minimum=0,
- maximum=100,
- value=0,
- step=1,
- label='Stop text encoder training',
- )
- enable_bucket = gr.Checkbox(label='Enable buckets', value=True)
- with gr.Accordion('Advanced Configuration', open=False):
+
+ with gr.Group(visible=False) as LoCon_group:
+ def LoRA_type_change(LoRA_type):
+ if LoRA_type == "LoCon":
+ return gr.Group.update(visible=True)
+ else:
+ return gr.Group.update(visible=False)
+
with gr.Row():
- locon= gr.Checkbox(label='Train a LoCon instead of a general LoRA (does not support v2 base models) (may not be able to some utilities now)', value=False)
+
+ # locon= gr.Checkbox(label='Train a LoCon instead of a general LoRA (does not support v2 base models) (may not be able to some utilities now)', value=False)
conv_dim = gr.Slider(
minimum=1,
maximum=512,
@@ -724,6 +733,23 @@ def lora_tab(
step=1,
label='LoCon Convolution Alpha',
)
+ # Show or hide LoCon conv settings depending on LoRA type selection
+ LoRA_type.change(LoRA_type_change, inputs=[LoRA_type], outputs=[LoCon_group])
+ with gr.Row():
+ max_resolution = gr.Textbox(
+ label='Max resolution',
+ value='512,512',
+ placeholder='512,512',
+ )
+ stop_text_encoder_training = gr.Slider(
+ minimum=0,
+ maximum=100,
+ value=0,
+ step=1,
+ label='Stop text encoder training',
+ )
+ enable_bucket = gr.Checkbox(label='Enable buckets', value=True)
+ with gr.Accordion('Advanced Configuration', open=False):
with gr.Row():
no_token_padding = gr.Checkbox(
label='No token padding', value=False
@@ -869,13 +895,13 @@ def lora_tab(
caption_dropout_rate,
optimizer,
optimizer_args,noise_offset,
- locon, conv_dim, conv_alpha,
+ LoRA_type, conv_dim, conv_alpha,
]

button_open_config.click(
open_configuration,
inputs=[config_file_name] + settings_list,
- outputs=[config_file_name] + settings_list,
+ outputs=[config_file_name] + settings_list + [LoCon_group],
)

button_save_config.click(
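Note (not part of the PR): save_configuration and open_configuration capture their arguments with locals(), so settings_list has to list the UI components in exactly the same order as those function signatures; that is why the locon parameter becomes LoRA_type in the signatures, in train_model, and in settings_list at the same time. A rough sketch of the save-side pattern under that assumption, with an illustrative file name:

```python
# Rough sketch, not from the repository: capture the handler's own arguments
# via locals() and persist everything except the destination path.
import json

def save_configuration(file_path, LoRA_type='Standard', conv_dim=0, conv_alpha=0):
    # Get list of function parameters and values, as the real handlers do
    parameters = list(locals().items())
    data = {k: v for k, v in parameters if k != 'file_path'}
    with open(file_path, 'w') as f:
        json.dump(data, f, indent=2)

# Hypothetical usage; in the GUI these values arrive via settings_list
save_configuration('config_example.json', LoRA_type='LoCon', conv_dim=8, conv_alpha=1)
```
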
requirements.txt (26 changes: 13 additions & 13 deletions)

@@ -1,28 +1,28 @@
accelerate==0.15.0
- transformers==4.26.0
- ftfy==6.1.1
albumentations==1.3.0
- opencv-python==4.7.0.68
- einops==0.6.0
+ altair==4.2.2
+ bitsandbytes==0.35.0
+ dadaptation==1.5
diffusers[torch]==0.10.2
+ easygui==0.98.3
+ einops==0.6.0
+ ftfy==6.1.1
+ gradio==3.19.1
+ lion-pytorch==0.0.6
+ opencv-python==4.7.0.68
pytorch-lightning==1.9.0
- bitsandbytes==0.35.0
- tensorboard==2.10.1
safetensors==0.2.6
- gradio==3.16.2
- altair==4.2.2
- easygui==0.98.3
+ tensorboard==2.10.1
tk==0.1.0
- lion-pytorch==0.0.6
- dadaptation==1.5
+ transformers==4.26.0
# for BLIP captioning
+ fairscale==0.4.13
requests==2.28.2
timm==0.6.12
- fairscale==0.4.13
# for WD14 captioning
# tensorflow<2.11
- tensorflow==2.10.1
huggingface-hub==0.12.0
+ tensorflow==2.10.1
# xformers @ https://github.com/C43H66N12O12S2/stable-diffusion-webui/releases/download/f/xformers-0.0.14.dev0-cp310-cp310-win_amd64.whl
# for kohya_ss library
.
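Note (not part of the PR): requirements.txt is re-sorted alphabetically, and the functional change behind the PR title is the gradio pin moving from 3.16.2 to 3.19.1. A small sketch, not from the repository, for checking that an environment actually matches a few of the updated pins:

```python
# Small sketch, not from the repository: compare installed versions against
# a handful of the pins from the updated requirements.txt.
from importlib.metadata import PackageNotFoundError, version

expected = {'gradio': '3.19.1', 'transformers': '4.26.0', 'diffusers': '0.10.2'}

for name, want in expected.items():
    try:
        have = version(name)
    except PackageNotFoundError:
        print(f'{name}: not installed')
        continue
    status = 'OK' if have == want else f'MISMATCH (expected {want})'
    print(f'{name} {have}: {status}')
```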