Reduce peak RAM usage #332

Merged 1 commit on Mar 30, 2023
library/model_util.py (8 additions, 12 deletions)

@@ -831,7 +831,7 @@ def is_safetensors(path):
   return os.path.splitext(path)[1].lower() == '.safetensors'


-def load_checkpoint_with_text_encoder_conversion(ckpt_path):
+def load_checkpoint_with_text_encoder_conversion(ckpt_path, device):
   # support models that store the text encoder in a different format (no 'text_model' prefix)
   TEXT_ENCODER_KEY_REPLACEMENTS = [
     ('cond_stage_model.transformer.embeddings.', 'cond_stage_model.transformer.text_model.embeddings.'),
@@ -841,9 +841,9 @@ def load_checkpoint_with_text_encoder_conversion(ckpt_path):

   if is_safetensors(ckpt_path):
     checkpoint = None
-    state_dict = load_file(ckpt_path, "cpu")
+    state_dict = load_file(ckpt_path, device)
   else:
-    checkpoint = torch.load(ckpt_path, map_location="cpu")
+    checkpoint = torch.load(ckpt_path, map_location=device)
     if "state_dict" in checkpoint:
       state_dict = checkpoint["state_dict"]
     else:
@@ -865,26 +865,22 @@ def load_checkpoint_with_text_encoder_conversion(ckpt_path):


 # TODO: behavior of the dtype argument is questionable and should be checked; unverified whether text_encoder can be created in the specified format
-def load_models_from_stable_diffusion_checkpoint(v2, ckpt_path, dtype=None):
-  _, state_dict = load_checkpoint_with_text_encoder_conversion(ckpt_path)
-  if dtype is not None:
-    for k, v in state_dict.items():
-      if type(v) is torch.Tensor:
-        state_dict[k] = v.to(dtype)
+def load_models_from_stable_diffusion_checkpoint(v2, ckpt_path, device='cpu', dtype=None):
+  _, state_dict = load_checkpoint_with_text_encoder_conversion(ckpt_path, device)

   # Convert the UNet2DConditionModel model.
   unet_config = create_unet_diffusers_config(v2)
   converted_unet_checkpoint = convert_ldm_unet_checkpoint(v2, state_dict, unet_config)

-  unet = UNet2DConditionModel(**unet_config)
+  unet = UNet2DConditionModel(**unet_config).to(device)
   info = unet.load_state_dict(converted_unet_checkpoint)
   print("loading u-net:", info)

   # Convert the VAE model.
   vae_config = create_vae_diffusers_config()
   converted_vae_checkpoint = convert_ldm_vae_checkpoint(state_dict, vae_config)

-  vae = AutoencoderKL(**vae_config)
+  vae = AutoencoderKL(**vae_config).to(device)
   info = vae.load_state_dict(converted_vae_checkpoint)
   print("loading vae:", info)

@@ -918,7 +914,7 @@ def load_models_from_stable_diffusion_checkpoint(v2, ckpt_path, dtype=None):
     converted_text_encoder_checkpoint = convert_ldm_clip_checkpoint_v1(state_dict)

     logging.set_verbosity_error()  # don't show annoying warning
-    text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14")
+    text_model = CLIPTextModel.from_pretrained("openai/clip-vit-large-patch14").to(device)
     logging.set_verbosity_warning()

     info = text_model.load_state_dict(converted_text_encoder_checkpoint)
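The changes above are the first half of the PR: both tensor loaders now receive the target device, so checkpoint weights can be materialized on the GPU directly instead of being staged in host RAM. A minimal standalone sketch of the pattern follows; the helper name and the extension check are illustrative, not code from this PR:

import torch
from safetensors.torch import load_file

def load_state_dict_on(ckpt_path, device="cpu"):
    # hypothetical helper mirroring load_checkpoint_with_text_encoder_conversion
    if ckpt_path.endswith(".safetensors"):
        # safetensors memory-maps the file and copies tensors straight to
        # `device`, so no full state_dict copy accumulates in host RAM
        return load_file(ckpt_path, device)
    # torch.load remaps each storage to `device` as it is deserialized
    checkpoint = torch.load(ckpt_path, map_location=device)
    # .ckpt files may wrap the weights in a "state_dict" key
    return checkpoint.get("state_dict", checkpoint)

Constructing the modules and immediately calling .to(device) (UNet, VAE, text encoder above) completes the picture: both the raw tensors and the module parameters land on the accelerator rather than doubling up in CPU memory before being moved.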
library/train_util.py (2 additions, 2 deletions)

@@ -2536,13 +2536,13 @@ def prepare_dtype(args: argparse.Namespace):
   return weight_dtype, save_dtype


-def load_target_model(args: argparse.Namespace, weight_dtype):
+def load_target_model(args: argparse.Namespace, weight_dtype, device='cpu'):
   name_or_path = args.pretrained_model_name_or_path
   name_or_path = os.readlink(name_or_path) if os.path.islink(name_or_path) else name_or_path
   load_stable_diffusion_format = os.path.isfile(name_or_path)  # determine SD or Diffusers
   if load_stable_diffusion_format:
     print("load StableDiffusion checkpoint")
-    text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, name_or_path)
+    text_encoder, vae, unet = model_util.load_models_from_stable_diffusion_checkpoint(args.v2, name_or_path, device)
   else:
     print("load Diffusers pretrained models")
     try:
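Here the new device parameter is only threaded through to model_util. A hedged usage sketch, assuming the repository's import style and a pared-down argparse.Namespace; the real Namespace carries many more training options, and the checkpoint path is hypothetical:

import argparse
import torch
import library.train_util as train_util

args = argparse.Namespace(
    pretrained_model_name_or_path="some-checkpoint.safetensors",  # hypothetical
    v2=False,
)
weight_dtype = torch.float32
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# with a GPU device, the weights never sit in host RAM as a second copy
text_encoder, vae, unet, _ = train_util.load_target_model(args, weight_dtype, device)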
train_network.py (9 additions, 3 deletions)

@@ -123,12 +123,18 @@ def train(args):
   weight_dtype, save_dtype = train_util.prepare_dtype(args)

   # load the models
-  text_encoder, vae, unet, _ = train_util.load_target_model(args, weight_dtype)
+  for pi in range(accelerator.state.num_processes):
+    if pi == accelerator.state.local_process_index:
+      print(f"loading model for process {accelerator.state.local_process_index}/{accelerator.state.num_processes}")
+      text_encoder, vae, unet, _ = train_util.load_target_model(args, weight_dtype, accelerator.device)
+      gc.collect()
+      torch.cuda.empty_cache()
+    accelerator.wait_for_everyone()

   # work on low-ram device
   if args.lowram:
-    text_encoder.to("cuda")
-    unet.to("cuda")
+    text_encoder.to(accelerator.device)
+    unet.to(accelerator.device)

   # incorporate xformers or memory efficient attention into the models
   train_util.replace_unet_modules(unet, args.mem_eff_attn, args.xformers)
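This loop is the heart of the RAM reduction. Previously every distributed process deserialized the checkpoint into host RAM at the same time, so peak usage scaled with the number of GPUs; serializing the loads caps the peak at roughly one checkpoint. A self-contained sketch of the same pattern, assuming Hugging Face Accelerate; load_model is a toy stand-in for train_util.load_target_model:

import gc
import torch
from accelerate import Accelerator

accelerator = Accelerator()

def load_model(device):
    # toy stand-in for train_util.load_target_model
    return torch.nn.Linear(4, 4).to(device)

for pi in range(accelerator.state.num_processes):
    if pi == accelerator.state.local_process_index:
        # exactly one process loads per iteration, so peak host RAM stays
        # at one checkpoint's worth instead of num_processes times that
        model = load_model(accelerator.device)
        gc.collect()
        torch.cuda.empty_cache()
    # barrier: all processes wait until the current one has finished loading
    accelerator.wait_for_everyone()

Each iteration ends in a barrier, so later processes idle while an earlier one loads; the cost is a slower startup, the gain is a flat host-memory profile.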