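"""Training and evaluation entry point for the disentangled VAE models.

A hypothetical invocation (`my_model` is a placeholder name; every flag is
defined in `parse_arguments` below, with defaults read from the `Custom`
section of hyperparam.ini):

    python main.py my_model -d dsprites --lr 5e-4
    python main.py my_model --is-eval-only
"""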
import argparse
import logging
import sys
import os
from configparser import ConfigParser
from torch import optim
from disvae import init_specific_model, Trainer, Evaluator
from disvae.utils.modelIO import save_model, load_model, load_metadata
from disvae.models.losses import LOSSES, RECON_DIST, get_loss_f
from disvae.models.vae import MODELS
from utils.datasets import get_dataloaders, get_img_size, DATASETS
from utils.helpers import (create_safe_directory, get_device, set_seed, get_n_param,
get_config_section, update_namespace_, FormatterNoDuplicate)
from utils.visualize import GifTraversalsTraining
CONFIG_FILE = "hyperparam.ini"
RES_DIR = "results"
LOG_LEVELS = list(logging._levelToName.values())
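# NOTE: `logging._levelToName` is a private mapping (level number -> name) in
# CPython's logging module; it works in practice but is not a stable public API.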
ADDITIONAL_EXP = ['custom', "debug", "best_celeba", "best_dsprites"]
EXPERIMENTS = ADDITIONAL_EXP + ["{}_{}".format(loss, data)
for loss in LOSSES
for data in DATASETS]
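
# `get_config_section` (utils.helpers) returns one section of CONFIG_FILE as a
# dict of defaults. A minimal sketch of the assumed behaviour, using standard
# `configparser` semantics (the real helper may also coerce value types):
#
#     def get_config_section(filenames, section):
#         parser = ConfigParser()
#         parser.read(filenames)
#         return dict(parser[section])
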
def parse_arguments(args_to_parse):
"""Parse the command line arguments.
Parameters
----------
args_to_parse: list of str
Arguments to parse (splitted on whitespaces).
"""
default_config = get_config_section([CONFIG_FILE], "Custom")
description = "PyTorch implementation and evaluation of disentangled Variational AutoEncoders and metrics."
parser = argparse.ArgumentParser(description=description,
formatter_class=FormatterNoDuplicate)
# General options
general = parser.add_argument_group('General options')
general.add_argument('name', type=str,
help="Name of the model for storing and loading purposes.")
general.add_argument('-L', '--log-level', help="Logging levels.",
default=default_config['log_level'], choices=LOG_LEVELS)
general.add_argument('--no-progress-bar', action='store_true',
default=default_config['no_progress_bar'],
help='Disables progress bar.')
general.add_argument('--no-cuda', action='store_true',
default=default_config['no_cuda'],
                         help='Disables CUDA training, even when a GPU is available.')
general.add_argument('-s', '--seed', type=int, default=default_config['seed'],
help='Random seed. Can be `None` for stochastic behavior.')
# Learning options
training = parser.add_argument_group('Training specific options')
training.add_argument('--checkpoint-every',
type=int, default=default_config['checkpoint_every'],
                          help='Save a checkpoint of the trained model every n epochs.')
    training.add_argument('-d', '--dataset', help="Name of the training dataset.",
default=default_config['dataset'], choices=DATASETS)
training.add_argument('-x', '--experiment',
default=default_config['experiment'], choices=EXPERIMENTS,
                          help='Predefined experiment to run. If not `custom`, this will overwrite some of the other arguments.')
training.add_argument('-e', '--epochs', type=int,
default=default_config['epochs'],
help='Maximum number of epochs to run for.')
training.add_argument('-b', '--batch-size', type=int,
default=default_config['batch_size'],
help='Batch size for training.')
training.add_argument('--lr', type=float, default=default_config['lr'],
help='Learning rate.')
# Model Options
    model = parser.add_argument_group('Model specific options')
model.add_argument('-m', '--model-type',
default=default_config['model'], choices=MODELS,
help='Type of encoder and decoder to use.')
model.add_argument('-z', '--latent-dim', type=int,
default=default_config['latent_dim'],
help='Dimension of the latent variable.')
model.add_argument('-l', '--loss',
default=default_config['loss'], choices=LOSSES,
help="Type of VAE loss function to use.")
model.add_argument('-r', '--rec-dist', default=default_config['rec_dist'],
choices=RECON_DIST,
help="Form of the likelihood ot use for each pixel.")
model.add_argument('-a', '--reg-anneal', type=float,
default=default_config['reg_anneal'],
help="Number of annealing steps where gradually adding the regularisation. What is annealed is specific to each loss.")
# Loss Specific Options
betaH = parser.add_argument_group('BetaH specific parameters')
betaH.add_argument('--betaH-B', type=float,
default=default_config['betaH_B'],
help="Weight of the KL (beta in the paper).")
betaB = parser.add_argument_group('BetaB specific parameters')
betaB.add_argument('--betaB-initC', type=float,
default=default_config['betaB_initC'],
help="Starting annealed capacity.")
betaB.add_argument('--betaB-finC', type=float,
default=default_config['betaB_finC'],
help="Final annealed capacity.")
betaB.add_argument('--betaB-G', type=float,
default=default_config['betaB_G'],
help="Weight of the KL divergence term (gamma in the paper).")
factor = parser.add_argument_group('factor VAE specific parameters')
factor.add_argument('--factor-G', type=float,
default=default_config['factor_G'],
help="Weight of the TC term (gamma in the paper).")
factor.add_argument('--lr-disc', type=float,
default=default_config['lr_disc'],
help='Learning rate of the discriminator.')
btcvae = parser.add_argument_group('beta-tcvae specific parameters')
btcvae.add_argument('--btcvae-A', type=float,
default=default_config['btcvae_A'],
help="Weight of the MI term (alpha in the paper).")
btcvae.add_argument('--btcvae-G', type=float,
default=default_config['btcvae_G'],
help="Weight of the dim-wise KL term (gamma in the paper).")
btcvae.add_argument('--btcvae-B', type=float,
default=default_config['btcvae_B'],
help="Weight of the TC term (beta in the paper).")
# Learning options
evaluation = parser.add_argument_group('Evaluation specific options')
evaluation.add_argument('--is-eval-only', action='store_true',
default=default_config['is_eval_only'],
help='Whether to only evaluate using precomputed model `name`.')
evaluation.add_argument('--is-metrics', action='store_true',
default=default_config['is_metrics'],
help="Whether to compute the disentangled metrcics. Currently only possible with `dsprites` as it is the only dataset with known true factors of variations.")
evaluation.add_argument('--no-test', action='store_true',
default=default_config['no_test'],
help="Whether not to compute the test losses.`")
evaluation.add_argument('--eval-batchsize', type=int,
default=default_config['eval_batchsize'],
help='Batch size for evaluation.')
args = parser.parse_args(args_to_parse)
if args.experiment != 'custom':
if args.experiment not in ADDITIONAL_EXP:
# update all common sections first
model, dataset = args.experiment.split("_")
common_data = get_config_section([CONFIG_FILE], "Common_{}".format(dataset))
update_namespace_(args, common_data)
common_model = get_config_section([CONFIG_FILE], "Common_{}".format(model))
update_namespace_(args, common_model)
try:
experiments_config = get_config_section([CONFIG_FILE], args.experiment)
update_namespace_(args, experiments_config)
except KeyError as e:
if args.experiment in ADDITIONAL_EXP:
raise e # only reraise if didn't use common section
return args
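
# `update_namespace_` (utils.helpers) mutates `args` in place; the trailing
# underscore marks the in-place convention. A minimal sketch of the assumed
# behaviour (the real helper may validate keys):
#
#     def update_namespace_(namespace, dictionary):
#         for key, value in dictionary.items():
#             setattr(namespace, key, value)
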
def main(args):
"""Main train and evaluation function.
Parameters
----------
args: argparse.Namespace
Arguments
"""
formatter = logging.Formatter('%(asctime)s %(levelname)s - %(funcName)s: %(message)s',
"%H:%M:%S")
logger = logging.getLogger(__name__)
logger.setLevel(args.log_level.upper())
stream = logging.StreamHandler()
stream.setLevel(args.log_level.upper())
stream.setFormatter(formatter)
logger.addHandler(stream)
set_seed(args.seed)
device = get_device(is_gpu=not args.no_cuda)
exp_dir = os.path.join(RES_DIR, args.name)
logger.info("Root directory for saving and loading experiments: {}".format(exp_dir))
if not args.is_eval_only:
create_safe_directory(exp_dir, logger=logger)
if args.loss == "factor":
logger.info("FactorVae needs 2 batches per iteration. To replicate this behavior while being consistent, we double the batch size and the the number of epochs.")
args.batch_size *= 2
args.epochs *= 2
# PREPARES DATA
train_loader = get_dataloaders(args.dataset,
batch_size=args.batch_size,
logger=logger)
logger.info("Train {} with {} samples".format(args.dataset, len(train_loader.dataset)))
# PREPARES MODEL
args.img_size = get_img_size(args.dataset) # stores for metadata
model = init_specific_model(args.model_type, args.img_size, args.latent_dim)
logger.info('Num parameters in model: {}'.format(get_n_param(model)))
# TRAINS
optimizer = optim.Adam(model.parameters(), lr=args.lr)
model = model.to(device) # make sure trainer and viz on same device
gif_visualizer = GifTraversalsTraining(model, args.dataset, exp_dir)
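        # `n_data` is forwarded because some losses (e.g. the minibatch-weighted
        # total-correlation estimator in beta-TCVAE) need the full dataset size.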
loss_f = get_loss_f(args.loss,
n_data=len(train_loader.dataset),
device=device,
**vars(args))
trainer = Trainer(model, optimizer, loss_f,
device=device,
logger=logger,
save_dir=exp_dir,
is_progress_bar=not args.no_progress_bar,
gif_visualizer=gif_visualizer)
trainer(train_loader,
epochs=args.epochs,
checkpoint_every=args.checkpoint_every,)
# SAVE MODEL AND EXPERIMENT INFORMATION
save_model(trainer.model, exp_dir, metadata=vars(args))
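    # reload the saved model and metadata so evaluation uses exactly what is on
    # disk; when `--is-eval-only` is set this is the only block that runs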
if args.is_metrics or not args.no_test:
model = load_model(exp_dir, is_gpu=not args.no_cuda)
metadata = load_metadata(exp_dir)
        # TODO: currently uses the train dataset for evaluation
test_loader = get_dataloaders(metadata["dataset"],
batch_size=args.eval_batchsize,
shuffle=False,
logger=logger)
loss_f = get_loss_f(args.loss,
n_data=len(test_loader.dataset),
device=device,
**vars(args))
evaluator = Evaluator(model, loss_f,
device=device,
logger=logger,
save_dir=exp_dir,
is_progress_bar=not args.no_progress_bar)
evaluator(test_loader, is_metrics=args.is_metrics, is_losses=not args.no_test)
if __name__ == '__main__':
args = parse_arguments(sys.argv[1:])
main(args)