import math
import sys
import warnings
from typing import Iterable, Optional

import torch
from torch.utils.tensorboard import SummaryWriter

from timm.data import Mixup
from timm.utils import accuracy, ModelEma

import utils.deit_util as utils
from utils import AverageMeter, to_device
def train_one_epoch(data_loader: Iterable,
                    model: torch.nn.Module,
                    criterion: torch.nn.Module,
                    optimizer: torch.optim.Optimizer,
                    epoch: int,
                    device: torch.device,
                    loss_scaler=None,
                    fp16: bool = False,
                    max_norm: float = 0,  # clip_grad
                    model_ema: Optional[ModelEma] = None,
                    mixup_fn: Optional[Mixup] = None,
                    writer: Optional[SummaryWriter] = None,
                    set_training_mode=True):
    global_step = epoch * len(data_loader)

    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}'))
    metric_logger.add_meter('n_ways', utils.SmoothedValue(window_size=1, fmt='{value:d}'))
    metric_logger.add_meter('n_imgs', utils.SmoothedValue(window_size=1, fmt='{value:d}'))
    header = 'Epoch: [{}]'.format(epoch)
    print_freq = 10

    model.train(set_training_mode)

    for batch in metric_logger.log_every(data_loader, print_freq, header):
        batch = to_device(batch, device)
        SupportTensor, SupportLabel, x, y = batch

        if mixup_fn is not None:
            x, y = mixup_fn(x, y)

        # forward
        with torch.cuda.amp.autocast(fp16):
            output = model(SupportTensor, SupportLabel, x)
            output = output.view(x.shape[0] * x.shape[1], -1)
            y = y.view(-1)
            loss = criterion(output, y)

        loss_value = loss.item()

        if not math.isfinite(loss_value):
            print("Loss is {}, stopping training".format(loss_value))
            sys.exit(1)

        optimizer.zero_grad()

        if fp16:
            # this attribute is added by timm on one optimizer (adahessian)
            is_second_order = hasattr(optimizer, 'is_second_order') and optimizer.is_second_order
            loss_scaler(loss, optimizer, clip_grad=max_norm,
                        parameters=model.parameters(), create_graph=is_second_order)
        else:
            loss.backward()
            optimizer.step()

        torch.cuda.synchronize()
        if model_ema is not None:
            model_ema.update(model)

        lr = optimizer.param_groups[0]["lr"]
        metric_logger.update(loss=loss_value)
        metric_logger.update(lr=lr)
        metric_logger.update(n_ways=SupportLabel.max() + 1)
        metric_logger.update(n_imgs=SupportTensor.shape[1] + x.shape[1])

        # tensorboard
        if utils.is_main_process() and global_step % print_freq == 0:
            writer.add_scalar("train/loss", scalar_value=loss_value, global_step=global_step)
            writer.add_scalar("train/lr", scalar_value=lr, global_step=global_step)

        global_step += 1

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    return {k: meter.global_avg for k, meter in metric_logger.meters.items()}
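

# The driver below is an illustrative sketch, not part of the original training
# script, and is never invoked on import. It shows how `train_one_epoch` is
# typically wrapped in an outer epoch loop; the model, criterion, optimizer,
# timm-style lr_scheduler, episodic loader and SummaryWriter are assumed to be
# built by the caller (the writer should be set on the rank-0 process, since the
# TensorBoard branch above runs only there).
def _example_training_loop(model, criterion, optimizer, lr_scheduler,
                           train_loader, device, num_epochs, writer):
    """Hypothetical outer loop around train_one_epoch (sketch only)."""
    for epoch in range(num_epochs):
        train_stats = train_one_epoch(train_loader, model, criterion, optimizer,
                                      epoch, device, writer=writer)
        lr_scheduler.step(epoch)  # assumes a timm-style scheduler taking the epoch index
        print(f"epoch {epoch}: avg loss {train_stats['loss']:.4f}")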
def evaluate(data_loaders, model, criterion, device, seed=None, ep=None):
    if isinstance(data_loaders, dict):
        test_stats_lst = {}
        test_stats_glb = {}

        for j, (source, data_loader) in enumerate(data_loaders.items()):
            print(f'* Evaluating {source}:')
            seed_j = seed + j if seed else None
            test_stats = _evaluate(data_loader, model, criterion, device, seed_j)
            test_stats_lst[source] = test_stats
            test_stats_glb[source] = test_stats['acc1']

        # apart from each domain's individual acc1, average every metric over all domains
        for k in test_stats_lst[source].keys():
            test_stats_glb[k] = torch.tensor([test_stats[k] for test_stats in test_stats_lst.values()]).mean().item()

        return test_stats_glb
    elif isinstance(data_loaders, torch.utils.data.DataLoader):  # when args.eval = True
        return _evaluate(data_loaders, model, criterion, device, seed, ep)
    else:
        warnings.warn(f'The structure of {data_loaders} is not recognizable.')
        return _evaluate(data_loaders, model, criterion, device, seed)
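

# Illustrative usage only: `evaluate` accepts either a single DataLoader or a dict
# mapping a domain name to its loader (e.g. Meta-Dataset sources). With a dict, the
# returned stats hold one acc1 entry per domain plus every metric averaged over all
# domains. The domain names and loaders below are hypothetical placeholders.
#
#     val_loaders = {'ilsvrc_2012': loader_in, 'aircraft': loader_air}
#     stats = evaluate(val_loaders, model, criterion, device, seed=1234)
#     print(stats['ilsvrc_2012'], stats['aircraft'])  # per-domain top-1 accuracy
#     print(stats['acc1'])                            # mean top-1 over all domains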
@torch.no_grad()
def _evaluate(data_loader, model, criterion, device, seed=None, ep=None):
    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter('n_ways', utils.SmoothedValue(window_size=1, fmt='{value:d}'))
    metric_logger.add_meter('n_imgs', utils.SmoothedValue(window_size=1, fmt='{value:d}'))
    metric_logger.add_meter('acc1', utils.SmoothedValue(window_size=len(data_loader.dataset)))
    metric_logger.add_meter('acc5', utils.SmoothedValue(window_size=len(data_loader.dataset)))
    header = 'Test:'

    # switch to evaluation mode
    model.eval()

    if seed is not None:
        data_loader.generator.manual_seed(seed)

    for ii, batch in enumerate(metric_logger.log_every(data_loader, 10, header)):
        if ep is not None:
            if ii > ep:
                break

        batch = to_device(batch, device)
        SupportTensor, SupportLabel, x, y = batch

        # compute output
        with torch.cuda.amp.autocast():
            output = model(SupportTensor, SupportLabel, x)
            output = output.view(x.shape[0] * x.shape[1], -1)
            y = y.view(-1)
            loss = criterion(output, y)

        acc1, acc5 = accuracy(output, y, topk=(1, 5))

        batch_size = x.shape[0]
        metric_logger.update(loss=loss.item())
        metric_logger.meters['acc1'].update(acc1.item(), n=batch_size)
        metric_logger.meters['acc5'].update(acc5.item(), n=batch_size)
        metric_logger.update(n_ways=SupportLabel.max() + 1)
        metric_logger.update(n_imgs=SupportTensor.shape[1] + x.shape[1])

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print('* Acc@1 {top1.global_avg:.3f} Acc@5 {top5.global_avg:.3f} loss {losses.global_avg:.3f}'
          .format(top1=metric_logger.acc1, top5=metric_logger.acc5, losses=metric_logger.loss))

    ret_dict = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
    ret_dict['acc_std'] = metric_logger.meters['acc1'].std

    return ret_dict
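

# The helper below is not part of the original file; it is a small sketch of how the
# returned 'acc1' / 'acc_std' pair is commonly turned into a 95% confidence interval
# over the evaluated test episodes (assuming acc_std is the standard deviation of the
# per-episode accuracies and n_episodes is the number of episodes that were run).
def _example_confidence_interval(test_stats, n_episodes):
    """Hypothetical helper: mean accuracy with a 95% confidence half-width."""
    ci95 = 1.96 * test_stats['acc_std'] / math.sqrt(n_episodes)
    return test_stats['acc1'], ci95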