# Run without sacred/neptune logging:  python trainer_mnist.py with "observe=False"
# Run with sacred/neptune logging:     python trainer_mnist.py
# Override config parameters from the command line, e.g. python trainer_mnist.py with "elbo_samples=10"
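# Neptune logging (observe=True) reads the API token from the NEPTUNE_API_TOKEN environment variable.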
import os
import torch
import torch.optim as optim
import torchvision
from torch.utils.data import DataLoader
from torchvision import transforms
import numpy as np
from sacred import Experiment
from sacred.observers import FileStorageObserver
from neptune.new.integrations.sacred import NeptuneObserver
import neptune.new as neptune
import ddu_dirty_mnist
from models.mnist import BayesianMnist

ex = Experiment()


@ex.config
def config():
    observe = True
    if observe:
        token = os.environ.get("NEPTUNE_API_TOKEN")
        nep_run = neptune.init(api_token=token, project="pgm/mnistbl")
        ex.observers.append(FileStorageObserver("sacred_files"))
        ex.observers.append(NeptuneObserver(run=nep_run))
        print("*****Observing runs*****")
    else:
        print("*****Not observing runs*****")
    # Training hyperparameters (can be overridden from the command line).
    num_epochs = 100
    batch_size = 100
    elbo_samples = 4
    train_samples = None
    dirty_mnist = False
    mixture_prior = True
    lr = 0.01
    cuda = True


@ex.automain
def train(elbo_samples: int, batch_size: int, num_epochs: int, lr: float,
          cuda: bool, mixture_prior: bool, checkpoint_name: str = None,
          train_samples: int = None, dirty_mnist: bool = False):
    device = torch.device("cuda" if cuda else "cpu")
    device_str = "cuda" if cuda else "cpu"
    # Load DirtyMNIST (MNIST + Ambiguous-MNIST) or plain MNIST.
    if dirty_mnist:
        train_data = ddu_dirty_mnist.DirtyMNIST("~/data", train=True, download=True,
                                                transform=transforms.Compose([
                                                    transforms.Normalize((-0.0651,), (0.8897,))
                                                ]), device=device_str)
        test_data = ddu_dirty_mnist.DirtyMNIST("~/data", train=False, download=True,
                                               transform=transforms.Compose([
                                                   transforms.Normalize((-0.0651,), (0.8897,))
                                               ]), device=device_str)
    else:
        train_data = torchvision.datasets.MNIST(root="~/data", train=True,
                                                download=True,
                                                transform=transforms.Compose([
                                                    transforms.ToTensor()]))
        test_data = torchvision.datasets.MNIST(root="~/data", train=False,
                                               download=True,
                                               transform=transforms.Compose([
                                                   transforms.ToTensor()]))
    model = BayesianMnist(28 * 28, mixture_prior=mixture_prior).to(device)
    optimiser = optim.Adam(model.parameters(), lr=lr)
    num_classes = 10
    # Optionally train on a subset of the training data.
    if train_samples is not None:
        subset = list(range(0, train_samples))
        train_data = torch.utils.data.Subset(train_data, subset)
    loader_train = DataLoader(train_data, batch_size=batch_size, shuffle=True, drop_last=True)
    loader_test = DataLoader(test_data, batch_size=batch_size, shuffle=True, drop_last=True)
    num_train_batches = len(loader_train)
    num_test_batches = len(loader_test)
    # Training: minimise the ELBO-based energy loss over the training set.
    for epoch in range(num_epochs):
        train_loss = 0
        total_accuracy = 0
        for x_train, y_train in loader_train:
            x_train = x_train.reshape(batch_size, -1).to(device)
            y_train = y_train.to(device)
            optimiser.zero_grad()
            loss, accuracy = model.energy_loss(
                x_train, y_train, num_train_batches, num_classes, elbo_samples)
            loss.backward()
            optimiser.step()
            train_loss += loss.item()
            total_accuracy += accuracy
        epoch_accuracy = total_accuracy / num_train_batches
        print("epoch number: {} TRAIN loss: {} ".format(epoch, train_loss))
        print("epoch number: {} TRAIN accuracy: {} ".format(epoch, epoch_accuracy))
        ex.log_scalar("train loss", loss.item())
        ex.log_scalar("train accuracy", epoch_accuracy)
        ex.log_scalar("train error", 1 - epoch_accuracy)
        # Evaluate on the test set after every epoch.
        test_loss = 0
        total_test_accuracy = 0
        total_entropy = 0
        for x_test, y_test in loader_test:
            x_test = x_test.reshape(batch_size, -1).to(device)
            y_test = y_test.to(device)
            batch_loss, _ = model.energy_loss(
                x_test, y_test, num_test_batches, num_classes, elbo_samples)
            test_loss += batch_loss.item()
            softmax_averaged, entropy = model.inference(x_test, num_classes, elbo_samples, batch_size)
            pred = softmax_averaged.argmax(axis=1)
            accuracy = sum(pred == y_test.to("cpu").numpy())
            total_test_accuracy += (accuracy / batch_size)
            total_entropy += np.sum(entropy)
        average_test_acc = total_test_accuracy / num_test_batches
        print(
            "epoch number {} TEST loss: {} ".format(epoch, test_loss))
        print(
            "epoch number {} TEST acc: {} ".format(
                epoch, average_test_acc.item()))
        print("epoch number {} entropy {}".format(epoch, total_entropy))
        ex.log_scalar("test loss", test_loss)
        ex.log_scalar("test accuracy", average_test_acc)
        ex.log_scalar("test error", 1 - average_test_acc)
        ex.log_scalar("test entropy", total_entropy)
    # Save the final weights and stop the Neptune run started in config().
    if checkpoint_name is not None:
        torch.save(model.state_dict(), checkpoint_name + ".checkpoint")
    nep_run.stop()