# temperature_scaling.py
import torch
from torch import nn, optim
from torch.nn import functional as F

class ModelWithTemperature(nn.Module):
    """
    A thin decorator that wraps a model with temperature scaling.

    model (nn.Module):
        A classification neural network.

    NB: The output of the wrapped network must be the classification logits,
    NOT the softmax (or log softmax)! This wrapper's forward pass returns
    temperature-scaled softmax probabilities.
    """
    def __init__(self, model):
        super(ModelWithTemperature, self).__init__()
        self.model = model
        # A single learnable temperature parameter, initialized to 1.5
        self.temperature = nn.Parameter(torch.ones(1) * 1.5)
        self.output = nn.Softmax(dim=-1)

    def forward(self, input):
        logits = self.model(input)
        logits_scaled = self.temperature_scale(logits)
        return self.output(logits_scaled)

    def temperature_scale(self, logits):
        """
        Perform temperature scaling on logits.
        """
        # Expand the scalar temperature to match the shape of the logits
        temperature = self.temperature.unsqueeze(1).expand(logits.size(0), logits.size(1))
        return logits / temperature
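
# Hypothetical fitting sketch (not part of this file): tune the temperature
# on held-out validation logits by minimizing NLL with LBFGS, following
# Guo et al., "On Calibration of Modern Neural Networks" (ICML 2017).
# `valid_logits` and `valid_labels` are assumed precomputed tensors.
def fit_temperature(scaled_model, valid_logits, valid_labels, max_iter=50):
    nll_criterion = nn.CrossEntropyLoss()
    optimizer = optim.LBFGS([scaled_model.temperature], lr=0.01, max_iter=max_iter)

    def closure():
        # LBFGS re-evaluates the loss several times per step via this closure
        optimizer.zero_grad()
        loss = nll_criterion(scaled_model.temperature_scale(valid_logits), valid_labels)
        loss.backward()
        return loss

    optimizer.step(closure)
    return scaled_model.temperature.item()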

class ModelWithSoftmax(nn.Module):
    """
    Adds the log softmax back onto a model that outputs raw logits.
    """
    def __init__(self, model):
        super(ModelWithSoftmax, self).__init__()
        self.model = model
        self.output = nn.LogSoftmax(dim=-1)

    def forward(self, input):
        x = self.model(input)
        return self.output(x)
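
# Usage sketch (assumed): a model wrapped this way emits log-probabilities,
# so it pairs with nn.NLLLoss during training, e.g.:
#     model = ModelWithSoftmax(base_classifier)  # base_classifier is hypothetical
#     loss = nn.NLLLoss()(model(inputs), targets)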

class ECELoss(nn.Module):
    """
    Calculates the Expected Calibration Error of a model.
    (This isn't necessary for temperature scaling, just a cool metric.)

    The input to this loss is the logits of a model, NOT the softmax scores.

    This divides the confidence outputs into equal-width interval bins.
    In each bin, we compute the confidence gap:

        bin_gap = | avg_confidence_in_bin - accuracy_in_bin |

    We then return a weighted average of the gaps, based on the number
    of samples in each bin.

    See: Naeini, Mahdi Pakdaman, Gregory F. Cooper, and Milos Hauskrecht.
    "Obtaining Well Calibrated Probabilities Using Bayesian Binning." AAAI, 2015.
    """
    def __init__(self, n_bins=15):
        """
        n_bins (int): number of confidence interval bins
        """
        super(ECELoss, self).__init__()
        # Register the bin edges as buffers so they follow the module's device
        bin_boundaries = torch.linspace(0, 1, n_bins + 1)
        self.register_buffer('bin_lowers', bin_boundaries[:-1])
        self.register_buffer('bin_uppers', bin_boundaries[1:])

    def forward(self, logits, labels):
        softmaxes = F.softmax(logits, dim=-1)
        confidences, predictions = torch.max(softmaxes, dim=1)
        accuracies = predictions.eq(labels)
        ece = torch.zeros(1, device=logits.device)
        for bin_lower, bin_upper in zip(self.bin_lowers, self.bin_uppers):
            # Calculate |avg confidence - accuracy| in each bin,
            # weighted by the fraction of samples falling in the bin
            in_bin = confidences.gt(bin_lower) & confidences.le(bin_upper)
            prop_in_bin = in_bin.float().mean()
            if prop_in_bin.item() > 0:
                accuracy_in_bin = accuracies[in_bin].float().mean()
                avg_confidence_in_bin = confidences[in_bin].mean()
                ece += torch.abs(avg_confidence_in_bin - accuracy_in_bin) * prop_in_bin
        return ece
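
# Demonstration sketch on synthetic data (assumed; in practice the logits and
# labels would come from a held-out validation set):
if __name__ == '__main__':
    torch.manual_seed(0)
    logits = torch.randn(1000, 10) * 3          # deliberately over-confident logits
    labels = torch.randint(0, 10, (1000,))
    ece_criterion = ECELoss(n_bins=15)
    print('ECE before scaling: %.4f' % ece_criterion(logits, labels).item())
    # Dividing by a temperature > 1 softens the softmax, which typically
    # lowers the ECE of an over-confident model
    print('ECE after T=2 scaling: %.4f' % ece_criterion(logits / 2.0, labels).item())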