-
Notifications
You must be signed in to change notification settings - Fork 32
/
classifier32.py
103 lines (86 loc) · 3.08 KB
/
classifier32.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
import torch
from torch import nn
class classifier32(nn.Module):
    """9-layer convolutional classifier for 32x32 RGB inputs.

    Three conv stages (3->64, 64->128, 128->feat_dim channels), each stage
    preceded by Dropout2d and ending in a stride-2 downsampling conv, then
    global average pooling into a bias-free linear classifier.

    Args:
        num_classes: number of output logits.
        feat_dim: channel width of the final conv stage (and hence the
            feature-vector size). ``None`` falls back to 128.
    """

    def __init__(self, num_classes=10, feat_dim=128):
        # super(self.__class__, self) breaks under subclassing (infinite
        # recursion); the zero-argument form is the correct modern spelling.
        super().__init__()
        if feat_dim is None:
            feat_dim = 128
        self.num_classes = num_classes
        # Stage 1: 3 -> 64 channels; conv3 downsamples (stride 2).
        self.conv1 = nn.Conv2d(3, 64, 3, 1, 1, bias=False)
        self.conv2 = nn.Conv2d(64, 64, 3, 1, 1, bias=False)
        self.conv3 = nn.Conv2d(64, 128, 3, 2, 1, bias=False)
        # Stage 2: 128 channels; conv6 downsamples (stride 2).
        self.conv4 = nn.Conv2d(128, 128, 3, 1, 1, bias=False)
        self.conv5 = nn.Conv2d(128, 128, 3, 1, 1, bias=False)
        self.conv6 = nn.Conv2d(128, 128, 3, 2, 1, bias=False)
        # Stage 3: 128 -> feat_dim channels; conv9 downsamples (stride 2).
        self.conv7 = nn.Conv2d(128, 128, 3, 1, 1, bias=False)
        self.conv8 = nn.Conv2d(128, 128, 3, 1, 1, bias=False)
        self.conv9 = nn.Conv2d(128, feat_dim, 3, 2, 1, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.bn2 = nn.BatchNorm2d(64)
        self.bn3 = nn.BatchNorm2d(128)
        self.bn4 = nn.BatchNorm2d(128)
        self.bn5 = nn.BatchNorm2d(128)
        self.bn6 = nn.BatchNorm2d(128)
        self.bn7 = nn.BatchNorm2d(128)
        self.bn8 = nn.BatchNorm2d(128)
        self.bn9 = nn.BatchNorm2d(feat_dim)
        # NOTE(review): bn10 is never used in forward(); kept so checkpoints
        # saved from the original definition still load with strict=True.
        self.bn10 = nn.BatchNorm2d(128)
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(feat_dim, num_classes, bias=False)
        self.dr1 = nn.Dropout2d(0.2)
        self.dr2 = nn.Dropout2d(0.2)
        self.dr3 = nn.Dropout2d(0.2)
        # LeakyReLU is stateless, so one shared instance suffices; the
        # original allocated a new module for every activation per forward.
        self.act = nn.LeakyReLU(0.2)
        self.apply(weights_init)
        # The original unconditionally called self.cuda(), which crashes on
        # CPU-only machines; only move to GPU when one is available.
        if torch.cuda.is_available():
            self.cuda()

    def forward(self, x, return_feature=False):
        """Run the network.

        Args:
            x: input batch of shape (N, 3, H, W) — presumably 32x32; TODO
                confirm against callers.
            return_feature: if True, also return the pooled feat_dim-vector.

        Returns:
            logits of shape (N, num_classes), or (features, logits) when
            ``return_feature`` is True.
        """
        # Stage 1
        x = self.dr1(x)
        x = self.act(self.bn1(self.conv1(x)))
        x = self.act(self.bn2(self.conv2(x)))
        x = self.act(self.bn3(self.conv3(x)))
        # Stage 2
        x = self.dr2(x)
        x = self.act(self.bn4(self.conv4(x)))
        x = self.act(self.bn5(self.conv5(x)))
        x = self.act(self.bn6(self.conv6(x)))
        # Stage 3
        x = self.dr3(x)
        x = self.act(self.bn7(self.conv7(x)))
        x = self.act(self.bn8(self.conv8(x)))
        x = self.act(self.bn9(self.conv9(x)))
        # Global average pool -> flat feature vector -> linear classifier.
        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        y = self.fc(x)
        if return_feature:
            return x, y
        return y
def weights_init(m):
    """Initialize module parameters in place.

    Conv-like modules get weight ~ N(0, 0.05); BatchNorm-like modules get
    weight ~ N(1, 0.02) and zero bias. All other module types (including
    nn.Linear) are left at their default initialization.
    """
    kind = type(m).__name__
    # TODO: what about fully-connected layers?
    if 'Conv' in kind:
        nn.init.normal_(m.weight, mean=0.0, std=0.05)
    elif 'BatchNorm' in kind:
        nn.init.normal_(m.weight, mean=1.0, std=0.02)
        nn.init.zeros_(m.bias)
if __name__ == '__main__':
    # Smoke test: build a 4-way classifier and report how many trainable
    # parameters it contains.
    import numpy as np

    model = classifier32(num_classes=4)
    trainable = (p for p in model.parameters() if p.requires_grad)
    print(sum(np.prod(p.size()) for p in trainable))