-
Notifications
You must be signed in to change notification settings - Fork 0
/
GDnet_IP.py
139 lines (122 loc) · 5.65 KB
/
GDnet_IP.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
import random
import WeDIV2
import torch.nn as nn
import torch
import numpy as np
device = torch.device("cuda:0")
def GDnet(name, **kwargs):  # Factory function to create a GDnet-IP model
    """Build a GDnet-IP model from a named backbone configuration.

    Args:
        name: key into ``cfgs`` selecting the backbone layout (e.g. "V1").
        **kwargs: optional overrides; ``class_num`` (default 20) sets the
            number of output classes.

    Returns:
        A ``GDnetIP`` instance.
    """
    # Fix: kwargs were previously accepted but silently ignored; forward
    # class_num so callers can actually override it (default unchanged).
    model = GDnetIP(vgg_structure(cfgs[name]), class_num=kwargs.get("class_num", 20))
    return model
class GDnetIP(nn.Module):
    """GDnet-IP classifier: a VGG-style feature extractor followed by three
    Inception blocks with stochastic branch dropping, optional WeDIV-based
    grouped channel dropping, and a 1x1-conv classifier head.
    """

    def __init__(self, feature_net, class_num=20):
        """feature_net: backbone module producing 256-channel feature maps
        (see vgg_structure); class_num: number of output classes."""
        super(GDnetIP, self).__init__()
        self.feature_net = feature_net
        self.inception = Inception(in_channel=256)  # 256
        self.bn1 = nn.Sequential(nn.PReLU(), nn.BatchNorm2d(256))
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2)
        self.inception2 = Inception(in_channel=256)
        self.bn2 = nn.Sequential(nn.PReLU(), nn.BatchNorm2d(256))
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)
        self.inception3 = Inception(in_channel=256)
        self.bn3 = nn.Sequential(nn.PReLU(), nn.BatchNorm2d(256))
        self.av = nn.AdaptiveAvgPool2d(1)
        # Fix: forward() calls self.wediv when is_wediv == 1 (the default),
        # but no such submodule was ever created, raising AttributeError.
        self.wediv = Wediv(size=256)
        self.classifier = nn.Sequential(
            nn.Conv2d(in_channels=256, out_channels=class_num, kernel_size=1))
        self._initialize_wight()

    # For clustering-based GDnet-IP (default): inception_prob=[1.0,1.0,1.0], is_wediv=1
    # For inception-based GDnet-IP: [prob,prob,prob], is_wediv=0
    def forward(self, x, inception_prob=(1.0, 1.0, 1.0), is_wediv=1):
        """Run a forward pass.

        Args:
            x: input image batch, presumably (N, 3, H, W) — the backbone stem
                expects 3 input channels (see vgg_structure).
            inception_prob: per-block keep probability for Inception branches.
                (Default changed from a mutable list to an equivalent tuple to
                avoid the shared-mutable-default pitfall; indexing unchanged.)
            is_wediv: 1 to apply WeDIV grouped channel dropping after pooling.

        Returns:
            Class-score tensor of shape (N, class_num, 1, 1).
        """
        x = self.feature_net(x)
        x = self.inception(x, inception_prob[0])
        x = self.bn1(x)
        x = self.pool1(x)
        x = self.inception2(x, inception_prob[1])
        x = self.bn2(x)
        x = self.pool2(x)
        x = self.inception3(x, inception_prob[2])
        x = self.bn3(x)
        x = self.av(x)
        if is_wediv == 1:
            x = self.wediv(x)
        x = self.classifier(x)
        return x

    def _initialize_wight(self):
        """Xavier-initialize every Conv2d weight and zero its bias."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Fix: use the in-place initializers; the non-underscore
                # variants (xavier_uniform / constant) are deprecated.
                nn.init.xavier_uniform_(m.weight)
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
class Inception(nn.Module):  # Inception module
    """Four-branch Inception block (1x1, 1x1->3x3, 1x1->3x3, pool->1x1)
    producing 64 + 96 + 64 + 32 = 256 output channels, with independent
    stochastic branch dropping during training.
    """

    def __init__(self, in_channel):
        super(Inception, self).__init__()
        self.branch1 = nn.Conv2d(in_channel, out_channels=64, kernel_size=1)
        self.branch2 = nn.Sequential(nn.Conv2d(in_channel, out_channels=64, kernel_size=1),
                                     nn.Conv2d(64, out_channels=96, kernel_size=3, padding=1))
        self.branch3 = nn.Sequential(nn.Conv2d(in_channel, out_channels=16, kernel_size=1),
                                     nn.Conv2d(16, out_channels=64, kernel_size=3, padding=1))
        self.branch4 = nn.Sequential(nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
                                     nn.Conv2d(in_channel, out_channels=32, kernel_size=1))

    def forward(self, x, inception_prob):
        """Concatenate the four branch outputs along the channel axis.

        In training mode, each branch is independently kept with probability
        ``inception_prob``; a dropped branch contributes zeros (its channel
        slots remain, so output shape is unchanged).
        """
        # Fix: the four branch computations were duplicated verbatim across
        # the training and eval paths; compute them once.
        branches = [self.branch1(x), self.branch2(x), self.branch3(x), self.branch4(x)]
        if self.training:
            self.bernouli1 = torch.distributions.bernoulli.Bernoulli(torch.tensor([inception_prob]))
            # One independent coin flip per branch; sample() yields 0. or 1.,
            # equivalent to the original torch.equal(..., ones(1)) != 1 test.
            branches = [b if self.bernouli1.sample().item() == 1 else torch.zeros_like(b)
                        for b in branches]
        return torch.cat(branches, 1)
cfgs = {"V1": ["M", 128, "M", 256, "M", 256, "M"]}  # Backbone layout: ints = conv output channels, "M" = 2x2 max-pool (see vgg_structure)
def vgg_structure(cfg: list):
    """Assemble the VGG-style backbone described by *cfg*.

    The stem is a stride-2 3x3 conv (3 -> 64 channels) followed by PReLU.
    Each subsequent entry of *cfg* appends either a 2x2 max-pool (the
    string "M") or a 3x3 same-padding conv to that channel count plus PReLU.

    Returns:
        nn.Sequential containing the assembled layers.
    """
    layers = [nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1),
              nn.PReLU()]  # stem: downsample + PReLU activation
    channels = 64
    for item in cfg:
        if item == "M":
            layers.append(nn.MaxPool2d(kernel_size=2, stride=2))
            continue
        layers.append(nn.Conv2d(channels, out_channels=item, kernel_size=3, stride=1, padding=1))
        layers.append(nn.PReLU())
        channels = item
    return nn.Sequential(*layers)
class Wediv(nn.Module):
    """WeDIV grouped channel dropping.

    During training, the pooled channel activations are clustered with
    WeDIV2 and each resulting channel group is dropped (mask zeroed) with
    probability 0.5. In eval mode the input passes through unchanged.
    """

    def __init__(self, size):
        """size: number of channels the dropout mask covers."""
        super(Wediv, self).__init__()
        self.size = size
        self.berbouli = torch.distributions.bernoulli.Bernoulli(torch.tensor([0.5]))
        self.mask = torch.ones(1, size)

    def forward(self, x):
        # Eval mode: identity — grouped dropping is a training-time regularizer.
        if not self.training:
            return x
        # Assumes x is (batch, channels, 1, 1) from AdaptiveAvgPool2d — TODO
        # confirm; squeeze().T yields a (channels x batch)-style matrix for
        # WeDIV2 clustering.
        features = x.detach().cpu().squeeze().numpy().T
        Y_CL, K_optimal, W_optimal, rch = WeDIV2.WeDIV2(features, w_step=0.2, KList=[4])
        self.mask = self.mask.to(device)
        # Collect channel indices per cluster label.
        group_indices = {}
        for group_label in range(K_optimal):
            group_indices[group_label] = [i for i, label in enumerate(Y_CL) if label == group_label]
        # Drop each whole group with probability 0.5 (one coin flip per group);
        # sample() yields 0. or 1., equivalent to the original equal-to-ones test.
        for group in group_indices:
            if self.berbouli.sample().item() != 1:
                self.mask[0, group_indices[group]] = 0
        result = x * self.mask.unsqueeze(2).unsqueeze(3)
        # Fix: reset using the configured size — this was hard-coded to 256,
        # which broke any Wediv instantiated with a different channel count.
        self.mask = torch.ones(1, self.size)
        return result
if __name__ == '__main__':
    # Smoke test: build the default "V1" GDnet-IP model.
    net = GDnet("V1")