# train_net.py
import mxnet as mx
import cv2
import numpy as np
import pandas as pd
from mxnet import autograd
from mxnet import gluon
from mxnet import init
from mxnet import nd
from mxnet.gluon import nn
from mxnet.gluon.model_zoo import vision as models
from sklearn.metrics import roc_auc_score
from sklearn.model_selection import train_test_split
from loss import wSigmoidBinaryCrossEntropyLoss

# Both losses take probabilities (from_sigmoid=True) because the networks
# below end in a Dense layer with a sigmoid activation.
sigmoid_cross_entropy = gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=True)
wsigmoid_cross_entropy = wSigmoidBinaryCrossEntropyLoss(from_sigmoid=True)

def get_optimizer_params(optimizer=None, learning_rate=None, momentum=None,
                         weight_decay=None, ctx=None):
    # Map an optimizer name to the (name, params) pair expected by gluon.Trainer.
    # Gradients are rescaled by 1/len(ctx) so multi-device updates average
    # rather than sum across contexts.
    rescale = 1.0 / len(ctx) if len(ctx) > 0 else 1.0
    if optimizer.lower() == 'rmsprop':
        opt = 'rmsprop'
        print('you chose RMSProp, decreasing lr by a factor of 10')
        optimizer_params = {'learning_rate': learning_rate / 10.0,
                            'wd': weight_decay,
                            'rescale_grad': rescale}
    elif optimizer.lower() == 'sgd':
        opt = 'sgd'
        optimizer_params = {'learning_rate': learning_rate,
                            'momentum': momentum,
                            'wd': weight_decay,
                            'rescale_grad': rescale}
    elif optimizer.lower() == 'adadelta':
        opt = 'adadelta'
        optimizer_params = {}
    elif optimizer.lower() == 'adam':
        opt = 'adam'
        optimizer_params = {'learning_rate': learning_rate,
                            'rescale_grad': rescale}
    else:
        raise ValueError('unsupported optimizer: %s' % optimizer)
    return opt, optimizer_params
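
# Example (hypothetical hyperparameter values): optimizer settings for Adam
# on two GPUs.
#   opt, opt_params = get_optimizer_params(optimizer='adam', learning_rate=1e-3,
#                                          momentum=0.9, weight_decay=1e-4,
#                                          ctx=[mx.gpu(0), mx.gpu(1)])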

def compute_aucs(output, label):
    # Per-class AUROC. roc_auc_score is undefined when a column contains only
    # one class, so flip the first sample's label and prediction in that column
    # before scoring; this is a crude workaround that slightly perturbs the AUC.
    aurocs = []
    row = output.shape[0]
    column = output.shape[1]
    label_np = label.asnumpy()
    output_np = output.asnumpy()
    for i in range(column):
        if (label_np[:, i] == np.zeros((row,))).all() or \
           (label_np[:, i] == np.ones((row,))).all():
            label_np[0, i] = 1 - label_np[0, i]
            output_np[0, i] = 1 - output_np[0, i]
        aurocs.append(roc_auc_score(label_np[:, i], output_np[:, i]))
    return aurocs
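
# Example (hypothetical tensors): two samples, two classes.
#   out = nd.array([[0.9, 0.2], [0.1, 0.7]])
#   lab = nd.array([[1, 0], [0, 1]])
#   compute_aucs(out, lab)  # -> [1.0, 1.0]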

def evaluate(net, data_iter, ctx):
    # Mean validation loss over all batches, on a single context.
    loss = 0.
    n = len(data_iter)
    for data, label in data_iter:
        data, label = data.as_in_context(ctx), label.as_in_context(ctx)
        output = net(data)
        loss += nd.mean(sigmoid_cross_entropy(output, label)).asscalar()
    return loss / n

def AUC(net, data_iter, n_classes, ctx):
    # Per-class AUROC averaged over all validation batches. n_classes is kept
    # for API compatibility; compute_aucs infers it from the batch shape.
    batch_aucs = []
    for data, label in data_iter:
        data, label = data.as_in_context(ctx), label.as_in_context(ctx)
        output = net(data)
        batch_aucs.append(compute_aucs(output, label))
    return np.array(batch_aucs).mean(axis=0)

def evaluate_resp(net, data_iter, weight, ctx):
    # Mean weighted validation loss and mean accuracy over all batches.
    loss, acc = 0., 0.
    n = len(data_iter)
    for data, label in data_iter:
        data, label = data.as_in_context(ctx), label.as_in_context(ctx)
        output = net(data)
        acc += np.array(compute_acc(output, label)).mean()
        loss += nd.mean(wsigmoid_cross_entropy(output, label, weight)).asscalar()
    return loss / n, acc / n

def compute_acc(output, label):
    # Per-sample correctness for the single-output (binary) head: a prediction
    # counts as correct when it rounds to the ground-truth label.
    acc = []
    row = output.shape[0]
    label_np = label.asnumpy()
    output_np = output.asnumpy()
    for i in range(row):
        acc.append(1. if round(output_np[i, 0]) == label_np[i, 0] else 0.)
    return acc
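
# Example (hypothetical tensors): three predictions against binary labels.
#   out = nd.array([[0.8], [0.3], [0.6]])
#   lab = nd.array([[1.], [0.], [0.]])
#   compute_acc(out, lab)  # -> [1.0, 1.0, 0.0]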

def train_net(network, train_csv, num_classes, batch_size,
              data_shape, ctx, epochs, learning_rate,
              momentum, weight_decay, lr_refactor_step, lr_refactor_ratio,
              class_names=None, optimizer='sgd'):
    """
    Wrapper for the multi-label training phase. See the example call after
    this function.
    Parameters:
    ----------
    network : str
        name of the network structure in the gluon model zoo
    train_csv : str
        .csv file path for training
    num_classes : int
        number of object classes, not including background
    batch_size : int
        training batch size
    data_shape : int
        width/height of the square input images
    ctx : list of mx.Context
        list of mxnet contexts, e.g. [mx.cpu()] or [mx.gpu(0)]
    epochs : int
        number of training epochs
    learning_rate : float
        training learning rate
    momentum : float
        training momentum
    weight_decay : float
        training weight decay param
    lr_refactor_step : list of int
        epochs at which to rescale the learning rate, e.g. [30, 60, 90]
    lr_refactor_ratio : float
        multiplier for reducing the learning rate
    class_names : list of str
        human-readable class names used in the per-class AUROC report
    optimizer : str
        optimizer to use instead of the default 'sgd'
    """
    # load data
    df = pd.read_csv(train_csv)
    n = len(df)
    X = np.zeros((n, 3, data_shape, data_shape), dtype=np.float32)
    Y = np.zeros((n, num_classes), dtype=np.float32)
    # ImageNet channel statistics, matching the pretrained model zoo weights
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    for i, dfv in enumerate(df.values):
        img = cv2.imread('./images/%s' % dfv[0])
        # resize, BGR -> RGB, scale to [0, 1], normalize, then HWC -> CHW
        X[i] = ((cv2.resize(img, (data_shape, data_shape))[:, :, ::-1] / 255.0 - mean) / std).transpose((2, 0, 1))
        for j in range(num_classes):
            Y[i, j] = dfv[j + 2]
    X_train, X_valid, Y_train, Y_valid = train_test_split(X, Y, random_state=8)
    # fine-tune net: reuse pretrained features, replace the classifier head
    pretrained_net = getattr(models, network)(pretrained=True)
    net = getattr(models, network)(classes=num_classes)
    with net.name_scope():
        net.features = pretrained_net.features
        net.output = nn.Dense(num_classes, activation="sigmoid")
        net.output.initialize(init.Xavier())
    # init
    net.collect_params().reset_ctx(ctx)
    net.hybridize()
    loss = gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=True)
    best_auc_avg = 0
    # optimizer
    opt, opt_params = get_optimizer_params(optimizer=optimizer, learning_rate=learning_rate,
                                           momentum=momentum, weight_decay=weight_decay, ctx=ctx)
    train_data = gluon.data.DataLoader(gluon.data.ArrayDataset(X_train, Y_train), batch_size, shuffle=True)
    test_data = gluon.data.DataLoader(gluon.data.ArrayDataset(X_valid, Y_valid), batch_size)
    print('Running on', ctx)
    trainer = gluon.Trainer(net.collect_params(), opt, opt_params)
    for epoch in range(epochs):
        train_loss = 0.
        steps = len(train_data)
        # lr schedule: consume the next milestone once it is reached
        if len(lr_refactor_step) > 0:
            if epoch == lr_refactor_step[0]:
                trainer.set_learning_rate(trainer.learning_rate * lr_refactor_ratio)
                del lr_refactor_step[0]
        for data, label in train_data:
            data_list = gluon.utils.split_and_load(data, ctx)
            label_list = gluon.utils.split_and_load(label, ctx)
            with autograd.record():
                losses = [loss(net(x), y) for x, y in zip(data_list, label_list)]
            for l in losses:
                l.backward()
            lmean = [l.mean().asscalar() for l in losses]
            train_loss += sum(lmean) / len(lmean)
            trainer.step(batch_size)
        val_loss = evaluate(net, test_data, ctx[0])
        val_aucs = AUC(net, test_data, num_classes, ctx[0])
        val_aucs_avg = val_aucs.mean()
        print("Epoch %d. loss: %.4f, val_loss %.4f" % (
            epoch, train_loss / steps, val_loss))
        print("The average AUROC is %.3f" % val_aucs_avg)
        if val_aucs_avg >= best_auc_avg:
            best_auc_avg = val_aucs_avg
            # save the feature extractor and the classifier head separately
            net.features.save_params('./model/densenet_cam_f_Epoch%d.params' % epoch)
            net.output.save_params('./model/densenet_cam_o_Epoch%d.params' % epoch)
        for i in range(num_classes):
            print('The AUROC of {} is {}'.format(class_names[i], val_aucs[i]))
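
# Example call (hypothetical paths and hyperparameters; assumes images under
# ./images, a ./model directory for checkpoints, and a class_names list):
#   train_net('densenet121', 'train.csv', num_classes=14, batch_size=32,
#             data_shape=224, ctx=[mx.gpu(0)], epochs=50, learning_rate=1e-3,
#             momentum=0.9, weight_decay=1e-4, lr_refactor_step=[30, 40],
#             lr_refactor_ratio=0.1, class_names=class_names, optimizer='adam')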

def train_net_resp(network, train_csv, num_classes, batch_size,
                   data_shape, ctx, epochs, learning_rate,
                   momentum, weight_decay, lr_refactor_step, lr_refactor_ratio, identifier,
                   class_names=None, optimizer='sgd'):
    """
    Wrapper for the single-class (binary) training phase. See the example call
    after this function.
    Parameters:
    ----------
    network : str
        name of the network structure in the gluon model zoo
    train_csv : str
        .csv file path for training
    num_classes : int
        number of object classes, not including background
    batch_size : int
        training batch size
    data_shape : int
        width/height of the square input images
    ctx : list of mx.Context
        list of mxnet contexts, e.g. [mx.cpu()] or [mx.gpu(0)]
    epochs : int
        number of training epochs
    learning_rate : float
        training learning rate
    momentum : float
        training momentum
    weight_decay : float
        training weight decay param
    lr_refactor_step : list of int
        epochs at which to rescale the learning rate, e.g. [30, 60, 90]
    lr_refactor_ratio : float
        multiplier for reducing the learning rate
    identifier : int
        index of the class to classify
    class_names : list of str
        human-readable class names used in the AUROC report and checkpoint names
    optimizer : str
        optimizer to use instead of the default 'sgd'
    """
    # load data
    df = pd.read_csv(train_csv)
    n = len(df)
    X = np.zeros((n, 3, data_shape, data_shape), dtype=np.float32)
    Y = np.zeros((n, 1), dtype=np.float32)
    # ImageNet channel statistics, matching the pretrained model zoo weights
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    for i, dfv in enumerate(df.values):
        img = cv2.imread('./images/%s' % dfv[0])
        # resize, BGR -> RGB, scale to [0, 1], normalize, then HWC -> CHW
        X[i] = ((cv2.resize(img, (data_shape, data_shape))[:, :, ::-1] / 255.0 - mean) / std).transpose((2, 0, 1))
        Y[i, 0] = dfv[identifier + 2]
    X_train, X_valid, Y_train, Y_valid = train_test_split(X, Y, random_state=8)
    # positive-class weights: the rarer the positive class, the larger the weight
    w_train = 1. - np.sum(Y_train) / len(Y_train)
    w_val = 1. - np.sum(Y_valid) / len(Y_valid)
    # fine-tune net: reuse pretrained features, replace the classifier head
    pretrained_net = getattr(models, network)(pretrained=True)
    net = getattr(models, network)(classes=1)
    with net.name_scope():
        net.features = pretrained_net.features
        net.output = nn.Dense(1, activation="sigmoid")
        net.output.initialize(init.Xavier())
    # init
    net.collect_params().reset_ctx(ctx)
    net.hybridize()
    loss = wSigmoidBinaryCrossEntropyLoss(from_sigmoid=True)
    best_auc_avg = 0
    best_acc = 0
    # optimizer
    opt, opt_params = get_optimizer_params(optimizer=optimizer, learning_rate=learning_rate,
                                           momentum=momentum, weight_decay=weight_decay, ctx=ctx)
    train_data = gluon.data.DataLoader(gluon.data.ArrayDataset(X_train, Y_train), batch_size, shuffle=True)
    test_data = gluon.data.DataLoader(gluon.data.ArrayDataset(X_valid, Y_valid), batch_size)
    print('Running on', ctx)
    trainer = gluon.Trainer(net.collect_params(), opt, opt_params)
    for epoch in range(epochs):
        train_loss = 0.
        steps = len(train_data)
        # lr schedule: consume the next milestone once it is reached
        if len(lr_refactor_step) > 0:
            if epoch == lr_refactor_step[0]:
                trainer.set_learning_rate(trainer.learning_rate * lr_refactor_ratio)
                del lr_refactor_step[0]
        for data, label in train_data:
            data_list = gluon.utils.split_and_load(data, ctx)
            label_list = gluon.utils.split_and_load(label, ctx)
            with autograd.record():
                losses = [loss(net(x), y, w_train) for x, y in zip(data_list, label_list)]
            for l in losses:
                l.backward()
            lmean = [l.mean().asscalar() for l in losses]
            train_loss += sum(lmean) / len(lmean)
            trainer.step(batch_size)
        val_loss, val_acc = evaluate_resp(net, test_data, w_val, ctx[0])
        val_aucs = AUC(net, test_data, 1, ctx[0])
        val_aucs_avg = val_aucs.mean()
        print("Epoch %d. loss: %.4f, val_loss %.4f, val_acc %.2f%%" % (
            epoch, train_loss / steps, val_loss, val_acc * 100))
        print('The AUROC of {} is {}'.format(class_names[identifier], val_aucs_avg))
        # keep two checkpoint families: best AUROC (_f/_o) and best accuracy (_f2/_o2)
        if val_aucs_avg >= best_auc_avg:
            best_auc_avg = val_aucs_avg
            net.features.save_params('./model/%s_f_Epoch%d.params' % (class_names[identifier], epoch))
            net.output.save_params('./model/%s_o_Epoch%d.params' % (class_names[identifier], epoch))
        if val_acc >= best_acc:
            best_acc = val_acc
            net.features.save_params('./model/%s_f2_Epoch%d.params' % (class_names[identifier], epoch))
            net.output.save_params('./model/%s_o2_Epoch%d.params' % (class_names[identifier], epoch))
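
# Example call (hypothetical paths and hyperparameters; trains a binary
# classifier for the class at index 0 of class_names):
#   train_net_resp('densenet121', 'train.csv', num_classes=14, batch_size=32,
#                  data_shape=224, ctx=[mx.gpu(0)], epochs=50, learning_rate=1e-3,
#                  momentum=0.9, weight_decay=1e-4, lr_refactor_step=[30, 40],
#                  lr_refactor_ratio=0.1, identifier=0, class_names=class_names,
#                  optimizer='adam')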