import torch
import torch.nn.functional as F


def _sigmoid(x):
    # Clamp the sigmoid output away from 0 and 1 so the log terms in the
    # focal loss below stay finite.
    y = torch.clamp(x.sigmoid_(), min=1e-4, max=1 - 1e-4)
    return y


def _neg_loss(pred, gt):
    '''Modified focal loss. Exactly the same as CornerNet.
    Runs faster and costs a little more memory.
    Arguments:
        pred (batch x c x h x w)
        gt   (batch x c x h x w)
    '''
    pos_inds = gt.eq(1).float()
    neg_inds = gt.lt(1).float()

    # Negatives near a ground-truth peak are down-weighted by (1 - gt)^4.
    neg_weights = torch.pow(1 - gt, 4)

    loss = 0
    pos_loss = torch.log(pred) * torch.pow(1 - pred, 2) * pos_inds
    neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds

    num_pos = pos_inds.sum()
    pos_loss = pos_loss.sum()
    neg_loss = neg_loss.sum()

    if num_pos == 0:
        # No positive pixels in the batch: only the negative term contributes.
        loss = loss - neg_loss
    else:
        loss = loss - (pos_loss + neg_loss) / num_pos
    return loss
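
# In formula form, _neg_loss computes
#   L = -1/N * sum( (1 - p)^2 * log(p)             where gt == 1,
#                   (1 - gt)^4 * p^2 * log(1 - p)  elsewhere ),
# where p is the predicted heatmap value and N is the number of gt == 1
# pixels, i.e. the CornerNet variant of the focal loss.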


def _gather_feat(feat, ind, mask=None):
    # feat: (B, H*W, C); ind: (B, K) flattened spatial indices.
    # Returns the C-dimensional feature at each indexed location, (B, K, C).
    dim = feat.size(2)
    ind = ind.unsqueeze(2).expand(ind.size(0), ind.size(1), dim)
    feat = feat.gather(1, ind)
    if mask is not None:
        mask = mask.unsqueeze(2).expand_as(feat)
        feat = feat[mask]
        feat = feat.view(-1, dim)
    return feat


def _transpose_and_gather_feat(feat, ind):
    # (B, C, H, W) -> (B, H*W, C), then gather the indexed locations.
    feat = feat.permute(0, 2, 3, 1).contiguous()
    feat = feat.view(feat.size(0), -1, feat.size(3))
    feat = _gather_feat(feat, ind)
    return feat


class FocalLoss(torch.nn.Module):
    '''nn.Module wrapper for the modified focal loss.'''
    def __init__(self):
        super(FocalLoss, self).__init__()
        self.neg_loss = _neg_loss

    def forward(self, out, target):
        return self.neg_loss(out, target)


class RegL1Loss(torch.nn.Module):
    def __init__(self):
        super(RegL1Loss, self).__init__()

    def forward(self, output, mask, ind, target):
        pred = _transpose_and_gather_feat(output, ind)
        mask = mask.unsqueeze(2).expand_as(pred).float()
        loss = F.l1_loss(pred * mask, target * mask, reduction='sum')
        # Normalize by the number of supervised entries; the 1e-4 guards
        # against division by zero when the batch contains no objects.
        loss = loss / (mask.sum() + 1e-4)
        return loss
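
# Note on the RegL1Loss inputs (an assumption based on common CenterNet-style
# training code, not stated in this file): output is the dense (B, C, H, W)
# regression head, ind holds the flattened center index of each of K objects,
# mask flags which of the K slots hold real objects, and target is the
# (B, K, C) ground truth.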


class CtdetLoss(torch.nn.Module):
    def __init__(self, net):
        super(CtdetLoss, self).__init__()
        self.net = net
        self.crit = FocalLoss()                  # center heatmap
        self.crit_reg = RegL1Loss()              # sub-pixel center offset
        self.crit_gd = torch.nn.SmoothL1Loss()   # gradient map

    def forward(self, imgs, batch):
        outputs = self.net(imgs)
        output = outputs[0]
        output['hm'] = _sigmoid(output['hm'])
        hm_loss = self.crit(output['hm'], batch['hm'])
        # Upsample the predicted gradient map to the (hard-coded) 512 x 512
        # input resolution before comparing it with the target.
        grad = F.interpolate(output['gd'], size=[512, 512],
                             mode='bilinear', align_corners=True)
        gd_loss = self.crit_gd(grad, batch['grad'].type(torch.float))
        off_loss = self.crit_reg(output['reg'], batch['reg_mask'],
                                 batch['ind'], batch['reg'])
        loss = hm_loss + gd_loss + off_loss
        loss_stats = {'loss': loss, 'hm_loss': hm_loss,
                      'gd_loss': gd_loss, 'off_loss': off_loss}
        return loss, loss_stats
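

if __name__ == '__main__':
    # Minimal smoke test (a sketch: the head names 'hm'/'gd'/'reg', the
    # 128 x 128 head resolution, and the batch keys and shapes are
    # assumptions read off the forward pass above, not a documented
    # interface).
    class _StubNet(torch.nn.Module):
        def forward(self, imgs):
            b = imgs.size(0)
            return [{'hm': torch.randn(b, 1, 128, 128),
                     'gd': torch.randn(b, 1, 128, 128),
                     'reg': torch.randn(b, 2, 128, 128)}]

    batch = {'hm': torch.zeros(2, 1, 128, 128),
             'grad': torch.zeros(2, 1, 512, 512),
             'reg_mask': torch.ones(2, 10),
             'ind': torch.zeros(2, 10, dtype=torch.long),
             'reg': torch.zeros(2, 10, 2)}
    loss, stats = CtdetLoss(_StubNet())(torch.randn(2, 3, 512, 512), batch)
    print({k: round(float(v), 4) for k, v in stats.items()})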