train_function.py~
import tensorflow as tf


def loss(logits, label_batches):
    with tf.name_scope('loss'):
        # cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(logits=logits, labels=label_batches)
        # cost = tf.reduce_mean(cross_entropy)
        # focal_loss expects probabilities, so the second column of `logits`
        # is assumed to already hold the softmax probability of class 1.
        t = logits[:, 1]
        cost = focal_loss(t, label_batches)
        regularization_loss = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
        # regularization_loss = tf.get_collection("regular_loss")
        total_loss = tf.add_n([cost] + regularization_loss, name='total_loss')
        tf.summary.scalar('loss_cross', cost)
        if regularization_loss:  # tf.add_n fails on an empty list
            tf.summary.scalar('loss_L2', tf.add_n(regularization_loss))
        tf.summary.scalar('loss_total', total_loss)
    return total_loss, t, cost


def focal_loss(prediction_tensor, target_tensor):
    """Binary focal loss (Lin et al., "Focal Loss for Dense Object Detection").

    `prediction_tensor` must hold probabilities in [0, 1], not raw logits.
    """
    gamma = 2.0   # focusing parameter
    epsilon = 1e-8
    alpha = 0.3   # class-balance weight for the positive class
    target_tensor = tf.cast(target_tensor, dtype=tf.float32)
    fl = -(alpha * target_tensor * tf.pow(1 - prediction_tensor, gamma) * tf.log(prediction_tensor + epsilon) +
           (1 - alpha) * (1 - target_tensor) * tf.pow(prediction_tensor, gamma) * tf.log(1 - prediction_tensor + epsilon))
    return tf.reduce_mean(fl)
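
# A minimal usage sketch for focal_loss (illustrative names, not defined in
# this file; assumes `net_logits` are raw, unnormalized network outputs):
# probs = tf.nn.softmax(net_logits)       # focal_loss expects probabilities
# fl = focal_loss(probs[:, 1], labels)    # positive-class column, binary case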


# An alternative multi-label focal loss, kept for reference:
# def focal_loss(prediction_tensor, target_tensor, weights=None, alpha=0.25, gamma=2):
#     r"""Compute focal loss for predictions.
#     Multi-label focal loss formula:
#         FL = -alpha * (z-p)^gamma * log(p) - (1-alpha) * p^gamma * log(1-p)
#     where alpha = 0.25, gamma = 2, p = sigmoid(x), z = target_tensor.
#     Args:
#         prediction_tensor: A float tensor of shape [batch_size, num_anchors,
#             num_classes] representing the predicted logits for each class
#         target_tensor: A float tensor of shape [batch_size, num_anchors,
#             num_classes] representing one-hot encoded classification targets
#         weights: A float tensor of shape [batch_size, num_anchors]
#         alpha: A scalar tensor for the focal loss alpha hyper-parameter
#         gamma: A scalar tensor for the focal loss gamma hyper-parameter
#     Returns:
#         loss: A (scalar) tensor representing the value of the loss function
#     """
#     # Requires: from tensorflow.python.ops import array_ops
#     sigmoid_p = tf.nn.sigmoid(prediction_tensor)
#     zeros = array_ops.zeros_like(sigmoid_p, dtype=sigmoid_p.dtype)
#
#     # For positive predictions only the first term contributes;
#     # target_tensor > zeros <=> z = 1, so the positive coefficient is z - p.
#     target_tensor = tf.cast(target_tensor, tf.float32)
#     pos_p_sub = array_ops.where(target_tensor > zeros, target_tensor - sigmoid_p, zeros)
#
#     # For negative predictions only the second term contributes;
#     # target_tensor > zeros <=> z = 1, so the negative coefficient is p.
#     neg_p_sub = array_ops.where(target_tensor > zeros, zeros, sigmoid_p)
#     per_entry_cross_ent = -alpha * (pos_p_sub ** gamma) * tf.log(tf.clip_by_value(sigmoid_p, 1e-8, 1.0)) \
#                           - (1 - alpha) * (neg_p_sub ** gamma) * tf.log(tf.clip_by_value(1.0 - sigmoid_p, 1e-8, 1.0))
#
#     return tf.reduce_sum(per_entry_cross_ent)


def center_loss(features, label, alfa, nrof_classes):
    """Center loss based on the paper "A Discriminative Feature Learning Approach
    for Deep Face Recognition" (http://ydwen.github.io/papers/WenECCV16.pdf).
    """
    nrof_features = features.get_shape()[1]
    centers = tf.get_variable('centers', [nrof_classes, nrof_features], dtype=tf.float32,
                              initializer=tf.constant_initializer(0), trainable=False)
    label = tf.reshape(label, [-1])
    centers_batch = tf.gather(centers, label)
    # Move each class center towards its batch features; `alfa` is the
    # fraction of the old center that is kept.
    diff = (1 - alfa) * (centers_batch - features)
    # Note: the returned `centers` op must actually be run (e.g. as a control
    # dependency of the train op) or the centers never update.
    centers = tf.scatter_sub(centers, label, diff)
    loss = tf.reduce_mean(tf.square(features - centers_batch))
    return loss, centers
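
# A minimal wiring sketch for center_loss (illustrative names, not defined in
# this file; assumes a 2-class problem and a `prelogits` embedding tensor):
# c_loss, centers_update_op = center_loss(prelogits, label_batches, alfa=0.95, nrof_classes=2)
# with tf.control_dependencies([centers_update_op]):
#     total = cross_entropy_cost + 0.01 * c_loss   # 0.01 is an assumed weight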


def get_accuracy_softmax(prediction, labels):
    with tf.name_scope('softmaxaccuracy'):
        # tf.argmax returns int64, so cast labels to match its dtype.
        correct_prediction = tf.equal(tf.argmax(prediction, 1), tf.cast(labels, tf.int64))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
        _ = tf.summary.scalar('train_accuracy', accuracy)
    return accuracy


def get_accuracy(logits, labels):
    with tf.name_scope('accuracy'):
        acc = tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32))
        _ = tf.summary.scalar('train_accuracy', acc)
    return acc


def get_accuracy_test(logits, labels):
    with tf.name_scope('testaccuracy'):
        acc = tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32))
        _ = tf.summary.scalar('test_accuracy', acc)
    return acc


def get_accuracy_val(logits, labels):
    with tf.name_scope('valaccuracy'):
        acc = tf.reduce_mean(tf.cast(tf.nn.in_top_k(logits, labels, 1), tf.float32))
        # _ = tf.summary.scalar('val_accuracy', acc)
    return acc
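
# For reference: tf.nn.in_top_k(logits, labels, 1) is True wherever the argmax
# of `logits` matches the label, so these are standard top-1 accuracies.
# Illustrative check (values assumed):
# logits = tf.constant([[0.1, 0.9], [0.8, 0.2]])
# labels = tf.constant([1, 1])
# acc = get_accuracy(logits, labels)   # evaluates to 0.5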


def training(loss, lr, global_step):
    # UPDATE_OPS holds ops such as batch-norm moving-average updates; making
    # the optimizer step depend on them keeps those statistics in sync.
    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        # train_op = tf.train.RMSPropOptimizer(lr, decay=0.9, momentum=0.9).minimize(loss, global_step=global_step)
        train_op = tf.train.AdamOptimizer(lr).minimize(loss, global_step=global_step)
        # train_op = tf.train.MomentumOptimizer(lr, momentum=0.9).minimize(loss, global_step=global_step)
    # variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step)
    # variables_averages_op = variable_averages.apply(tf.trainable_variables())
    # with tf.control_dependencies([train_op, variables_averages_op]):
    #     train_op2 = tf.no_op(name='train')
    return train_op
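
# A minimal end-to-end sketch of how these pieces fit together. All names
# below (`build_model`, `image_batches`, `label_batches`) are assumed, not
# defined in this file:
# logits = build_model(image_batches)
# global_step = tf.train.get_or_create_global_step()
# total_loss, probs, cost = loss(logits, label_batches)
# train_op = training(total_loss, lr=1e-4, global_step=global_step)
# acc = get_accuracy(logits, label_batches)
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     _, l, a = sess.run([train_op, total_loss, acc])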