metrics.py
import numpy as np
from sklearn.metrics import average_precision_score, accuracy_score, f1_score


def check_inputs(targs, preds):
    '''
    Helper function for input validation.
    '''
    assert (np.shape(preds) == np.shape(targs))
    assert type(preds) is np.ndarray
    assert type(targs) is np.ndarray
    assert (np.max(preds) <= 1.0) and (np.min(preds) >= 0.0)
    assert (np.max(targs) <= 1.0) and (np.min(targs) >= 0.0)
    assert (len(np.unique(targs)) <= 2)


def compute_avg_precision(targs, preds):
    '''
    Compute average precision.

    Parameters
    targs: Binary targets.
    preds: Predicted probability scores.
    '''
    check_inputs(targs, preds)
    if np.all(targs == 0):
        # If a class has zero true positives, we define average precision to be zero.
        metric_value = 0.0
    else:
        metric_value = average_precision_score(targs, preds)
    return metric_value


def compute_precision_at_k(targs, preds, k):
    '''
    Compute precision@k.

    Parameters
    targs: Binary targets.
    preds: Predicted probability scores.
    k: Number of predictions to consider.
    '''
    check_inputs(targs, preds)
    classes_rel = np.flatnonzero(targs == 1)
    if len(classes_rel) == 0:
        return 0.0
    top_k_pred = np.argsort(preds)[::-1][:k]
    metric_value = float(len(np.intersect1d(top_k_pred, classes_rel))) / k
    return metric_value


def compute_recall_at_k(targs, preds, k):
    '''
    Compute recall@k.

    Parameters
    targs: Binary targets.
    preds: Predicted probability scores.
    k: Number of predictions to consider.
    '''
    check_inputs(targs, preds)
    classes_rel = np.flatnonzero(targs == 1)
    if len(classes_rel) == 0:
        return 0.0
    top_k_pred = np.argsort(preds)[::-1][:k]
    metric_value = float(len(np.intersect1d(top_k_pred, classes_rel))) / len(classes_rel)
    return metric_value
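

# Illustrative usage sketch (not part of the original module): the arrays below are
# made-up per-label targets and scores, added only to show how the metrics above
# might be called for a single multi-label example.
if __name__ == '__main__':
    # Binary relevance targets and predicted probabilities for five labels.
    example_targs = np.array([0.0, 1.0, 0.0, 1.0, 0.0])
    example_preds = np.array([0.10, 0.85, 0.30, 0.60, 0.05])

    print('average precision:', compute_avg_precision(example_targs, example_preds))
    print('precision@3:', compute_precision_at_k(example_targs, example_preds, 3))
    print('recall@3:', compute_recall_at_k(example_targs, example_preds, 3))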