# utils.py
import numpy as np
import matplotlib.pyplot as plt
from sklearn.metrics import root_mean_squared_error  # type: ignore
import traceback
from functools import wraps


def catch_exceptions(func):
    """Wrap `func` so it returns (result, None) on success and (None, traceback string) on failure."""

    @wraps(func)
    def wrapper(*args, **kwargs):
        try:
            return func(*args, **kwargs), None
        except Exception:
            return None, traceback.format_exc()

    return wrapper
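
# A minimal usage sketch (the function name below is hypothetical); it only
# illustrates the (value, error) convention that catch_exceptions imposes:
#
#     @catch_exceptions
#     def parse_int(s):
#         return int(s)
#
#     value, err = parse_int("42")    # -> (42, None)
#     value, err = parse_int("oops")  # -> (None, "<formatted traceback>")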


def cross_comparison(revlogs, algoA, algoB, graph=False):
    """Cross-compare the calibration of two algorithms' predicted retention (R).

    `revlogs` is expected to carry a column "R (<algo>)" with each algorithm's
    predicted retention and a column "y" with the observed review outcome.
    Predictions are binned, and for each (bin-by, evaluate) pairing the
    review-count-weighted RMSE between mean observed recall and mean predicted
    retention per bin (the "universal metric") is computed. A list of these
    metrics is returned, and the per-bin bias is optionally plotted.
    """
    if algoA != algoB:
        cross_comparison_record = revlogs[[f"R ({algoA})", f"R ({algoB})", "y"]].copy()
        bin_algo = (
            algoA,
            algoB,
        )
        pair_algo = [(algoA, algoB), (algoB, algoA)]
    else:
        cross_comparison_record = revlogs[[f"R ({algoA})", "y"]].copy()
        bin_algo = (algoA,)
        pair_algo = [(algoA, algoA)]

    # Map a predicted retention in [0, 1] to one of `bins` log-spaced bin labels.
    def get_bin(x, bins=20):
        return (
            np.log(np.minimum(np.floor(np.exp(np.log(bins + 1) * x) - 1), bins - 1) + 1)
            / np.log(bins)
        ).round(3)
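
    # With the default bins=20: get_bin(0.0) == 0.0, get_bin(0.5) == 0.463,
    # get_bin(1.0) == 1.0. The mapping is logarithmic, so the high-retention
    # end of [0, 1] gets narrower bins.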

    # For each algorithm, record the per-review bias (predicted R minus observed
    # outcome, the "B-W" value) and the bin its prediction falls into.
    for algo in bin_algo:
        cross_comparison_record[f"{algo}_B-W"] = (
            cross_comparison_record[f"R ({algo})"] - cross_comparison_record["y"]
        )
        cross_comparison_record[f"{algo}_bin"] = cross_comparison_record[
            f"R ({algo})"
        ].map(get_bin)
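
    # At this point cross_comparison_record holds, besides "y", three columns per
    # algorithm <algo>:
    #   "R (<algo>)"  : the algorithm's predicted retention,
    #   "<algo>_B-W"  : predicted retention minus observed outcome,
    #   "<algo>_bin"  : the get_bin label of the prediction.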

    if graph:
        fig = plt.figure(figsize=(6, 6))
        ax = fig.gca()
        ax.axhline(y=0.0, color="black", linestyle="-")

    universal_metric_list = []

    for algoA, algoB in pair_algo:
        # Group by algoA's bins; within each bin, take algoB's mean prediction and
        # mean bias, the mean observed recall, and the number of reviews.
        cross_comparison_group = cross_comparison_record.groupby(by=f"{algoA}_bin").agg(
            {"y": ["mean"], f"{algoB}_B-W": ["mean"], f"R ({algoB})": ["mean", "count"]}
        )
        # "Universal metric": RMSE between mean observed recall and mean predicted R
        # per bin, weighted by the number of reviews in each bin.
        universal_metric = root_mean_squared_error(
            y_true=cross_comparison_group["y", "mean"],
            y_pred=cross_comparison_group[f"R ({algoB})", "mean"],
            sample_weight=cross_comparison_group[f"R ({algoB})", "count"],
        )
        cross_comparison_group[f"R ({algoB})", "percent"] = (
            cross_comparison_group[f"R ({algoB})", "count"]
            / cross_comparison_group[f"R ({algoB})", "count"].sum()
        )
        if graph:
            # Marker size reflects the share of reviews that fall into each bin.
            ax.scatter(
                cross_comparison_group.index,
                cross_comparison_group[f"{algoB}_B-W", "mean"],
                s=cross_comparison_group[f"R ({algoB})", "percent"] * 1024,
                alpha=0.5,
            )
            ax.plot(
                cross_comparison_group[f"{algoB}_B-W", "mean"],
                label=f"{algoB} by {algoA}, UM={universal_metric:.4f}",
            )
        universal_metric_list.append(universal_metric)

    if graph:
        ax.legend(loc="lower center")
        ax.grid(linestyle="--")
        ax.set_title(f"{algoA} vs {algoB}")
        ax.set_xlabel("Predicted R")
        ax.set_ylabel("B-W Metric")
        ax.set_xlim(0, 1)
        ax.set_xticks(np.arange(0, 1.1, 0.1))
        fig.show()

    return universal_metric_list
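

# A minimal, self-contained sketch of how cross_comparison might be called. The
# algorithm names ("FSRS", "SM-2") and the synthetic data below are hypothetical;
# only the column naming convention, "R (<algo>)" for predictions and "y" for the
# observed outcome, comes from the function above.
if __name__ == "__main__":
    import pandas as pd

    rng = np.random.default_rng(42)
    true_r = rng.uniform(0.3, 1.0, size=2000)
    demo_revlogs = pd.DataFrame(
        {
            # Hypothetical predictions: one well-calibrated, one noisier.
            "R (FSRS)": np.clip(true_r + rng.normal(0, 0.05, size=2000), 0, 1),
            "R (SM-2)": np.clip(true_r + rng.normal(0, 0.15, size=2000), 0, 1),
            "y": rng.binomial(1, true_r).astype(float),
        }
    )
    # Returns [UM of SM-2 grouped by FSRS's bins, UM of FSRS grouped by SM-2's bins].
    print(cross_comparison(demo_revlogs, "FSRS", "SM-2", graph=False))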