evaluate_model.py
import numpy as np
import argparse
import os
from sklearn.metrics import classification_report, confusion_matrix
import pandas as pd
import seaborn as sn
from models import *
from lib.dataset import *
from lib.utils import *
from constants import *
import matplotlib.pyplot as plt
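
# Note: the project-local star imports above are assumed to provide the names used
# below: Dataset (lib.dataset), the model classes LSTM, LSTM_Glove, MLP_Glove,
# Naive_Bayes and SVM (models), helpers such as round_float (lib.utils), and
# constants like MODELS, EVALUATION_FOLDER, TARGET_GENRES, LABEL_2_GENRE,
# GENRE_2_LABEL, MODEL_2_NAME and the GLOVE_FILENAME_* entries (constants).
# The exact module for each name is an assumption based on how they are used here.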

# train LSTM
def train_lstm(dataset: Dataset, learning_rate: float):
    model = LSTM(
        ds=dataset,
        learning_rate=learning_rate)
    test_set_predictions, history = model.train()
    return test_set_predictions, history


# train LSTM with GloVe
def train_lstm_glove(dataset: Dataset, learning_rate: float):
    model = LSTM_Glove(
        ds=dataset,
        learning_rate=learning_rate,
        glove_filename=GLOVE_FILENAME_6B_100D)
    test_set_predictions, history = model.train()
    return test_set_predictions, history


# train MLP with GloVe
def train_mlp_glove(dataset: Dataset, learning_rate: float):
    model = MLP_Glove(
        ds=dataset,
        learning_rate=learning_rate,
        glove_filename=GLOVE_FILENAME_6B_50D)
    test_set_predictions, history = model.train()
    return test_set_predictions, history


# train Naive Bayes
def naive_bayes(dataset: Dataset, classifier_str: str):
    model = Naive_Bayes(
        ds=dataset,
        classifier_str=classifier_str)
    test_set_predictions = model.train()
    return test_set_predictions


# train SVM
def svm(dataset: Dataset):
    model = SVM(dataset)
    test_set_predictions = model.train()
    return test_set_predictions

"""
Example call:
python train.py -m lstm
"""
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--model', required=True, type=str, help='Model to evaluate.')
parser.add_argument('-lr', '--learning_rate', default=None, type=float, help='Learning rate for the lstm, lstm_glove and mlp_glove models.')
args = parser.parse_args()

if __name__ == '__main__':
    model_name = args.model
    if model_name not in MODELS:
        exit(f"\nError: Model {model_name} is not implemented ..")
    print(f"\nEvaluating Method {model_name} on full dataset (12 genres)..")

    # create results folder if it does not exist
    if not os.path.exists(EVALUATION_FOLDER):
        os.makedirs(EVALUATION_FOLDER)

    n_target_genres = 12  # use all genres
    dataset = Dataset(n_target_genres)
    test_set_predictions = None

    # evaluate desired method
    if model_name == "naive_bayes_bernoulli":
        test_set_predictions = naive_bayes(dataset, NAIVE_BAYES_BERNOULLI_NB)
    elif model_name == "naive_bayes_multinomial":
        test_set_predictions = naive_bayes(dataset, NAIVE_BAYES_MULTINOMIAL_NB)
    elif model_name == "svm":
        test_set_predictions = svm(dataset)
    else:
        if args.learning_rate is None:
            exit("\nError: For this model, learning rate needs to be specified ..")
        learning_rate = args.learning_rate
        if model_name == "mlp_glove":
            test_set_predictions, _ = train_mlp_glove(dataset, learning_rate)
        elif model_name == "lstm":
            test_set_predictions, _ = train_lstm(dataset, learning_rate)
        elif model_name == "lstm_glove":
            test_set_predictions, _ = train_lstm_glove(dataset, learning_rate)
    # map numeric label predictions back to genre names so they match dataset.y_test
    test_set_predictions = [LABEL_2_GENRE[p] for p in test_set_predictions]
    report = classification_report(dataset.y_test, test_set_predictions, digits=4)
    report_dict = classification_report(dataset.y_test, test_set_predictions, digits=4, output_dict=True)

    # print macro-averages
    stats = report_dict["macro avg"]
    print(f"[Macro] P: {round_float(stats['precision']*100)} "
          + f"R: {round_float(stats['recall']*100)} "
          + f"F1: {round_float(stats['f1-score']*100)}")

    # print weighted-averages
    stats = report_dict["weighted avg"]
    print(f"[Weighted] P: {round_float(stats['precision']*100)} "
          + f"R: {round_float(stats['recall']*100)} "
          + f"F1: {round_float(stats['f1-score']*100)}")
    # confusion matrix on numeric labels, row-normalized so each row shows the
    # fraction of true-class samples assigned to each predicted class
    cm = confusion_matrix([GENRE_2_LABEL[g] for g in dataset.y_test],
                          [GENRE_2_LABEL[g] for g in test_set_predictions])
    cm = np.around(cm.astype('float') / cm.sum(axis=1)[:, np.newaxis], decimals=2)

    # copy the genre list so the added line break does not mutate TARGET_GENRES
    modified_target_genres = list(TARGET_GENRES)
    modified_target_genres[-1] = "Gospel/\nReligioso"
    df_cm = pd.DataFrame(cm, index=modified_target_genres, columns=modified_target_genres)

    # plot the confusion matrix as a heatmap
    plt.figure(figsize=(19.2, 15))
    sn.set(font_scale=0.55)  # adjust to fit
    heatmap = sn.heatmap(df_cm, annot=True, cmap="Blues")
    plt.title(MODEL_2_NAME[model_name], fontweight='bold')
    plt.xlabel("\nPredicted Category", fontweight='bold')
    plt.ylabel("Target Category", fontweight='bold')
    fig = heatmap.get_figure()
    # save classification report and confusion matrix to local files
    with open(os.path.join(EVALUATION_FOLDER, f"{model_name}.txt"), "w") as text_file:
        text_file.write(report)
    fig.savefig(
        os.path.join(EVALUATION_FOLDER, f"{model_name}.png"),
        dpi=300, bbox_inches='tight',
        pad_inches=0.0)