""" Runs the loan prediction.
author: Younggue Bae
"""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import StandardScaler
from tabulate import tabulate
from prediction_model import PredictionModel
from preprocessing import preprocess, check_input_validation
from feature_importance import feature_importance

file_path = './input/DR_Demo_Lending_Club_reduced.csv'
df_raw_data = pd.read_csv(file_path)
print(df_raw_data.head())
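# Quick sanity checks: dataset size and target class balance ('is_bad' is
# assumed to be the label column already present in the raw CSV).
print('Raw data shape:', df_raw_data.shape)
print(df_raw_data['is_bad'].value_counts())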
####################################
# Data Preprocessing
####################################
print('\nData preprocessing..................')
ordinal_categorical_fields_mapping = {
    "pymnt_plan": {"n": 0, "y": 1},
    "initial_list_status": {"f": 0, "m": 1},
    "home_ownership": {"NONE": 1, "OTHER": 2, "MORTGAGE": 3, "RENT": 4, "OWN": 5},
    "verification_status": {"not verified": 0, "VERIFIED - income": 1, "VERIFIED - income source": 1},
    # "policy_code": {"PC1": 1, "PC2": 2, "PC3": 3, "PC4": 4, "PC5": 5},
}
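# Note: mapping home_ownership onto 1..5 imposes an assumed ordering on those
# categories, and both "VERIFIED" variants of verification_status collapse to
# the same code (1).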
nominal_categorical_fields = [
    # "pymnt_plan",
    # "initial_list_status",
    # "home_ownership",
    "policy_code",
    # "verification_status",
    "purpose_cat",
    "addr_state",
    "zip_code",
]
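# The nominal fields above are presumably one-hot encoded inside preprocess();
# addr_state and zip_code are high-cardinality, so this can add a large number
# of columns to the feature matrix.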
drop_fields = [
    "Id",
    "collections_12_mths_ex_med",
    # "mths_since_last_delinq",
    # "mths_since_last_record",
    # "addr_state",
    # "zip_code",
]
df_data = preprocess(data=df_raw_data,
                     ordinal_categorical_fields_mapping=ordinal_categorical_fields_mapping,
                     nominal_categorical_fields=nominal_categorical_fields,
                     drop_fields=drop_fields
                     )
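# Report the size of the feature matrix produced by preprocessing.
print('Preprocessed data shape:', df_data.shape)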
# Validate the preprocessed input data.
validated = check_input_validation(df_data)
if validated:
    print('Input validation check result: OK')
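else:
    # check_input_validation is assumed to return a falsy value when the check
    # fails; report that case explicitly instead of passing silently.
    print('Input validation check result: FAILED')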
####################################
# Partitioning a dataset in training and test sets
####################################
X = df_data.drop('is_bad', axis=1)
y = df_data['is_bad'].values
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0)
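# Note: is_bad is an imbalanced label; a stratified split would keep the class
# ratio similar in both partitions, e.g.:
# X_train, X_test, y_train, y_test = train_test_split(
#     X, y, test_size=0.3, random_state=0, stratify=y)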
# Standardization of feature scale
print('\n\nStandardization of feature scale..................')
numeric_fields = [
    'annual_inc',
    'debt_to_income',
    'delinq_2yrs',
    'inq_last_6mths',
    'mths_since_last_delinq',
    'mths_since_last_record',
    'open_acc',
    'pub_rec',
    'revol_bal',
    'revol_util',
    'total_acc',
    'mths_since_last_major_derog',
]
ct = ColumnTransformer(
    [('std', StandardScaler(), numeric_fields)],
    remainder='passthrough'
)
X_train_std = ct.fit_transform(X_train)
X_test_std = ct.transform(X_test)
# ColumnTransformer outputs the scaled columns first and the passthrough
# columns after them, so rebuild the column labels in that order rather than
# reusing X_train.columns directly.
transformed_columns = numeric_fields + [col for col in X_train.columns if col not in numeric_fields]
X_train_std = pd.DataFrame(X_train_std, columns=transformed_columns)
X_test_std = pd.DataFrame(X_test_std, columns=transformed_columns)
# X_train_std = X_train
# X_test_std = X_test
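# Optional sanity check: after standardization the scaled columns should show
# roughly zero mean and unit standard deviation on the training set, e.g.:
# print(X_train_std[['annual_inc', 'revol_bal']].describe().loc[['mean', 'std']])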
####################################
# Training a logistic regression model
####################################
# solver: {'newton-cg', 'lbfgs', 'liblinear', 'sag', 'saga'}
model = PredictionModel(solver='liblinear',
                        penalty='l1',
                        max_iter=1000,
                        C=1000,
                        debug=False)
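# PredictionModel is assumed to wrap sklearn's LogisticRegression: 'liblinear'
# is one of the solvers that supports the L1 penalty, and the large C (=1000)
# corresponds to weak regularization.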
lr = model.fit(X_train_std, y_train)
# print(lr.coef_)
# Training evaluation
eval_train = model.evaluate(X_train_std, y_train)
print('\n\nTraining evaluation:', eval_train)
####################################
# Evaluate model
####################################
# Test evaluation
eval_test = model.evaluate(X_test_std, y_test)
print('\n\nTest evaluation:', eval_test)
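# A large gap between the training and test evaluation results would point to
# overfitting on the training partition.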
####################################
# Predict
####################################
print('\n\nPredictions (first 50 test rows):', model.predict(X_test_std[0:50]))
print('Prediction probabilities (first 50 test rows):', model.predict_proba(X_test_std[0:50]))
####################################
# Tune hyperparameters
####################################
# The evaluation of all possible parameter combinations is computationally very expensive.
# Therefore, only the first 200 training samples are used here.
# best_params = model.tune_parameters(X_train, y_train)
best_params = model.tune_parameters(X_train_std[:200], y_train[:200])
print('\n\nTuning parameters:', best_params)
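# tune_parameters is assumed to search over hyperparameter combinations (e.g.
# via a grid search); best_params could then be used to refit PredictionModel
# on the full training set.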
####################################
# Feature importance
####################################
# The feature importance analysis is computationally very expensive.
# Therefore, only the first 200 training samples are used here.
print('\n\nAnalyzing feature importance..................')
# Keep the result in a separate variable so the imported feature_importance
# function is not shadowed.
importance_scores = feature_importance(X_train_std[:200], y_train[:200])
print('\nFeature Importance:')
headers = ["name", "score"]
print(tabulate(importance_scores, headers, tablefmt="plain"))