use gain for sklearn feature_importances_ #3876

Merged · 8 commits · Nov 13, 2018
11 changes: 8 additions & 3 deletions python-package/xgboost/sklearn.py
@@ -100,6 +100,9 @@ class XGBModel(XGBModelBase):
     missing : float, optional
         Value in the data which needs to be present as a missing value. If
         None, defaults to np.nan.
+    importance_type: string, default "gain"
+        The feature importance type for the feature_importances_ property: either "gain",
+        "weight", "cover", "total_gain" or "total_cover".
     \*\*kwargs : dict, optional
         Keyword arguments for XGBoost Booster object. Full documentation of parameters can
         be found here: https://github.com/dmlc/xgboost/blob/master/doc/parameter.rst.
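As a usage sketch of the new parameter (toy data; the variable names here are illustrative, not from the PR), `importance_type` is passed at construction time and controls what `feature_importances_` reports:

```python
import numpy as np
from xgboost import XGBClassifier

X = np.random.rand(100, 5)
y = np.random.randint(2, size=100)

# The new default is "gain"; passing "weight" reproduces the old
# split-count (fscore) behavior.
model = XGBClassifier(importance_type="weight").fit(X, y)
print(model.feature_importances_)  # normalized, sums to 1.0
```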
@@ -133,7 +136,8 @@ def __init__(self, max_depth=3, learning_rate=0.1, n_estimators=100,
                  n_jobs=1, nthread=None, gamma=0, min_child_weight=1, max_delta_step=0,
                  subsample=1, colsample_bytree=1, colsample_bylevel=1,
                  reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
-                 base_score=0.5, random_state=0, seed=None, missing=None, **kwargs):
+                 base_score=0.5, random_state=0, seed=None, missing=None,
+                 importance_type="gain", **kwargs):
         if not SKLEARN_INSTALLED:
             raise XGBoostError('sklearn needs to be installed in order to use this module')
         self.max_depth = max_depth
@@ -159,6 +163,7 @@ def __init__(self, max_depth=3, learning_rate=0.1, n_estimators=100,
         self.random_state = random_state
         self.nthread = nthread
         self.n_jobs = n_jobs
+        self.importance_type = importance_type
 
     def __setstate__(self, state):
         # backward compatibility code
@@ -517,8 +522,8 @@ def feature_importances_(self):
             raise AttributeError('Feature importance is not defined for Booster type {}'
                                  .format(self.booster))
         b = self.get_booster()
-        fs = b.get_fscore()
-        all_features = [fs.get(f, 0.) for f in b.feature_names]
+        score = b.get_score(importance_type=self.importance_type)
+        all_features = [score.get(f, 0.) for f in b.feature_names]
         all_features = np.array(all_features, dtype=np.float32)
         return all_features / all_features.sum()
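For reference, `Booster.get_fscore()` is implemented as `get_score(importance_type='weight')`, so switching the property to `get_score` loses nothing. A quick equivalence check (a sketch, assuming the fitted `model` from the example above):

```python
b = model.get_booster()
# get_fscore() counts how often each feature is split on; it returns
# the same dict as get_score(importance_type="weight").
assert b.get_fscore() == b.get_score(importance_type="weight")
```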

37 changes: 33 additions & 4 deletions tests/python/test_with_sklearn.py
@@ -104,14 +104,14 @@ def test_ranking():
     np.testing.assert_almost_equal(pred, pred_orig)
 
 
-def test_feature_importances():
+def test_feature_importances_weight():
     tm._skip_if_no_sklearn()
     from sklearn.datasets import load_digits
 
     digits = load_digits(2)
     y = digits['target']
     X = digits['data']
-    xgb_model = xgb.XGBClassifier(seed=0).fit(X, y)
+    xgb_model = xgb.XGBClassifier(random_state=0, importance_type="weight").fit(X, y)
 
     exp = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.00833333, 0.,
                     0., 0., 0., 0., 0., 0., 0., 0.025, 0.14166667, 0., 0., 0.,
@@ -127,10 +127,39 @@ def test_feature_importances():
     import pandas as pd
     y = pd.Series(digits['target'])
     X = pd.DataFrame(digits['data'])
-    xgb_model = xgb.XGBClassifier(seed=0).fit(X, y)
+    xgb_model = xgb.XGBClassifier(random_state=0, importance_type="weight").fit(X, y)
     np.testing.assert_almost_equal(xgb_model.feature_importances_, exp)
 
-    xgb_model = xgb.XGBClassifier(seed=0).fit(X, y)
+    xgb_model = xgb.XGBClassifier(random_state=0, importance_type="weight").fit(X, y)
     np.testing.assert_almost_equal(xgb_model.feature_importances_, exp)
+
+
+def test_feature_importances_gain():
+    tm._skip_if_no_sklearn()
+    from sklearn.datasets import load_digits
+
+    digits = load_digits(2)
+    y = digits['target']
+    X = digits['data']
+    xgb_model = xgb.XGBClassifier(random_state=0, importance_type="gain").fit(X, y)
+
+    exp = np.array([0., 0., 0., 0., 0., 0., 0., 0., 0., 0., 0.00326159, 0., 0., 0.,
+                    0., 0., 0., 0., 0., 0.00297238, 0.00988034, 0., 0., 0., 0.,
+                    0., 0., 0.03512521, 0.41123885, 0., 0., 0., 0., 0.01326332,
+                    0.00160674, 0., 0.4206952, 0., 0., 0., 0., 0.00616747, 0.01237546,
+                    0., 0., 0., 0., 0., 0., 0., 0.08240705, 0., 0., 0., 0.,
+                    0., 0., 0., 0.00100649, 0., 0., 0., 0., 0.], dtype=np.float32)
+
+    np.testing.assert_almost_equal(xgb_model.feature_importances_, exp)
+
+    # numeric columns
+    import pandas as pd
+    y = pd.Series(digits['target'])
+    X = pd.DataFrame(digits['data'])
+    xgb_model = xgb.XGBClassifier(random_state=0, importance_type="gain").fit(X, y)
+    np.testing.assert_almost_equal(xgb_model.feature_importances_, exp)
+
+    xgb_model = xgb.XGBClassifier(random_state=0, importance_type="gain").fit(X, y)
+    np.testing.assert_almost_equal(xgb_model.feature_importances_, exp)
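The hard-coded `exp` arrays are version-sensitive snapshots; conceptually they are just the booster's raw gain scores normalized to sum to one. A cross-check sketch (reusing `xgb_model` from the test above):

```python
b = xgb_model.get_booster()
score = b.get_score(importance_type="gain")
raw = np.array([score.get(f, 0.) for f in b.feature_names], dtype=np.float32)
# feature_importances_ is the normalized gain vector
np.testing.assert_almost_equal(xgb_model.feature_importances_, raw / raw.sum())
```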

