From b67f22915d6335ae65b744fd28a7cfda6826d4ee Mon Sep 17 00:00:00 2001
From: Philip Cho
Date: Wed, 26 Sep 2018 17:03:07 -0700
Subject: [PATCH] Fix #3730: scikit-learn 0.20 compatibility fix

sklearn.cross_validation has been removed from scikit-learn 0.20, so
replace it with sklearn.model_selection
---
 tests/python-gpu/test_gpu_prediction.py |  5 ++++-
 tests/python/test_with_sklearn.py       | 25 ++++++++++++++++++++-----
 2 files changed, 24 insertions(+), 6 deletions(-)

diff --git a/tests/python-gpu/test_gpu_prediction.py b/tests/python-gpu/test_gpu_prediction.py
index 07e86d8de80f..8eb1a3e61ce3 100644
--- a/tests/python-gpu/test_gpu_prediction.py
+++ b/tests/python-gpu/test_gpu_prediction.py
@@ -49,7 +49,10 @@ def non_decreasing(self, L):
     # Test case for a bug where multiple batch predictions made on a test set produce incorrect results
     def test_multi_predict(self):
         from sklearn.datasets import make_regression
-        from sklearn.cross_validation import train_test_split
+        try:
+            from sklearn.model_selection import train_test_split
+        except:
+            from sklearn.cross_validation import train_test_split
 
         n = 1000
         X, y = make_regression(n, random_state=rng)
diff --git a/tests/python/test_with_sklearn.py b/tests/python/test_with_sklearn.py
index 338946306e88..cd6d505e31a3 100644
--- a/tests/python/test_with_sklearn.py
+++ b/tests/python/test_with_sklearn.py
@@ -149,7 +149,10 @@ def test_boston_housing_regression():
     tm._skip_if_no_sklearn()
     from sklearn.metrics import mean_squared_error
     from sklearn.datasets import load_boston
-    from sklearn.cross_validation import KFold
+    try:
+        from sklearn.model_selection import KFold
+    except:
+        from sklearn.cross_validation import KFold
 
     boston = load_boston()
     y = boston['target']
@@ -191,7 +194,10 @@ def test_regression_with_custom_objective():
     tm._skip_if_no_sklearn()
     from sklearn.metrics import mean_squared_error
     from sklearn.datasets import load_boston
-    from sklearn.cross_validation import KFold
+    try:
+        from sklearn.model_selection import KFold
+    except:
+        from sklearn.cross_validation import KFold
 
     def objective_ls(y_true, y_pred):
         grad = (y_pred - y_true)
@@ -224,7 +230,10 @@ def dummy_objective(y_true, y_pred):
 def test_classification_with_custom_objective():
     tm._skip_if_no_sklearn()
     from sklearn.datasets import load_digits
-    from sklearn.cross_validation import KFold
+    try:
+        from sklearn.model_selection import KFold
+    except:
+        from sklearn.cross_validation import KFold
 
     def logregobj(y_true, y_pred):
         y_pred = 1.0 / (1.0 + np.exp(-y_pred))
@@ -263,7 +272,10 @@ def dummy_objective(y_true, y_preds):
 def test_sklearn_api():
     tm._skip_if_no_sklearn()
     from sklearn.datasets import load_iris
-    from sklearn.cross_validation import train_test_split
+    try:
+        from sklearn.model_selection import train_test_split
+    except:
+        from sklearn.cross_validation import train_test_split
 
     iris = load_iris()
     tr_d, te_d, tr_l, te_l = train_test_split(iris.data, iris.target, train_size=120)
@@ -280,7 +292,10 @@ def test_sklearn_api():
 def test_sklearn_api_gblinear():
     tm._skip_if_no_sklearn()
     from sklearn.datasets import load_iris
-    from sklearn.cross_validation import train_test_split
+    try:
+        from sklearn.model_selection import train_test_split
+    except:
+        from sklearn.cross_validation import train_test_split
 
     iris = load_iris()
     tr_d, te_d, tr_l, te_l = train_test_split(iris.data, iris.target, train_size=120)