diff --git a/src/tree/updater_quantile_hist.cc b/src/tree/updater_quantile_hist.cc
index 6d9f639e5a81..334f9e3247e9 100644
--- a/src/tree/updater_quantile_hist.cc
+++ b/src/tree/updater_quantile_hist.cc
@@ -263,7 +263,7 @@ void QuantileHistMaker::Builder::ExpandTree(
     ApplySplit(nodes_for_apply_split, gmat, column_matrix, p_tree);
     SplitSiblings(nodes_for_apply_split, &nodes_to_evaluate, p_tree);
 
-    if (depth < param_.max_depth) {
+    if (param_.max_depth == 0 || depth < param_.max_depth) {
       size_t i = 0;
       for (auto const &gidx : p_fmat->GetBatches<GHistIndexMatrix>(
                {GenericParameter::kCpuId, param_.max_bin})) {
diff --git a/tests/python/test_tree_regularization.py b/tests/python/test_tree_regularization.py
index 68d3944071a4..92fa9fb51ff2 100644
--- a/tests/python/test_tree_regularization.py
+++ b/tests/python/test_tree_regularization.py
@@ -60,3 +60,18 @@ def test_alpha_and_lambda(self):
         # sum_hess = 1.0
         # 0.7 = 0.5 - (sum_grad - alpha * sgn(sum_grad)) / (sum_hess + lambda)
         assert_approx_equal(preds[0], 0.7)
+
+    def test_unlimited_depth(self):
+        x = np.array([[0], [1], [2], [3]])
+        y = np.array([0, 1, 2, 3])
+
+        model = xgb.XGBRegressor(
+            n_estimators=1,
+            eta=1,
+            tree_method="hist",
+            grow_policy="lossguide",
+            reg_lambda=0,
+            max_leaves=128,
+            max_depth=0,
+        ).fit(x, y)
+        assert np.array_equal(model.predict(x), y)
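For reference, here is a minimal usage sketch of the new behavior, assuming an XGBoost build that includes this patch. The dataset, the `fit` helper, and the parameter values are illustrative, not part of the change; the point is that with `tree_method="hist"` and `grow_policy="lossguide"`, `max_depth=0` now lifts the depth cap, so `max_leaves` becomes the effective limit on tree size:

```python
# Sketch only: assumes an XGBoost build containing this patch.
import numpy as np
import xgboost as xgb

# 64 distinct points: fitting them exactly with one tree needs 64 leaves,
# which requires depth >= 6 (more if the splits are unbalanced).
x = np.arange(64).reshape(-1, 1).astype(float)
y = np.arange(64, dtype=float)

def fit(max_depth):
    # Hypothetical helper; only max_depth varies between the two runs.
    return xgb.XGBRegressor(
        n_estimators=1,
        eta=1,                    # learning rate 1 so a single tree fits the target
        tree_method="hist",
        grow_policy="lossguide",  # leaf-wise growth, bounded by max_leaves
        reg_lambda=0,             # no shrinkage of leaf values
        max_leaves=256,
        max_depth=max_depth,
    ).fit(x, y)

capped = fit(max_depth=3)     # at most 8 leaves: cannot fit 64 targets exactly
unlimited = fit(max_depth=0)  # no depth cap: growth stops at max_leaves or zero gain

print(np.abs(capped.predict(x) - y).max())     # > 0
print(np.abs(unlimited.predict(x) - y).max())  # ~0
```

With the depth cap removed, a finite `max_leaves` is the only structural limit left on loss-guided expansion, which is why the added test pairs `max_depth=0` with an explicit `max_leaves`.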