Fix cv verbose_eval (#7291)
trivialfis authored Oct 8, 2021
1 parent f7caac2 commit 578de9f
Showing 2 changed files with 46 additions and 25 deletions.
python-package/xgboost/training.py (12 changes: 7 additions & 5 deletions)
@@ -472,13 +472,15 @@ def cv(params, dtrain, num_boost_round=10, nfold=3, stratified=False, folds=None
     if is_new_callback:
         assert all(isinstance(c, callback.TrainingCallback)
                    for c in callbacks), "You can't mix new and old callback styles."
-        if isinstance(verbose_eval, bool) and verbose_eval:
+        if verbose_eval:
             verbose_eval = 1 if verbose_eval is True else verbose_eval
-            callbacks.append(callback.EvaluationMonitor(period=verbose_eval,
-                                                        show_stdv=show_stdv))
+            callbacks.append(
+                callback.EvaluationMonitor(period=verbose_eval, show_stdv=show_stdv)
+            )
         if early_stopping_rounds:
-            callbacks.append(callback.EarlyStopping(
-                rounds=early_stopping_rounds, maximize=maximize))
+            callbacks.append(
+                callback.EarlyStopping(rounds=early_stopping_rounds, maximize=maximize)
+            )
         callbacks = callback.CallbackContainer(callbacks, metric=feval, is_cv=True)
     else:
         callbacks = _configure_deprecated_callbacks(
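
For context, the practical effect of this hunk: in the new-callback path, cv previously attached an EvaluationMonitor only when verbose_eval was a bool, so an integer logging period was silently ignored. A minimal usage sketch of the fixed behaviour follows; the synthetic data and parameter values are illustrative and not part of the commit:

    import numpy as np
    import xgboost as xgb

    # Small synthetic binary-classification set, purely for illustration.
    rng = np.random.RandomState(0)
    X = rng.randn(200, 5)
    y = (X[:, 0] > 0).astype(int)
    dtrain = xgb.DMatrix(X, label=y)

    params = {'objective': 'binary:logistic', 'eval_metric': 'error'}

    # With this commit an integer period is honoured by cv as well: evaluation
    # results should be printed every 2 rounds plus the final round, matching
    # the behaviour of xgb.train.
    xgb.cv(params, dtrain, num_boost_round=10, verbose_eval=2)
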
tests/python/test_callback.py (59 changes: 39 additions & 20 deletions)
@@ -1,3 +1,4 @@
+from typing import Union
 import xgboost as xgb
 import pytest
 import os
@@ -22,29 +23,47 @@ def setup_class(cls):
         cls.X_valid = X[split:, ...]
         cls.y_valid = y[split:, ...]
 
-    def run_evaluation_monitor(self, D_train, D_valid, rounds, verbose_eval):
-        evals_result = {}
+    def run_evaluation_monitor(
+        self,
+        D_train: xgb.DMatrix,
+        D_valid: xgb.DMatrix,
+        rounds: int,
+        verbose_eval: Union[bool, int]
+    ):
+        def check_output(output: str) -> None:
+            if int(verbose_eval) == 1:
+                # Should print each iteration info
+                assert len(output.split('\n')) == rounds
+            elif int(verbose_eval) > rounds:
+                # Should print first and latest iteration info
+                assert len(output.split('\n')) == 2
+            else:
+                # Should print info by each period additionally to first and latest
+                # iteration
+                num_periods = rounds // int(verbose_eval)
+                # Extra information is required for latest iteration
+                is_extra_info_required = num_periods * int(verbose_eval) < (rounds - 1)
+                assert len(output.split('\n')) == (
+                    1 + num_periods + int(is_extra_info_required)
+                )
+
+        evals_result: xgb.callback.TrainingCallback.EvalsLog = {}
+        params = {'objective': 'binary:logistic', 'eval_metric': 'error'}
         with tm.captured_output() as (out, err):
-            xgb.train({'objective': 'binary:logistic',
-                       'eval_metric': 'error'}, D_train,
-                      evals=[(D_train, 'Train'), (D_valid, 'Valid')],
-                      num_boost_round=rounds,
-                      evals_result=evals_result,
-                      verbose_eval=verbose_eval)
+            xgb.train(
+                params, D_train,
+                evals=[(D_train, 'Train'), (D_valid, 'Valid')],
+                num_boost_round=rounds,
+                evals_result=evals_result,
+                verbose_eval=verbose_eval,
+            )
             output: str = out.getvalue().strip()
+            check_output(output)
 
-        if int(verbose_eval) == 1:
-            # Should print each iteration info
-            assert len(output.split('\n')) == rounds
-        elif int(verbose_eval) > rounds:
-            # Should print first and latest iteration info
-            assert len(output.split('\n')) == 2
-        else:
-            # Should print info by each period additionally to first and latest iteration
-            num_periods = rounds // int(verbose_eval)
-            # Extra information is required for latest iteration
-            is_extra_info_required = num_periods * int(verbose_eval) < (rounds - 1)
-            assert len(output.split('\n')) == 1 + num_periods + int(is_extra_info_required)
+        with tm.captured_output() as (out, err):
+            xgb.cv(params, D_train, num_boost_round=rounds, verbose_eval=verbose_eval)
+            output = out.getvalue().strip()
+            check_output(output)
 
     def test_evaluation_monitor(self):
         D_train = xgb.DMatrix(self.X_train, self.y_train)
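
For reference, a rough worked check of the line-count arithmetic that check_output encodes for an integer period greater than 1 and no larger than rounds. The helper name and the example values below are illustrative only, not part of the commit:

    # Mirrors the else branch of check_output: the monitor prints iteration 0,
    # every verbose_eval-th iteration after that, and the final iteration when
    # the last printed period does not already cover it.
    def expected_lines(rounds: int, period: int) -> int:
        num_periods = rounds // period
        is_extra_info_required = num_periods * period < (rounds - 1)
        return 1 + num_periods + int(is_extra_info_required)

    # rounds=10, period=3 -> iterations 0, 3, 6, 9 are printed: 4 lines, no extra.
    assert expected_lines(10, 3) == 4
    # rounds=10, period=4 -> iterations 0, 4, 8 plus the final iteration 9: 4 lines.
    assert expected_lines(10, 4) == 4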
